repo
stringlengths 2
99
| file
stringlengths 13
225
| code
stringlengths 0
18.3M
| file_length
int64 0
18.3M
| avg_line_length
float64 0
1.36M
| max_line_length
int64 0
4.26M
| extension_type
stringclasses 1
value |
|---|---|---|---|---|---|---|
scipy
|
scipy-main/scipy/fftpack/tests/test_helper.py
|
# Created by Pearu Peterson, September 2002
# Legacy instructions for building/running these tests standalone (fftpack era).
__usage__ = """
Build fftpack:
python setup_fftpack.py build
Run tests if scipy is installed:
python -c 'import scipy;scipy.fftpack.test(<level>)'
Run tests if fftpack is not installed:
python tests/test_helper.py [<level>]
"""
from numpy.testing import assert_array_almost_equal
from scipy.fftpack import fftshift, ifftshift, fftfreq, rfftfreq
from numpy import pi, random
class TestFFTShift:
    """Tests for fftshift/ifftshift ordering and round-tripping."""

    def test_definition(self):
        # Odd length: zero frequency ends up exactly in the centre.
        natural = [0, 1, 2, 3, 4, -4, -3, -2, -1]
        centred = [-4, -3, -2, -1, 0, 1, 2, 3, 4]
        assert_array_almost_equal(fftshift(natural), centred)
        assert_array_almost_equal(ifftshift(centred), natural)
        # Even length: negative half leads after the shift.
        natural = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1]
        centred = [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4]
        assert_array_almost_equal(fftshift(natural), centred)
        assert_array_almost_equal(ifftshift(centred), natural)

    def test_inverse(self):
        # ifftshift must undo fftshift for odd and even sizes alike.
        for size in [1, 4, 9, 100, 211]:
            data = random.random((size,))
            assert_array_almost_equal(ifftshift(fftshift(data)), data)
class TestFFTFreq:
    """Check fftfreq bin values for odd and even transform lengths."""

    def test_definition(self):
        # n * fftfreq(n) recovers the integer frequency bins (odd n).
        bins = [0, 1, 2, 3, 4, -4, -3, -2, -1]
        assert_array_almost_equal(9 * fftfreq(9), bins)
        assert_array_almost_equal(9 * pi * fftfreq(9, pi), bins)
        # Even n: the Nyquist bin lands on the negative side.
        bins = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1]
        assert_array_almost_equal(10 * fftfreq(10), bins)
        assert_array_almost_equal(10 * pi * fftfreq(10, pi), bins)
class TestRFFTFreq:
    """Check rfftfreq bin values (fftpack's real-FFT packed ordering)."""

    def test_definition(self):
        # Frequencies appear in duplicated pairs for the packed real FFT.
        bins = [0, 1, 1, 2, 2, 3, 3, 4, 4]
        assert_array_almost_equal(9 * rfftfreq(9), bins)
        assert_array_almost_equal(9 * pi * rfftfreq(9, pi), bins)
        bins = [0, 1, 1, 2, 2, 3, 3, 4, 4, 5]
        assert_array_almost_equal(10 * rfftfreq(10), bins)
        assert_array_almost_equal(10 * pi * rfftfreq(10, pi), bins)
| 1,675
| 29.472727
| 64
|
py
|
scipy
|
scipy-main/scipy/fftpack/tests/__init__.py
| 0
| 0
| 0
|
py
|
|
scipy
|
scipy-main/scipy/misc/setup.py
|
def configuration(parent_package='', top_path=None):
    # Legacy numpy.distutils build configuration for the scipy.misc
    # subpackage (Meson builds do not use this file).
    from numpy.distutils.misc_util import Configuration
    config = Configuration('misc', parent_package, top_path)
    # Ship the bundled datasets (ascent.dat, face.dat, ecg.dat).
    config.add_data_files('*.dat')
    config.add_data_dir('tests')
    return config


if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
| 374
| 30.25
| 58
|
py
|
scipy
|
scipy-main/scipy/misc/doccer.py
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
import warnings
from scipy._lib import doccer
__all__ = [ # noqa: F822
'docformat', 'inherit_docstring_from', 'indentcount_lines',
'filldoc', 'unindent_dict', 'unindent_string', 'extend_notes_in_docstring',
'replace_notes_in_docstring'
]
def __dir__():
    # dir(scipy.misc.doccer) advertises only the deprecated public names.
    return __all__
def __getattr__(name):
    # Deprecation shim: warn, then forward attribute access to the real
    # implementation in scipy._lib.doccer.
    if name not in __all__:
        raise AttributeError(
            "scipy.misc.doccer is deprecated and has no attribute "
            f"{name}.")

    warnings.warn("The `scipy.misc.doccer` namespace is deprecated and "
                  "will be removed in SciPy v2.0.0.",
                  category=DeprecationWarning, stacklevel=2)
    return getattr(doccer, name)
| 766
| 24.566667
| 79
|
py
|
scipy
|
scipy-main/scipy/misc/common.py
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.datasets` namespace for importing the dataset functions
# included below.
import warnings
from . import _common
__all__ = [ # noqa: F822
'central_diff_weights', 'derivative', 'ascent', 'face',
'electrocardiogram', 'arange', 'newaxis', 'hstack', 'prod', 'array', 'load'
]
def __dir__():
    # dir(scipy.misc.common) advertises only the deprecated public names.
    return __all__
def __getattr__(name):
    # Deprecation shim: warn, then forward the lookup to the private
    # ``_common`` module that holds the real implementations.
    if name not in __all__:
        raise AttributeError(
            "scipy.misc.common is deprecated and has no attribute "
            f"{name}. Try looking in scipy.datasets instead.")

    warnings.warn(f"Please use `{name}` from the `scipy.misc` namespace, "
                  "the `scipy.misc.common` namespace is deprecated.",
                  category=DeprecationWarning, stacklevel=2)
    return getattr(_common, name)
| 869
| 28
| 79
|
py
|
scipy
|
scipy-main/scipy/misc/__init__.py
|
"""
==========================================
Miscellaneous routines (:mod:`scipy.misc`)
==========================================
.. currentmodule:: scipy.misc
.. deprecated:: 1.10.0
This module is deprecated and will be completely
removed in SciPy v2.0.0.
Various utilities that don't have another home.
.. autosummary::
:toctree: generated/
ascent - Get example image for processing
central_diff_weights - Weights for an n-point central mth derivative
derivative - Find the nth derivative of a function at a point
face - Get example image for processing
electrocardiogram - Load an example of a 1-D signal
"""
from ._common import *
from . import _common
import warnings
# Deprecated namespaces, to be removed in v2.0.0
from . import common, doccer
__all__ = _common.__all__
dataset_methods = ['ascent', 'face', 'electrocardiogram']
def __dir__():
    # dir(scipy.misc) advertises only the deprecated public names.
    return __all__
def __getattr__(name):
    """Warn about the scipy.misc deprecation, then resolve `name`.

    Dataset methods point users to `scipy.datasets`; everything else is
    fetched from the private ``_common`` module.
    """
    if name not in __all__:
        raise AttributeError(
            "scipy.misc is deprecated and has no attribute "
            f"{name}.")

    if name in dataset_methods:
        msg = ("The module `scipy.misc` is deprecated and will be "
              "completely removed in SciPy v2.0.0. "
              f"All dataset methods including {name}, must be imported "
              "directly from the new `scipy.datasets` module.")
    else:
        msg = (f"The method `{name}` from the `scipy.misc` namespace is"
               " deprecated, and will be removed in SciPy v1.12.0.")

    warnings.warn(msg, category=DeprecationWarning, stacklevel=2)

    # BUG FIX: the original `return getattr(name)` called the builtin
    # getattr with a single argument, which raises
    # "TypeError: getattr expected at least 2 arguments".  Re-import the
    # private module locally because the module-level `_common` binding
    # is deleted (`del _common`) after this definition.
    from . import _common
    return getattr(_common, name)
del _common
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
| 1,726
| 24.397059
| 73
|
py
|
scipy
|
scipy-main/scipy/misc/_common.py
|
"""
Functions which are common and require SciPy Base and Level 1 SciPy
(special, linalg)
"""
from scipy._lib.deprecation import _deprecated
from scipy._lib._finite_differences import _central_diff_weights, _derivative
from numpy import array, frombuffer, load
__all__ = ['central_diff_weights', 'derivative', 'ascent', 'face',
'electrocardiogram']
@_deprecated(msg="scipy.misc.central_diff_weights is deprecated in "
                 "SciPy v1.10.0; and will be completely removed in "
                 "SciPy v1.12.0. You may consider using "
                 "findiff: https://github.com/maroba/findiff or "
                 "numdifftools: https://github.com/pbrod/numdifftools")
def central_diff_weights(Np, ndiv=1):
    """
    Return weights for an Np-point central derivative.

    Assumes equally-spaced function points.  If the weights are in the
    vector w, the derivative is
    ``w[0] * f(x-ho*dx) + ... + w[-1] * f(x+h0*dx)``.

    .. deprecated:: 1.10.0
        `central_diff_weights` was deprecated in SciPy 1.10.0 and will be
        completely removed in SciPy 1.12.0.  You may consider using
        findiff (https://github.com/maroba/findiff) or
        numdifftools (https://github.com/pbrod/numdifftools) instead.

    Parameters
    ----------
    Np : int
        Number of points for the central derivative.
    ndiv : int, optional
        Number of divisions. Default is 1.

    Returns
    -------
    w : ndarray
        Weights for an Np-point central derivative. Its size is `Np`.

    Notes
    -----
    Can be inaccurate for a large number of points.

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Finite_difference
    """
    # Delegate to the private implementation shared with scipy internals.
    weights = _central_diff_weights(Np, ndiv)
    return weights
@_deprecated(msg="scipy.misc.derivative is deprecated in "
                 "SciPy v1.10.0; and will be completely removed in "
                 "SciPy v1.12.0. You may consider using "
                 "findiff: https://github.com/maroba/findiff or "
                 "numdifftools: https://github.com/pbrod/numdifftools")
def derivative(func, x0, dx=1.0, n=1, args=(), order=3):
    """
    Find the nth derivative of a function at a point.

    Given a function, use a central difference formula with spacing `dx`
    to compute the nth derivative at `x0`.

    .. deprecated:: 1.10.0
        `derivative` was deprecated in SciPy 1.10.0 and will be completely
        removed in SciPy 1.12.0.  You may consider using
        findiff (https://github.com/maroba/findiff) or
        numdifftools (https://github.com/pbrod/numdifftools) instead.

    Parameters
    ----------
    func : function
        Input function.
    x0 : float
        The point at which the nth derivative is found.
    dx : float, optional
        Spacing.
    n : int, optional
        Order of the derivative. Default is 1.
    args : tuple, optional
        Extra arguments passed to `func`.
    order : int, optional
        Number of points to use, must be odd.

    Notes
    -----
    Decreasing the step size too far can result in round-off error.
    """
    # Thin wrapper around the shared private implementation.
    return _derivative(func, x0, dx, n, args, order)
@_deprecated(msg="scipy.misc.ascent has been deprecated in SciPy v1.10.0;"
                 " and will be completely removed in SciPy v1.12.0. "
                 "Dataset methods have moved into the scipy.datasets "
                 "module. Use scipy.datasets.ascent instead.")
def ascent():
    """
    Get an 8-bit grayscale bit-depth, 512 x 512 derived image for easy
    use in demos.

    The image is derived from accent-to-the-top.jpg at
    http://www.public-domain-image.com/people-public-domain-images-pictures/

    .. deprecated:: 1.10.0
        `ascent` was deprecated in SciPy 1.10.0 and will be completely
        removed in SciPy 1.12.0.  Dataset methods have moved into the
        `scipy.datasets` module; use `scipy.datasets.ascent` instead.

    Returns
    -------
    ascent : ndarray
        Convenient image to use for testing and demonstration.

    Examples
    --------
    >>> import scipy.misc
    >>> ascent = scipy.misc.ascent()
    >>> ascent.shape
    (512, 512)
    >>> ascent.max()
    255
    """
    import os
    import pickle

    # The image ships with scipy as a pickled array next to this module.
    data_path = os.path.join(os.path.dirname(__file__), 'ascent.dat')
    with open(data_path, 'rb') as fh:
        image = array(pickle.load(fh))
    return image
@_deprecated(msg="scipy.misc.face has been deprecated in SciPy v1.10.0; "
                 "and will be completely removed in SciPy v1.12.0. "
                 "Dataset methods have moved into the scipy.datasets "
                 "module. Use scipy.datasets.face instead.")
def face(gray=False):
    """
    Get a 1024 x 768, color image of a raccoon face.

    raccoon-procyon-lotor.jpg at http://www.public-domain-image.com

    .. deprecated:: 1.10.0
        `face` was deprecated in SciPy 1.10.0 and will be completely
        removed in SciPy 1.12.0.  Dataset methods have moved into the
        `scipy.datasets` module; use `scipy.datasets.face` instead.

    Parameters
    ----------
    gray : bool, optional
        If True return 8-bit grey-scale image, otherwise return a color
        image.

    Returns
    -------
    face : ndarray
        Image of a raccoon face, shape ``(768, 1024, 3)`` (or
        ``(768, 1024)`` when ``gray=True``), dtype uint8.
    """
    import bz2
    import os

    # The image ships bz2-compressed next to this module.
    data_path = os.path.join(os.path.dirname(__file__), 'face.dat')
    with open(data_path, 'rb') as fh:
        compressed = fh.read()
    raccoon = frombuffer(bz2.decompress(compressed), dtype='uint8')
    raccoon.shape = (768, 1024, 3)
    if gray is True:
        # Weighted RGB average, cast back to 8-bit.
        raccoon = (0.21 * raccoon[:, :, 0] + 0.71 * raccoon[:, :, 1]
                   + 0.07 * raccoon[:, :, 2]).astype('uint8')
    return raccoon
@_deprecated(msg="scipy.misc.electrocardiogram has been "
                 "deprecated in SciPy v1.10.0; and will "
                 "be completely removed in SciPy v1.12.0. "
                 "Dataset methods have moved into the scipy.datasets "
                 "module. Use scipy.datasets.electrocardiogram instead.")
def electrocardiogram():
    """
    Load an electrocardiogram as an example for a 1-D signal.

    The returned signal is a 5 minute long electrocardiogram (ECG), a
    medical recording of the heart's electrical activity, sampled at
    360 Hz.

    .. deprecated:: 1.10.0
        `electrocardiogram` was deprecated in SciPy 1.10.0 and will be
        completely removed in SciPy 1.12.0.  Dataset methods have moved
        into the `scipy.datasets` module; use
        `scipy.datasets.electrocardiogram` instead.

    Returns
    -------
    ecg : ndarray
        The electrocardiogram in millivolt (mV) sampled at 360 Hz.

    Notes
    -----
    The provided signal is an excerpt (19:35 to 24:35) from the
    `record 208`_ (lead MLII) provided by the MIT-BIH Arrhythmia
    Database [1]_ on PhysioNet [2]_.  The excerpt includes noise induced
    artifacts, typical heartbeats as well as pathological changes.

    .. _record 208: https://physionet.org/physiobank/database/html/mitdbdir/records.htm#208

    .. versionadded:: 1.1.0

    References
    ----------
    .. [1] Moody GB, Mark RG. The impact of the MIT-BIH Arrhythmia Database.
           IEEE Eng in Med and Biol 20(3):45-50 (May-June 2001).
           (PMID: 11446209); :doi:`10.13026/C2F305`
    .. [2] Goldberger AL, Amaral LAN, Glass L, Hausdorff JM, Ivanov PCh,
           Mark RG, Mietus JE, Moody GB, Peng C-K, Stanley HE. PhysioBank,
           PhysioToolkit, and PhysioNet: Components of a New Research
           Resource for Complex Physiologic Signals. Circulation
           101(23):e215-e220; :doi:`10.1161/01.CIR.101.23.e215`
    """
    import os

    data_path = os.path.join(os.path.dirname(__file__), "ecg.dat")
    with load(data_path) as datafile:
        raw = datafile["ecg"].astype(int)  # stored as np.uint16
    # Convert raw output of the ADC to mV: (ecg - adc_zero) / adc_gain.
    return (raw - 1024) / 200.0
| 11,120
| 31.422741
| 93
|
py
|
scipy
|
scipy-main/scipy/misc/tests/test_common.py
|
from numpy.testing import assert_equal, assert_almost_equal, suppress_warnings
from scipy.misc import face, ascent, electrocardiogram
def test_face():
    # face() warns about its deprecation; silence that and check the shape.
    with suppress_warnings() as sup:
        sup.filter(category=DeprecationWarning)
        shape = face().shape
    assert_equal(shape, (768, 1024, 3))
def test_ascent():
    # ascent() warns about its deprecation; silence that and check the shape.
    with suppress_warnings() as sup:
        sup.filter(category=DeprecationWarning)
        shape = ascent().shape
    assert_equal(shape, (512, 512))
def test_electrocardiogram():
    # Check shape, dtype and summary statistics of the bundled ECG signal.
    with suppress_warnings() as sup:
        sup.filter(category=DeprecationWarning)
        ecg = electrocardiogram()
    assert ecg.dtype == float
    assert_equal(ecg.shape, (108000,))
    assert_almost_equal(ecg.mean(), -0.16510875)
    assert_almost_equal(ecg.std(), 0.5992473991177294)
| 833
| 29.888889
| 78
|
py
|
scipy
|
scipy-main/scipy/misc/tests/test_config.py
|
"""
Check the SciPy config is valid.
"""
import scipy
import pytest
from unittest.mock import patch
pytestmark = pytest.mark.skipif(
not hasattr(scipy.__config__, "_built_with_meson"),
reason="Requires Meson builds",
)
class TestSciPyConfigs:
    # Sections every Meson-built scipy.show_config() report must contain.
    REQUIRED_CONFIG_KEYS = [
        "Compilers",
        "Machine Information",
        "Python Information",
    ]

    @patch("scipy.__config__._check_pyyaml")
    def test_pyyaml_not_found(self, mock_yaml_importer):
        # Without pyyaml the pretty-printed config should warn, not crash.
        mock_yaml_importer.side_effect = ModuleNotFoundError()
        with pytest.warns(UserWarning):
            scipy.show_config()

    def test_dict_mode(self):
        # "dicts" mode returns a plain dict containing the required keys.
        config = scipy.show_config(mode="dicts")
        assert isinstance(config, dict)
        assert all([key in config for key in self.REQUIRED_CONFIG_KEYS]), (
            "Required key missing,"
            " see index of `False` with `REQUIRED_CONFIG_KEYS`"
        )

    def test_invalid_mode(self):
        # Unknown display modes are rejected.
        with pytest.raises(AttributeError):
            scipy.show_config(mode="foo")

    def test_warn_to_add_tests(self):
        # Guard: keep this suite in sync with the DisplayModes enum.
        assert len(scipy.__config__.DisplayModes) == 2, (
            "New mode detected,"
            " please add UT if applicable and increment this count"
        )
| 1,244
| 26.666667
| 75
|
py
|
scipy
|
scipy-main/scipy/misc/tests/test_doccer.py
|
''' Some tests for the documenting decorator and support functions '''
import sys
import pytest
from numpy.testing import assert_equal, suppress_warnings
from scipy._lib import doccer
# python -OO strips docstrings
DOCSTRINGS_STRIPPED = sys.flags.optimize > 1

# Template docstring with three %(...)s substitution slots.
docstring = \
"""Docstring
%(strtest1)s
%(strtest2)s
%(strtest3)s
"""
# NOTE(review): the leading indentation inside the literals below appears
# to have been lost in transit; the unindent_* tests depend on it —
# confirm against the upstream file before relying on these fixtures.
param_doc1 = \
"""Another test
with some indent"""
param_doc2 = \
"""Another test, one line"""
param_doc3 = \
""" Another test
with some indent"""
# Substitution values keyed by the slot names used in `docstring`.
doc_dict = {'strtest1':param_doc1,
            'strtest2':param_doc2,
            'strtest3':param_doc3}
# Expected result of filling `docstring` with `doc_dict`.
filled_docstring = \
"""Docstring
Another test
with some indent
Another test, one line
Another test
with some indent
"""
def test_unindent():
    # unindent_string removes common leading whitespace; already-flush
    # strings pass through unchanged.  DeprecationWarnings are filtered
    # for the duration of the checks.
    with suppress_warnings() as sup:
        sup.filter(category=DeprecationWarning)
        assert_equal(doccer.unindent_string(param_doc1), param_doc1)
        assert_equal(doccer.unindent_string(param_doc2), param_doc2)
        assert_equal(doccer.unindent_string(param_doc3), param_doc1)
def test_unindent_dict():
    # unindent_dict applies unindent_string to every value in the dict.
    with suppress_warnings() as sup:
        sup.filter(category=DeprecationWarning)
        d2 = doccer.unindent_dict(doc_dict)
        assert_equal(d2['strtest1'], doc_dict['strtest1'])
        assert_equal(d2['strtest2'], doc_dict['strtest2'])
        assert_equal(d2['strtest3'], doc_dict['strtest1'])
def test_docformat():
    # docformat fills %(...)s slots in a docstring from a dict of
    # (unindented) parameter strings.
    with suppress_warnings() as sup:
        sup.filter(category=DeprecationWarning)
        udd = doccer.unindent_dict(doc_dict)
        formatted = doccer.docformat(docstring, udd)
        assert_equal(formatted, filled_docstring)
        single_doc = 'Single line doc %(strtest1)s'
        formatted = doccer.docformat(single_doc, doc_dict)
        # Note - initial indent of format string does not
        # affect subsequent indent of inserted parameter
        assert_equal(formatted, """Single line doc Another test
with some indent""")
@pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstrings stripped")
def test_decorator():
    # filldoc returns a decorator that substitutes doc_dict entries into
    # the decorated function's docstring, optionally unindenting them.
    # NOTE(review): indentation inside the docstring literals below looks
    # stripped relative to upstream — confirm before editing.
    with suppress_warnings() as sup:
        sup.filter(category=DeprecationWarning)
        # with unindentation of parameters
        decorator = doccer.filldoc(doc_dict, True)

        @decorator
        def func():
            """ Docstring
%(strtest3)s
"""
        assert_equal(func.__doc__, """ Docstring
Another test
with some indent
""")
        # without unindentation of parameters
        decorator = doccer.filldoc(doc_dict, False)

        @decorator
        def func():
            """ Docstring
%(strtest3)s
"""
        assert_equal(func.__doc__, """ Docstring
Another test
with some indent
""")
@pytest.mark.skipif(DOCSTRINGS_STRIPPED, reason="docstrings stripped")
def test_inherit_docstring_from():
    # inherit_docstring_from copies the parent's docstring into the child
    # method, expanding a %(super)s placeholder when present.
    with suppress_warnings() as sup:
        sup.filter(category=DeprecationWarning)

        class Foo:
            def func(self):
                '''Do something useful.'''
                return

            def func2(self):
                '''Something else.'''

        class Bar(Foo):
            @doccer.inherit_docstring_from(Foo)
            def func(self):
                '''%(super)sABC'''
                return

            @doccer.inherit_docstring_from(Foo)
            def func2(self):
                # No docstring.
                return

        assert_equal(Bar.func.__doc__, Foo.func.__doc__ + 'ABC')
        assert_equal(Bar.func2.__doc__, Foo.func2.__doc__)
        bar = Bar()
        assert_equal(bar.func.__doc__, Foo.func.__doc__ + 'ABC')
        assert_equal(bar.func2.__doc__, Foo.func2.__doc__)
| 3,738
| 26.696296
| 70
|
py
|
scipy
|
scipy-main/scipy/misc/tests/__init__.py
| 0
| 0
| 0
|
py
|
|
scipy
|
scipy-main/scipy/special/_lambertw.py
|
from ._ufuncs import _lambertw
def lambertw(z, k=0, tol=1e-8):
    r"""
    lambertw(z, k=0, tol=1e-8)

    Lambert W function.

    The Lambert W function `W(z)` is defined as the inverse function of
    ``w * exp(w)``: the value of ``W(z)`` is such that
    ``z = W(z) * exp(W(z))`` for any complex number ``z``.  It is a
    multivalued function with infinitely many branches, each giving a
    separate solution of ``z = w exp(w)``; the branches are indexed by
    the integer `k`.

    Parameters
    ----------
    z : array_like
        Input argument.
    k : int, optional
        Branch index.
    tol : float, optional
        Evaluation tolerance.

    Returns
    -------
    w : array
        `w` will have the same shape as `z`.

    Notes
    -----
    All branches are supported by `lambertw`:

    * ``lambertw(z)`` gives the principal solution (branch 0)
    * ``lambertw(z, k)`` gives the solution on branch `k`

    The principal branch (`k = 0`) is real for real ``z > -1/e``, and the
    ``k = -1`` branch is real for ``-1/e < z < 0``.  All branches except
    ``k = 0`` have a logarithmic singularity at ``z = 0``.

    **Possible issues**

    The evaluation can become inaccurate very close to the branch point
    at ``-1/e``.  In some corner cases, `lambertw` might currently fail
    to converge, or can end up on the wrong branch.

    **Algorithm**

    Halley's iteration is used to invert ``w * exp(w)``, using a
    first-order asymptotic approximation (O(log(w)) or `O(w)`) as the
    initial estimate.  The definition, implementation and choice of
    branches is based on [2]_.

    See Also
    --------
    wrightomega : the Wright Omega function

    References
    ----------
    .. [1] https://en.wikipedia.org/wiki/Lambert_W_function
    .. [2] Corless et al, "On the Lambert W function", Adv. Comp. Math. 5
       (1996) 329-359.
       https://cs.uwaterloo.ca/research/tr/1993/03/W.pdf

    Examples
    --------
    The Lambert W function is the inverse of ``w exp(w)``:

    >>> import numpy as np
    >>> from scipy.special import lambertw
    >>> w = lambertw(1)
    >>> w
    (0.56714329040978384+0j)
    >>> w * np.exp(w)
    (1.0+0j)

    Any branch gives a valid inverse:

    >>> w = lambertw(1, k=3)
    >>> w
    (-2.8535817554090377+17.113535539412148j)
    >>> w*np.exp(w)
    (1.0000000000000002+1.609823385706477e-15j)
    """
    # The numerical work happens in the compiled ufunc.
    return _lambertw(z, k, tol)
| 3,806
| 25.255172
| 76
|
py
|
scipy
|
scipy-main/scipy/special/_testutils.py
|
import os
import functools
import operator
from scipy._lib import _pep440
import numpy as np
from numpy.testing import assert_
import pytest
import scipy.special as sc
__all__ = ['with_special_errors', 'assert_func_equal', 'FuncData']
#------------------------------------------------------------------------------
# Check if a module is present to be used in tests
#------------------------------------------------------------------------------
class MissingModule:
    """Placeholder recording the name of an uninstalled optional module."""

    def __init__(self, name):
        # Only the name is stored; check_version() uses it in skip reasons.
        self.name = name
def check_version(module, min_ver):
    """Return a pytest mark enforcing a minimum version of `module`.

    `module` may be a MissingModule placeholder, in which case the tests
    are skipped unconditionally; otherwise they are skipped when the
    installed version is older than `min_ver`.
    """
    # isinstance is the idiomatic type test (the original compared with
    # `type(module) == MissingModule`, which also defeats subclassing).
    if isinstance(module, MissingModule):
        return pytest.mark.skip(reason=f"{module.name} is not installed")
    return pytest.mark.skipif(
        _pep440.parse(module.__version__) < _pep440.Version(min_ver),
        reason=f"{module.__name__} version >= {min_ver} required")
#------------------------------------------------------------------------------
# Enable convergence and loss of precision warnings -- turn off one by one
#------------------------------------------------------------------------------
def with_special_errors(func):
    """
    Decorator enabling special function errors (such as underflow,
    overflow, loss of precision, etc.) while the wrapped call runs.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Raise on every special-function error condition for the call.
        with sc.errstate(all='raise'):
            return func(*args, **kwargs)
    return wrapper
#------------------------------------------------------------------------------
# Comparing function values at many data points at once, with helpful
# error reports
#------------------------------------------------------------------------------
def assert_func_equal(func, results, points, rtol=None, atol=None,
                      param_filter=None, knownfailure=None,
                      vectorized=True, dtype=None, nan_ok=False,
                      ignore_inf_sign=False, distinguish_nan_and_inf=True):
    """Check `func` at `points` against tabulated values or a reference
    callable, via a throwaway FuncData instance."""
    if hasattr(points, 'next'):
        # it's a generator
        points = list(points)
    points = np.asarray(points)
    if points.ndim == 1:
        points = points[:, None]
    nparams = points.shape[1]

    if hasattr(results, '__name__'):
        # `results` is a reference function to evaluate alongside `func`.
        table = points
        columns = None
        ref_func = results
    else:
        # `results` is a dataset of expected values; append as columns.
        table = np.c_[points, results]
        columns = list(range(nparams, table.shape[1]))
        ref_func = None

    FuncData(func, table, list(range(nparams)),
             result_columns=columns, result_func=ref_func,
             rtol=rtol, atol=atol, param_filter=param_filter,
             knownfailure=knownfailure, nan_ok=nan_ok,
             vectorized=vectorized, ignore_inf_sign=ignore_inf_sign,
             distinguish_nan_and_inf=distinguish_nan_and_inf).check()
class FuncData:
    """
    Data set for checking a special function.

    Parameters
    ----------
    func : function
        Function to test
    data : numpy array
        columnar data to use for testing
    param_columns : int or tuple of ints
        Columns indices in which the parameters to `func` lie.
        Can be imaginary integers to indicate that the parameter
        should be cast to complex.
    result_columns : int or tuple of ints, optional
        Column indices for expected results from `func`.
    result_func : callable, optional
        Function to call to obtain results.
    rtol : float, optional
        Required relative tolerance. Default is 5*eps.
    atol : float, optional
        Required absolute tolerance. Default is 5*tiny.
    param_filter : function, or tuple of functions/Nones, optional
        Filter functions to exclude some parameter ranges.
        If omitted, no filtering is done.
    knownfailure : str, optional
        Known failure error message to raise when the test is run.
        If omitted, no exception is raised.
    nan_ok : bool, optional
        If nan is always an accepted result.
    vectorized : bool, optional
        Whether all functions passed in are vectorized.
    ignore_inf_sign : bool, optional
        Whether to ignore signs of infinities.
        (Doesn't matter for complex-valued functions.)
    distinguish_nan_and_inf : bool, optional
        If True, treat numbers which contain nans or infs as
        equal. Sets ignore_inf_sign to be True.
    """

    def __init__(self, func, data, param_columns, result_columns=None,
                 result_func=None, rtol=None, atol=None, param_filter=None,
                 knownfailure=None, dataname=None, nan_ok=False, vectorized=True,
                 ignore_inf_sign=False, distinguish_nan_and_inf=True):
        self.func = func
        self.data = data
        self.dataname = dataname
        # Normalize scalar column specs to 1-tuples.
        if not hasattr(param_columns, '__len__'):
            param_columns = (param_columns,)
        self.param_columns = tuple(param_columns)
        # Exactly one of result_columns / result_func must be given.
        if result_columns is not None:
            if not hasattr(result_columns, '__len__'):
                result_columns = (result_columns,)
            self.result_columns = tuple(result_columns)
            if result_func is not None:
                raise ValueError("Only result_func or result_columns should be provided")
        elif result_func is not None:
            self.result_columns = None
        else:
            raise ValueError("Either result_func or result_columns should be provided")
        self.result_func = result_func
        self.rtol = rtol
        self.atol = atol
        if not hasattr(param_filter, '__len__'):
            param_filter = (param_filter,)
        self.param_filter = param_filter
        self.knownfailure = knownfailure
        self.nan_ok = nan_ok
        self.vectorized = vectorized
        self.ignore_inf_sign = ignore_inf_sign
        self.distinguish_nan_and_inf = distinguish_nan_and_inf
        if not self.distinguish_nan_and_inf:
            # Collapsing nan/inf distinctions implies ignoring inf signs.
            self.ignore_inf_sign = True

    def get_tolerances(self, dtype):
        # Resolve per-dtype default tolerances (5*eps / 5*tiny) when the
        # caller did not supply rtol/atol explicitly.
        if not np.issubdtype(dtype, np.inexact):
            dtype = np.dtype(float)
        info = np.finfo(dtype)
        rtol, atol = self.rtol, self.atol
        if rtol is None:
            rtol = 5*info.eps
        if atol is None:
            atol = 5*info.tiny
        return rtol, atol

    def check(self, data=None, dtype=None, dtypes=None):
        """Check the special function against the data."""
        # Hide this frame from pytest tracebacks on assertion failures.
        __tracebackhide__ = operator.methodcaller(
            'errisinstance', AssertionError
        )

        if self.knownfailure:
            pytest.xfail(reason=self.knownfailure)

        if data is None:
            data = self.data

        if dtype is None:
            dtype = data.dtype
        else:
            data = data.astype(dtype)

        rtol, atol = self.get_tolerances(dtype)

        # Apply given filter functions
        if self.param_filter:
            param_mask = np.ones((data.shape[0],), np.bool_)
            for j, filter in zip(self.param_columns, self.param_filter):
                if filter:
                    param_mask &= list(filter(data[:,j]))
            data = data[param_mask]

        # Pick parameters from the correct columns; imaginary column
        # indices mark parameters to be cast to complex.
        params = []
        for idx, j in enumerate(self.param_columns):
            if np.iscomplexobj(j):
                j = int(j.imag)
                params.append(data[:,j].astype(complex))
            elif dtypes and idx < len(dtypes):
                params.append(data[:, j].astype(dtypes[idx]))
            else:
                params.append(data[:,j])

        # Helper for evaluating results, either vectorized or point by
        # point (optionally skipping entries flagged in skip_mask).
        def eval_func_at_params(func, skip_mask=None):
            if self.vectorized:
                got = func(*params)
            else:
                got = []
                for j in range(len(params[0])):
                    if skip_mask is not None and skip_mask[j]:
                        got.append(np.nan)
                        continue
                    got.append(func(*tuple([params[i][j] for i in range(len(params))])))
                got = np.asarray(got)
            if not isinstance(got, tuple):
                got = (got,)
            return got

        # Evaluate function to be tested
        got = eval_func_at_params(self.func)

        # Grab the correct results
        if self.result_columns is not None:
            # Correct results passed in with the data
            wanted = tuple([data[:,icol] for icol in self.result_columns])
        else:
            # Function producing correct results passed in
            skip_mask = None
            if self.nan_ok and len(got) == 1:
                # Don't spend time evaluating what doesn't need to be evaluated
                skip_mask = np.isnan(got[0])
            wanted = eval_func_at_params(self.result_func, skip_mask=skip_mask)

        # Check the validity of each output returned
        assert_(len(got) == len(wanted))

        for output_num, (x, y) in enumerate(zip(got, wanted)):
            if np.issubdtype(x.dtype, np.complexfloating) or self.ignore_inf_sign:
                # Sign of infinity is not distinguished in this mode.
                pinf_x = np.isinf(x)
                pinf_y = np.isinf(y)
                minf_x = np.isinf(x)
                minf_y = np.isinf(y)
            else:
                pinf_x = np.isposinf(x)
                pinf_y = np.isposinf(y)
                minf_x = np.isneginf(x)
                minf_y = np.isneginf(y)
            nan_x = np.isnan(x)
            nan_y = np.isnan(y)

            # Non-finite entries are zeroed out of the numeric diffs and
            # handled separately via the masks below.
            with np.errstate(all='ignore'):
                abs_y = np.absolute(y)
                abs_y[~np.isfinite(abs_y)] = 0
                diff = np.absolute(x - y)
                diff[~np.isfinite(diff)] = 0
                rdiff = diff / np.absolute(y)
                rdiff[~np.isfinite(rdiff)] = 0

            tol_mask = (diff <= atol + rtol*abs_y)
            pinf_mask = (pinf_x == pinf_y)
            minf_mask = (minf_x == minf_y)
            nan_mask = (nan_x == nan_y)

            # A point is bad if it fails the tolerance or any of the
            # inf/nan agreement checks.
            bad_j = ~(tol_mask & pinf_mask & minf_mask & nan_mask)

            point_count = bad_j.size
            if self.nan_ok:
                bad_j &= ~nan_x
                bad_j &= ~nan_y
                point_count -= (nan_x | nan_y).sum()

            if not self.distinguish_nan_and_inf and not self.nan_ok:
                # If nan's are okay we've already covered all these cases
                inf_x = np.isinf(x)
                inf_y = np.isinf(y)
                both_nonfinite = (inf_x & nan_y) | (nan_x & inf_y)
                bad_j &= ~both_nonfinite
                point_count -= both_nonfinite.sum()

            if np.any(bad_j):
                # Some bad results: inform what, where, and how bad
                msg = [""]
                msg.append("Max |adiff|: %g" % diff[bad_j].max())
                msg.append("Max |rdiff|: %g" % rdiff[bad_j].max())
                msg.append("Bad results (%d out of %d) for the following points (in output %d):"
                           % (np.sum(bad_j), point_count, output_num,))
                for j in np.nonzero(bad_j)[0]:
                    j = int(j)

                    # Local helper; note its `x` shadows the outer loop's.
                    def fmt(x):
                        return '%30s' % np.array2string(x[j], precision=18)

                    a = " ".join(map(fmt, params))
                    b = " ".join(map(fmt, got))
                    c = " ".join(map(fmt, wanted))
                    d = fmt(rdiff)
                    msg.append(f"{a} => {b} != {c} (rdiff {d})")
                assert_(False, "\n".join(msg))

    def __repr__(self):
        """Pretty-printing, esp. for Nose output"""
        if np.any(list(map(np.iscomplexobj, self.param_columns))):
            is_complex = " (complex)"
        else:
            is_complex = ""
        if self.dataname:
            return "<Data for {}{}: {}>".format(self.func.__name__, is_complex,
                                                os.path.basename(self.dataname))
        else:
            return f"<Data for {self.func.__name__}{is_complex}>"
| 11,971
| 36.647799
| 96
|
py
|
scipy
|
scipy-main/scipy/special/_generate_pyx.py
|
"""
python _generate_pyx.py
Generate Ufunc definition source files for scipy.special. Produces
files '_ufuncs.c' and '_ufuncs_cxx.c' by first producing Cython.
This will generate both calls to PyUFunc_FromFuncAndData and the
required ufunc inner loops.
The function signatures are contained in 'functions.json'; the syntax
for a function signature is
<function>: <name> ':' <input> '*' <output>
'->' <retval> '*' <ignored_retval>
<input>: <typecode>*
<output>: <typecode>*
<retval>: <typecode>?
<ignored_retval>: <typecode>?
<headers>: <header_name> [',' <header_name>]*
The input parameter types are denoted by single character type
codes, according to
'f': 'float'
'd': 'double'
'g': 'long double'
'F': 'float complex'
'D': 'double complex'
'G': 'long double complex'
'i': 'int'
'l': 'long'
'v': 'void'
If multiple kernel functions are given for a single ufunc, the one
which is used is determined by the standard ufunc mechanism. Kernel
functions that are listed first are also matched first against the
ufunc input types, so functions listed earlier take precedence.
In addition, versions with casted variables, such as d->f,D->F and
i->d are automatically generated.
There should be either a single header that contains all of the kernel
functions listed, or there should be one header for each kernel
function. Cython pxd files are allowed in addition to .h files.
Cython functions may use fused types, but the names in the list
should be the specialized ones, such as 'somefunc[float]'.
Function coming from C++ should have ``++`` appended to the name of
the header.
Floating-point exceptions inside these Ufuncs are converted to
special function errors --- which are separately controlled by the
user, and off by default, as they are usually not especially useful
for the user.
The C++ module
--------------
In addition to ``_ufuncs`` module, a second module ``_ufuncs_cxx`` is
generated. This module only exports function pointers that are to be
used when constructing some of the ufuncs in ``_ufuncs``. The function
pointers are exported via Cython's standard mechanism.
This mainly avoids build issues --- Python distutils has no way to
figure out what to do if you want to link both C++ and Fortran code in
the same shared library.
"""
import itertools
import json
import os
from stat import ST_MTIME
import argparse
import re
import textwrap
from typing import List
import numpy
# -----------------------------------------------------------------------------
# Extra code
# -----------------------------------------------------------------------------
UFUNCS_EXTRA_CODE_COMMON = """\
# This file is automatically generated by _generate_pyx.py.
# Do not edit manually!
from libc.math cimport NAN
include "_ufuncs_extra_code_common.pxi"
"""
UFUNCS_EXTRA_CODE = """\
include "_ufuncs_extra_code.pxi"
"""
UFUNCS_EXTRA_CODE_BOTTOM = """\
#
# Aliases
#
jn = jv
"""
CYTHON_SPECIAL_PXD = """\
# This file is automatically generated by _generate_pyx.py.
# Do not edit manually!
ctypedef fused number_t:
double complex
double
cpdef number_t spherical_jn(long n, number_t z, bint derivative=*) noexcept nogil
cpdef number_t spherical_yn(long n, number_t z, bint derivative=*) noexcept nogil
cpdef number_t spherical_in(long n, number_t z, bint derivative=*) noexcept nogil
cpdef number_t spherical_kn(long n, number_t z, bint derivative=*) noexcept nogil
"""
CYTHON_SPECIAL_PYX = """\
# This file is automatically generated by _generate_pyx.py.
# Do not edit manually!
\"\"\"
.. highlight:: cython
Cython API for special functions
================================
Scalar, typed versions of many of the functions in ``scipy.special``
can be accessed directly from Cython; the complete list is given
below. Functions are overloaded using Cython fused types so their
names match their Python counterpart. The module follows the following
conventions:
- If a function's Python counterpart returns multiple values, then the
function returns its outputs via pointers in the final arguments.
- If a function's Python counterpart returns a single value, then the
function's output is returned directly.
The module is usable from Cython via::
cimport scipy.special.cython_special
Error handling
--------------
Functions can indicate an error by returning ``nan``; however they
cannot emit warnings like their counterparts in ``scipy.special``.
Available functions
-------------------
FUNCLIST
Custom functions
----------------
Some functions in ``scipy.special`` which are not ufuncs have custom
Cython wrappers.
Spherical Bessel functions
~~~~~~~~~~~~~~~~~~~~~~~~~~
The optional ``derivative`` boolean argument is replaced with an
optional Cython ``bint``, leading to the following signatures.
- :py:func:`~scipy.special.spherical_jn`::
double complex spherical_jn(long, double complex)
double complex spherical_jn(long, double complex, bint)
double spherical_jn(long, double)
double spherical_jn(long, double, bint)
- :py:func:`~scipy.special.spherical_yn`::
double complex spherical_yn(long, double complex)
double complex spherical_yn(long, double complex, bint)
double spherical_yn(long, double)
double spherical_yn(long, double, bint)
- :py:func:`~scipy.special.spherical_in`::
double complex spherical_in(long, double complex)
double complex spherical_in(long, double complex, bint)
double spherical_in(long, double)
double spherical_in(long, double, bint)
- :py:func:`~scipy.special.spherical_kn`::
double complex spherical_kn(long, double complex)
double complex spherical_kn(long, double complex, bint)
double spherical_kn(long, double)
double spherical_kn(long, double, bint)
\"\"\"
from libc.math cimport NAN
include "_cython_special.pxi"
include "_cython_special_custom.pxi"
"""
STUBS = """\
# This file is automatically generated by _generate_pyx.py.
# Do not edit manually!
from typing import Any, Dict
import numpy as np
__all__ = [
'geterr',
'seterr',
'errstate',
{ALL}
]
def geterr() -> Dict[str, str]: ...
def seterr(**kwargs: str) -> Dict[str, str]: ...
class errstate:
def __init__(self, **kargs: str) -> None: ...
def __enter__(self) -> None: ...
def __exit__(
self,
exc_type: Any, # Unused
exc_value: Any, # Unused
traceback: Any, # Unused
) -> None: ...
{STUBS}
"""
# -----------------------------------------------------------------------------
# Code generation
# -----------------------------------------------------------------------------
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
add_newdocs = __import__('_add_newdocs')
CY_TYPES = {
'f': 'float',
'd': 'double',
'g': 'long double',
'F': 'float complex',
'D': 'double complex',
'G': 'long double complex',
'i': 'int',
'l': 'long',
'v': 'void',
}
C_TYPES = {
'f': 'npy_float',
'd': 'npy_double',
'g': 'npy_longdouble',
'F': 'npy_cfloat',
'D': 'npy_cdouble',
'G': 'npy_clongdouble',
'i': 'npy_int',
'l': 'npy_long',
'v': 'void',
}
TYPE_NAMES = {
'f': 'NPY_FLOAT',
'd': 'NPY_DOUBLE',
'g': 'NPY_LONGDOUBLE',
'F': 'NPY_CFLOAT',
'D': 'NPY_CDOUBLE',
'G': 'NPY_CLONGDOUBLE',
'i': 'NPY_INT',
'l': 'NPY_LONG',
}
CYTHON_SPECIAL_BENCHFUNCS = {
'airy': ['d*dddd', 'D*DDDD'],
'beta': ['dd'],
'erf': ['d', 'D'],
'exprel': ['d'],
'gamma': ['d', 'D'],
'jv': ['dd', 'dD'],
'loggamma': ['D'],
'logit': ['d'],
'psi': ['d', 'D'],
}
def underscore(arg):
    """Return *arg* with every space replaced by an underscore."""
    return "_".join(arg.split(" "))
def cast_order(c):
    """Map each type code in *c* to its rank in the casting order 'ilfdgFDG'."""
    order = 'ilfdgFDG'
    ranks = []
    for code in c:
        ranks.append(order.index(code))
    return ranks
# These downcasts will cause the function to return NaNs, unless the
# values happen to coincide exactly.
# Each pair is (ufunc-visible type, narrower kernel type): complex -> real,
# float -> integer, and long -> int conversions can all lose information.
DANGEROUS_DOWNCAST = {
    ('F', 'i'), ('F', 'l'), ('F', 'f'), ('F', 'd'), ('F', 'g'),
    ('D', 'i'), ('D', 'l'), ('D', 'f'), ('D', 'd'), ('D', 'g'),
    ('G', 'i'), ('G', 'l'), ('G', 'f'), ('G', 'd'), ('G', 'g'),
    ('f', 'i'), ('f', 'l'),
    ('d', 'i'), ('d', 'l'),
    ('g', 'i'), ('g', 'l'),
    ('l', 'i'),
}

# Sentinel "invalid value" emitted into generated code per type code;
# integer types have no NaN, so a recognizable bit pattern is used instead.
NAN_VALUE = {
    'f': 'NAN',
    'd': 'NAN',
    'g': 'NAN',
    'F': 'NAN',
    'D': 'NAN',
    'G': 'NAN',
    'i': '0xbad0bad0',
    'l': '0xbad0bad0',
}
def generate_loop(func_inputs, func_outputs, func_retval,
                  ufunc_inputs, ufunc_outputs):
    """
    Generate a UFunc loop function that calls a function given as its
    data parameter with the specified input and output arguments and
    return value.

    This function can be passed to PyUFunc_FromFuncAndData.

    Parameters
    ----------
    func_inputs, func_outputs, func_retval : str
        Signature of the function to call, given as type codes of the
        input, output and return value arguments. These 1-character
        codes are given according to the CY_TYPES and TYPE_NAMES
        lists above.

        The corresponding C function signature to be called is:

            retval func(intype1 iv1, intype2 iv2, ..., outtype1 *ov1, ...);

        If len(ufunc_outputs) == len(func_outputs)+1, the return value
        is treated as the first output argument. Otherwise, the return
        value is ignored.

    ufunc_inputs, ufunc_outputs : str
        Ufunc input and output signature.

        This does not have to exactly match the function signature,
        as long as the type casts work out on the C level.

    Returns
    -------
    loop_name
        Name of the generated loop function.
    loop_body
        Generated C code for the loop.

    """
    if len(func_inputs) != len(ufunc_inputs):
        raise ValueError("Function and ufunc have different number of inputs")

    # The ufunc may have one more output than the kernel when the kernel's
    # (non-void) return value is exposed as the first ufunc output.
    if len(func_outputs) != len(ufunc_outputs) and not (
            func_retval != "v" and len(func_outputs)+1 == len(ufunc_outputs)):
        raise ValueError("Function retval and ufunc outputs don't match")

    name = "loop_{}_{}_{}_As_{}_{}".format(
        func_retval, func_inputs, func_outputs, ufunc_inputs, ufunc_outputs
    )
    # Emit the Cython loop signature and per-argument data pointers.
    body = "cdef void %s(char **args, np.npy_intp *dims, np.npy_intp *steps, void *data) noexcept nogil:\n" % name
    body += "    cdef np.npy_intp i, n = dims[0]\n"
    body += "    cdef void *func = (<void**>data)[0]\n"
    body += "    cdef char *func_name = <char*>(<void**>data)[1]\n"

    for j in range(len(ufunc_inputs)):
        body += "    cdef char *ip%d = args[%d]\n" % (j, j)
    for j in range(len(ufunc_outputs)):
        body += "    cdef char *op%d = args[%d]\n" % (j, j + len(ufunc_inputs))

    # Build the kernel's argument types and the expressions passed to it;
    # inputs are loaded through the ufunc type and cast to the kernel type.
    ftypes = []
    fvars = []
    outtypecodes = []
    for j in range(len(func_inputs)):
        ftypes.append(CY_TYPES[func_inputs[j]])
        fvars.append("<%s>(<%s*>ip%d)[0]" % (
            CY_TYPES[func_inputs[j]],
            CY_TYPES[ufunc_inputs[j]], j))

    if len(func_outputs)+1 == len(ufunc_outputs):
        # Return value doubles as the first output; shift the rest by one.
        func_joff = 1
        outtypecodes.append(func_retval)
        body += f"    cdef {CY_TYPES[func_retval]} ov0\n"
    else:
        func_joff = 0

    for j, outtype in enumerate(func_outputs):
        body += "    cdef %s ov%d\n" % (CY_TYPES[outtype], j+func_joff)
        ftypes.append("%s *" % CY_TYPES[outtype])
        fvars.append("&ov%d" % (j+func_joff))
        outtypecodes.append(outtype)

    body += "    for i in range(n):\n"
    if len(func_outputs)+1 == len(ufunc_outputs):
        rv = "ov0 = "
    else:
        rv = ""

    funcall = "        {}(<{}(*)({}) noexcept nogil>func)({})\n".format(
        rv, CY_TYPES[func_retval], ", ".join(ftypes), ", ".join(fvars))

    # Cast-check inputs and call function
    input_checks = []
    for j in range(len(func_inputs)):
        if (ufunc_inputs[j], func_inputs[j]) in DANGEROUS_DOWNCAST:
            # Guard lossy downcasts: only call the kernel when the value
            # round-trips exactly; otherwise signal a domain error.
            chk = "<%s>(<%s*>ip%d)[0] == (<%s*>ip%d)[0]" % (
                CY_TYPES[func_inputs[j]], CY_TYPES[ufunc_inputs[j]], j,
                CY_TYPES[ufunc_inputs[j]], j)
            input_checks.append(chk)

    if input_checks:
        body += "        if %s:\n" % (" and ".join(input_checks))
        body += "    " + funcall
        body += "        else:\n"
        body += "            sf_error.error(func_name, sf_error.DOMAIN, \"invalid input argument\")\n"
        for j, outtype in enumerate(outtypecodes):
            body += "            ov%d = <%s>%s\n" % (
                j, CY_TYPES[outtype], NAN_VALUE[outtype])
    else:
        body += funcall

    # Assign and cast-check output values
    for j, (outtype, fouttype) in enumerate(zip(ufunc_outputs, outtypecodes)):
        if (fouttype, outtype) in DANGEROUS_DOWNCAST:
            body += "        if ov%d == <%s>ov%d:\n" % (j, CY_TYPES[outtype], j)
            body += "            (<%s *>op%d)[0] = <%s>ov%d\n" % (
                CY_TYPES[outtype], j, CY_TYPES[outtype], j)
            body += "        else:\n"
            body += "            sf_error.error(func_name, sf_error.DOMAIN, \"invalid output\")\n"
            body += "            (<%s *>op%d)[0] = <%s>%s\n" % (
                CY_TYPES[outtype], j, CY_TYPES[outtype], NAN_VALUE[outtype])
        else:
            body += "        (<%s *>op%d)[0] = <%s>ov%d\n" % (
                CY_TYPES[outtype], j, CY_TYPES[outtype], j)

    # Advance the strided data pointers to the next element.
    for j in range(len(ufunc_inputs)):
        body += "        ip%d += steps[%d]\n" % (j, j)
    for j in range(len(ufunc_outputs)):
        body += "        op%d += steps[%d]\n" % (j, j + len(ufunc_inputs))

    # Convert any FP exceptions raised in the loop to sf_error reports.
    body += "    sf_error.check_fpe(func_name)\n"

    return name, body
def generate_fused_type(codes):
    """
    Generate name of and cython code for a fused type.

    Parameters
    ----------
    codes : str
        Valid inputs to CY_TYPES (i.e. f, d, g, ...).

    Returns
    -------
    tuple of (fused type name, ``ctypedef fused`` declaration text).
    """
    name = codes + "_number_t"
    # Header line followed by one indented member per type code.
    lines = ["ctypedef fused " + name + ":"]
    lines.extend("    " + CY_TYPES[code] for code in codes)
    return name, "\n".join(lines)
def generate_bench(name, codes):
    """Generate Python- and Cython-level benchmark stubs for *name*.

    *codes* is a signature string of the form 'in' or 'in*out' (type
    codes per CY_TYPES). Returns a (python_benchmark, cython_benchmark)
    pair of source snippets, each calling the function N times.
    """
    tab = " "*4
    top, middle, end = [], [], []

    # Split the signature into input and (optional) output codes.
    tmp = codes.split("*")
    if len(tmp) > 1:
        incodes = tmp[0]
        outcodes = tmp[1]
    else:
        incodes = tmp[0]
        outcodes = ""

    inargs, inargs_and_types = [], []
    for n, code in enumerate(incodes):
        arg = f"x{n}"
        inargs.append(arg)
        inargs_and_types.append(f"{CY_TYPES[code]} {arg}")
    # '{{}}' survives the first .format as a literal '{}' placeholder for
    # the function name filled in below.
    line = "def {{}}(int N, {}):".format(", ".join(inargs_and_types))
    top.append(line)
    top.append(tab + "cdef int n")

    # Declarations for output variables; only needed for the Cython stub.
    outargs = []
    for n, code in enumerate(outcodes):
        arg = f"y{n}"
        outargs.append(f"&{arg}")
        line = f"cdef {CY_TYPES[code]} {arg}"
        middle.append(tab + line)

    end.append(tab + "for n in range(N):")
    end.append(2*tab + "{}({})")
    pyfunc = "_bench_{}_{}_{}".format(name, incodes, "py")
    cyfunc = "_bench_{}_{}_{}".format(name, incodes, "cy")
    pytemplate = "\n".join(top + end)
    cytemplate = "\n".join(top + middle + end)
    # The Python stub calls the ufunc; the Cython stub calls the typed
    # function directly, passing output pointers.
    pybench = pytemplate.format(pyfunc, "_ufuncs." + name, ", ".join(inargs))
    cybench = cytemplate.format(cyfunc, name, ", ".join(inargs + outargs))
    return pybench, cybench
def generate_doc(name, specs):
    """Build the reST documentation entry for one cython_special function.

    *specs* is a list of signature strings like 'dd->d' or 'd*dd->v';
    each becomes one overload line under the function's bullet point.
    """
    tab = " "*4
    doc = [f"- :py:func:`~scipy.special.{name}`::\n"]
    for spec in specs:
        incodes, outcodes = spec.split("->")
        # Codes after '*' are output-pointer arguments.
        incodes = incodes.split("*")
        intypes = [CY_TYPES[x] for x in incodes[0]]
        if len(incodes) > 1:
            types = [f"{CY_TYPES[x]} *" for x in incodes[1]]
            intypes.extend(types)
        outtype = CY_TYPES[outcodes]
        line = "{} {}({})".format(outtype, name, ", ".join(intypes))
        doc.append(2*tab + line)
    # Trailing blank line terminates the reST literal block.
    doc[-1] = f"{doc[-1]}\n"
    doc = "\n".join(doc)
    return doc
def npy_cdouble_from_double_complex(var):
    """Cast a Cython double complex to a NumPy cdouble."""
    return "_complexstuff.npy_cdouble_from_double_complex({})".format(var)
def double_complex_from_npy_cdouble(var):
    """Cast a NumPy cdouble to a Cython double complex."""
    return "_complexstuff.double_complex_from_npy_cdouble({})".format(var)
def iter_variants(inputs, outputs):
    """
    Generate variants of UFunc signatures, by changing variable types,
    within the limitation that the corresponding C types casts still
    work out.

    This does not generate all possibilities, just the ones required
    for the ufunc to work properly with the most common data types.

    Parameters
    ----------
    inputs, outputs : str
        UFunc input and output signature strings

    Yields
    ------
    new_input, new_output : str
        Modified input and output strings.
        Also the original input/output pair is yielded.

    """
    # always use long instead of int (more common type on 64-bit)
    replacements = [('i', 'l')]

    # float32-preserving signatures
    if 'i' not in inputs and 'l' not in inputs:
        # Don't add float32 versions of ufuncs with integer arguments, as this
        # can lead to incorrect dtype selection if the integer arguments are
        # arrays, but float arguments are scalars.
        # For instance sph_harm(0,[0],0,0).dtype == complex64
        # This may be a NumPy bug, but we need to work around it.
        # cf. gh-4895, https://github.com/numpy/numpy/issues/5895
        replacements = replacements + [(src + 'dD', dst + 'fF')
                                       for src, dst in replacements]

    # do the replacements
    for src, dst in replacements:
        table = str.maketrans(src, dst)
        yield inputs.translate(table), outputs.translate(table)
class Func:
    """
    Base class for Ufunc and FusedFunc.

    Parses the {header: {func_name: signature}} mapping into a list of
    (name, input codes, output codes, return code, header) tuples.
    """
    def __init__(self, name, signatures):
        self.name = name
        self.signatures = []
        self.function_name_overrides = {}

        for header in signatures.keys():
            for name, sig in signatures[header].items():
                inarg, outarg, ret = self._parse_signature(sig)
                self.signatures.append((name, inarg, outarg, ret, header))

    def _parse_signature(self, sig):
        """Parse 'in*out->ret' or 'in->ret' into (inarg, outarg, ret) codes.

        Raises ValueError for malformed signatures or more than one '*'
        in the return specification.
        """
        # Full form with output-pointer arguments: 'in * out -> ret'.
        m = re.match(r"\s*([fdgFDGil]*)\s*\*\s*([fdgFDGil]*)\s*->\s*([*fdgFDGil]*)\s*$", sig)
        if m:
            inarg, outarg, ret = (x.strip() for x in m.groups())
            if ret.count('*') > 1:
                raise ValueError(f"{self.name}: Invalid signature: {sig}")
            return inarg, outarg, ret
        # Short form with no output-pointer arguments: 'in -> ret'.
        m = re.match(r"\s*([fdgFDGil]*)\s*->\s*([fdgFDGil]?)\s*$", sig)
        if m:
            inarg, ret = (x.strip() for x in m.groups())
            return inarg, "", ret
        raise ValueError(f"{self.name}: Invalid signature: {sig}")

    def get_prototypes(self, nptypes_for_h=False):
        """Return (func_name, C prototype, Cython prototype, header) tuples
        for every kernel signature of this function.
        """
        prototypes = []
        for func_name, inarg, outarg, ret, header in self.signatures:
            ret = ret.replace('*', '')
            c_args = ([C_TYPES[x] for x in inarg]
                      + [C_TYPES[x] + ' *' for x in outarg])
            cy_args = ([CY_TYPES[x] for x in inarg]
                       + [CY_TYPES[x] + ' *' for x in outarg])
            c_proto = "{} (*)({})".format(C_TYPES[ret], ", ".join(c_args))
            if header.endswith("h") and nptypes_for_h:
                # Plain C headers declared with NumPy types when requested.
                cy_proto = c_proto + "nogil"
            else:
                cy_proto = "{} (*)({}) noexcept nogil".format(CY_TYPES[ret], ", ".join(cy_args))
            prototypes.append((func_name, c_proto, cy_proto, header))
        return prototypes

    def cython_func_name(self, c_name, specialized=False, prefix="_func_",
                         override=True):
        """Return the Cython-level name for kernel *c_name*.

        Applies any function-name override, and optionally keeps the
        fused-type specialization suffix (e.g. 'f[float]').
        """
        # act on function name overrides
        if override and c_name in self.function_name_overrides:
            c_name = self.function_name_overrides[c_name]
            prefix = ""

        # support fused types
        m = re.match(r'^(.*?)(\[.*\])$', c_name)
        if m:
            c_base_name, fused_part = m.groups()
        else:
            c_base_name, fused_part = c_name, ""
        if specialized:
            return "{}{}{}".format(prefix, c_base_name, fused_part.replace(' ', '_'))
        else:
            return f"{prefix}{c_base_name}"
class Ufunc(Func):
    """
    Ufunc signature, restricted format suitable for special functions.

    Parameters
    ----------
    name
        Name of the ufunc to create
    signature
        String of form 'func: fff*ff->f, func2: ddd->*i' describing
        the C-level functions and types of their input arguments
        and return values.

        The syntax is
        'function_name: inputparams*outputparams->output_retval*ignored_retval'

    Attributes
    ----------
    name : str
        Python name for the Ufunc
    signatures : list of (func_name, inarg_spec, outarg_spec, ret_spec, header_name)
        List of parsed signatures
    doc : str
        Docstring, obtained from add_newdocs
    function_name_overrides : dict of str->str
        Overrides for the function names in signatures

    """
    def __init__(self, name, signatures):
        super().__init__(name, signatures)
        self.doc = add_newdocs.get(name)
        if self.doc is None:
            raise ValueError("No docstring for ufunc %r" % name)
        self.doc = textwrap.dedent(self.doc).strip()

    def _get_signatures_and_loops(self, all_loops):
        """Expand kernel signatures into ufunc loop variants.

        Populates *all_loops* (loop name -> generated code) and returns
        (variants, n_inputs, n_outputs), with variants sorted by input
        cast order so earlier-listed kernels keep precedence.
        """
        inarg_num = None
        outarg_num = None

        seen = set()
        variants = []

        def add_variant(func_name, inarg, outarg, ret, inp, outp):
            if inp in seen:
                return
            seen.add(inp)

            sig = (func_name, inp, outp)
            if "v" in outp:
                raise ValueError(f"{self.name}: void signature {sig!r}")
            if len(inp) != inarg_num or len(outp) != outarg_num:
                raise ValueError("%s: signature %r does not have %d/%d input/output args" % (
                    self.name, sig,
                    inarg_num, outarg_num))

            loop_name, loop = generate_loop(inarg, outarg, ret, inp, outp)
            all_loops[loop_name] = loop
            variants.append((func_name, loop_name, inp, outp))

        # First add base variants
        for func_name, inarg, outarg, ret, header in self.signatures:
            outp = re.sub(r'\*.*', '', ret) + outarg
            ret = ret.replace('*', '')
            if inarg_num is None:
                inarg_num = len(inarg)
                outarg_num = len(outp)

            inp, outp = list(iter_variants(inarg, outp))[0]
            add_variant(func_name, inarg, outarg, ret, inp, outp)

        # Then the supplementary ones
        for func_name, inarg, outarg, ret, header in self.signatures:
            outp = re.sub(r'\*.*', '', ret) + outarg
            ret = ret.replace('*', '')
            for inp, outp in iter_variants(inarg, outp):
                add_variant(func_name, inarg, outarg, ret, inp, outp)

        # Then sort variants to input argument cast order
        # -- the sort is stable, so functions earlier in the signature list
        #    are still preferred
        variants.sort(key=lambda v: cast_order(v[2]))

        return variants, inarg_num, outarg_num

    def generate(self, all_loops):
        """Emit the Cython code that builds this ufunc via
        PyUFunc_FromFuncAndData: loop/data/type tables plus the docstring.
        """
        toplevel = ""

        variants, inarg_num, outarg_num = self._get_signatures_and_loops(
            all_loops)

        loops = []
        funcs = []
        types = []

        for func_name, loop_name, inputs, outputs in variants:
            for x in inputs:
                types.append(TYPE_NAMES[x])
            for x in outputs:
                types.append(TYPE_NAMES[x])
            loops.append(loop_name)
            funcs.append(func_name)

        # Declare the static tables consumed by PyUFunc_FromFuncAndData.
        toplevel += "cdef np.PyUFuncGenericFunction ufunc_%s_loops[%d]\n" % (self.name, len(loops))
        toplevel += "cdef void *ufunc_%s_ptr[%d]\n" % (self.name, 2*len(funcs))
        toplevel += "cdef void *ufunc_%s_data[%d]\n" % (self.name, len(funcs))
        toplevel += "cdef char ufunc_%s_types[%d]\n" % (self.name, len(types))
        # Escape the docstring into a C string literal.
        toplevel += 'cdef char *ufunc_{}_doc = (\n    "{}")\n'.format(
            self.name,
            self.doc.replace("\\", "\\\\").replace('"', '\\"').replace('\n', '\\n\"\n    "')
        )

        for j, function in enumerate(loops):
            toplevel += "ufunc_%s_loops[%d] = <np.PyUFuncGenericFunction>%s\n" % (self.name, j, function)
        for j, type in enumerate(types):
            toplevel += "ufunc_%s_types[%d] = <char>%s\n" % (self.name, j, type)
        for j, func in enumerate(funcs):
            # Each loop's data is a (kernel pointer, function name) pair.
            toplevel += "ufunc_%s_ptr[2*%d] = <void*>%s\n" % (self.name, j,
                                                              self.cython_func_name(func, specialized=True))
            toplevel += "ufunc_%s_ptr[2*%d+1] = <void*>(<char*>\"%s\")\n" % (self.name, j,
                                                                             self.name)
        for j, func in enumerate(funcs):
            toplevel += "ufunc_%s_data[%d] = &ufunc_%s_ptr[2*%d]\n" % (
                self.name, j, self.name, j)

        toplevel += ('@ = np.PyUFunc_FromFuncAndData(ufunc_@_loops, '
                     'ufunc_@_data, ufunc_@_types, %d, %d, %d, 0, '
                     '"@", ufunc_@_doc, 0)\n' % (len(types)/(inarg_num+outarg_num),
                                                 inarg_num, outarg_num)
                     ).replace('@', self.name)

        return toplevel
class FusedFunc(Func):
"""
Generate code for a fused-type special function that can be
cimported in Cython.
"""
def __init__(self, name, signatures):
super().__init__(name, signatures)
self.doc = "See the documentation for scipy.special." + self.name
# "codes" are the keys for CY_TYPES
self.incodes, self.outcodes = self._get_codes()
self.fused_types = set()
self.intypes, infused_types = self._get_types(self.incodes)
self.fused_types.update(infused_types)
self.outtypes, outfused_types = self._get_types(self.outcodes)
self.fused_types.update(outfused_types)
self.invars, self.outvars = self._get_vars()
def _get_codes(self):
inarg_num, outarg_num = None, None
all_inp, all_outp = [], []
for _, inarg, outarg, ret, _ in self.signatures:
outp = re.sub(r'\*.*', '', ret) + outarg
if inarg_num is None:
inarg_num = len(inarg)
outarg_num = len(outp)
inp, outp = list(iter_variants(inarg, outp))[0]
all_inp.append(inp)
all_outp.append(outp)
incodes = []
for n in range(inarg_num):
codes = unique([x[n] for x in all_inp])
codes.sort()
incodes.append(''.join(codes))
outcodes = []
for n in range(outarg_num):
codes = unique([x[n] for x in all_outp])
codes.sort()
outcodes.append(''.join(codes))
return tuple(incodes), tuple(outcodes)
def _get_types(self, codes):
all_types = []
fused_types = set()
for code in codes:
if len(code) == 1:
# It's not a fused type
all_types.append((CY_TYPES[code], code))
else:
# It's a fused type
fused_type, dec = generate_fused_type(code)
fused_types.add(dec)
all_types.append((fused_type, code))
return all_types, fused_types
def _get_vars(self):
invars = [f"x{n}" for n in range(len(self.intypes))]
outvars = [f"y{n}" for n in range(len(self.outtypes))]
return invars, outvars
def _get_conditional(self, types, codes, adverb):
"""Generate an if/elif/else clause that selects a specialization of
fused types.
"""
clauses = []
seen = set()
for (typ, typcode), code in zip(types, codes):
if len(typcode) == 1:
continue
if typ not in seen:
clauses.append(f"{typ} is {underscore(CY_TYPES[code])}")
seen.add(typ)
if clauses and adverb != "else":
line = "{} {}:".format(adverb, " and ".join(clauses))
elif clauses and adverb == "else":
line = "else:"
else:
line = None
return line
def _get_incallvars(self, intypes, c):
"""Generate pure input variables to a specialization,
i.e., variables that aren't used to return a value.
"""
incallvars = []
for n, intype in enumerate(intypes):
var = self.invars[n]
if c and intype == "double complex":
var = npy_cdouble_from_double_complex(var)
incallvars.append(var)
return incallvars
def _get_outcallvars(self, outtypes, c):
"""Generate output variables to a specialization,
i.e., pointers that are used to return values.
"""
outcallvars, tmpvars, casts = [], [], []
# If there are more out variables than out types, we want the
# tail of the out variables
start = len(self.outvars) - len(outtypes)
outvars = self.outvars[start:]
for n, (var, outtype) in enumerate(zip(outvars, outtypes)):
if c and outtype == "double complex":
tmp = f"tmp{n}"
tmpvars.append(tmp)
outcallvars.append(f"&{tmp}")
tmpcast = double_complex_from_npy_cdouble(tmp)
casts.append(f"{var}[0] = {tmpcast}")
else:
outcallvars.append(f"{var}")
return outcallvars, tmpvars, casts
def _get_nan_decs(self):
"""Set all variables to nan for specializations of fused types for
which don't have signatures.
"""
# Set non fused-type variables to nan
tab = " "*4
fused_types, lines = [], [tab + "else:"]
seen = set()
for outvar, outtype, code in zip(self.outvars, self.outtypes,
self.outcodes):
if len(code) == 1:
line = f"{outvar}[0] = {NAN_VALUE[code]}"
lines.append(2*tab + line)
else:
fused_type = outtype
name, _ = fused_type
if name not in seen:
fused_types.append(fused_type)
seen.add(name)
if not fused_types:
return lines
# Set fused-type variables to nan
all_codes = tuple([codes for _unused, codes in fused_types])
codelens = [len(x) for x in all_codes]
last = numpy.prod(codelens) - 1
for m, codes in enumerate(itertools.product(*all_codes)):
fused_codes, decs = [], []
for n, fused_type in enumerate(fused_types):
code = codes[n]
fused_codes.append(underscore(CY_TYPES[code]))
for nn, outvar in enumerate(self.outvars):
if self.outtypes[nn] == fused_type:
line = f"{outvar}[0] = {NAN_VALUE[code]}"
decs.append(line)
if m == 0:
adverb = "if"
elif m == last:
adverb = "else"
else:
adverb = "elif"
cond = self._get_conditional(fused_types, codes, adverb)
lines.append(2*tab + cond)
lines.extend([3*tab + x for x in decs])
return lines
def _get_tmp_decs(self, all_tmpvars):
"""Generate the declarations of any necessary temporary
variables.
"""
tab = " "*4
tmpvars = list(all_tmpvars)
tmpvars.sort()
tmpdecs = [tab + f"cdef npy_cdouble {tmpvar}"
for tmpvar in tmpvars]
return tmpdecs
def _get_python_wrap(self):
"""Generate a Python wrapper for functions which pass their
arguments as pointers.
"""
tab = " "*4
body, callvars = [], []
for (intype, _), invar in zip(self.intypes, self.invars):
callvars.append(f"{intype} {invar}")
line = "def _{}_pywrap({}):".format(self.name, ", ".join(callvars))
body.append(line)
for (outtype, _), outvar in zip(self.outtypes, self.outvars):
line = f"cdef {outtype} {outvar}"
body.append(tab + line)
addr_outvars = [f"&{x}" for x in self.outvars]
line = "{}({}, {})".format(self.name, ", ".join(self.invars),
", ".join(addr_outvars))
body.append(tab + line)
line = "return {}".format(", ".join(self.outvars))
body.append(tab + line)
body = "\n".join(body)
return body
def _get_common(self, signum, sig):
"""Generate code common to all the _generate_* methods."""
tab = " "*4
func_name, incodes, outcodes, retcode, header = sig
# Convert ints to longs; cf. iter_variants()
incodes = incodes.replace('i', 'l')
outcodes = outcodes.replace('i', 'l')
retcode = retcode.replace('i', 'l')
if header.endswith("h"):
c = True
else:
c = False
if header.endswith("++"):
cpp = True
else:
cpp = False
intypes = [CY_TYPES[x] for x in incodes]
outtypes = [CY_TYPES[x] for x in outcodes]
retcode = re.sub(r'\*.*', '', retcode)
if not retcode:
retcode = 'v'
rettype = CY_TYPES[retcode]
if cpp:
# Functions from _ufuncs_cxx are exported as a void*
# pointers; cast them to the correct types
func_name = f"scipy.special._ufuncs_cxx._export_{func_name}"
func_name = "(<{}(*)({}) noexcept nogil>{})"\
.format(rettype, ", ".join(intypes + outtypes), func_name)
else:
func_name = self.cython_func_name(func_name, specialized=True)
if signum == 0:
adverb = "if"
else:
adverb = "elif"
cond = self._get_conditional(self.intypes, incodes, adverb)
if cond:
lines = [tab + cond]
sp = 2*tab
else:
lines = []
sp = tab
return func_name, incodes, outcodes, retcode, \
intypes, outtypes, rettype, c, lines, sp
def _generate_from_return_and_no_outargs(self):
tab = " "*4
specs, body = [], []
for signum, sig in enumerate(self.signatures):
func_name, incodes, outcodes, retcode, intypes, outtypes, \
rettype, c, lines, sp = self._get_common(signum, sig)
body.extend(lines)
# Generate the call to the specialized function
callvars = self._get_incallvars(intypes, c)
call = "{}({})".format(func_name, ", ".join(callvars))
if c and rettype == "double complex":
call = double_complex_from_npy_cdouble(call)
line = sp + f"return {call}"
body.append(line)
sig = f"{incodes}->{retcode}"
specs.append(sig)
if len(specs) > 1:
# Return nan for signatures without a specialization
body.append(tab + "else:")
outtype, outcodes = self.outtypes[0]
last = len(outcodes) - 1
if len(outcodes) == 1:
line = f"return {NAN_VALUE[outcodes]}"
body.append(2*tab + line)
else:
for n, code in enumerate(outcodes):
if n == 0:
adverb = "if"
elif n == last:
adverb = "else"
else:
adverb = "elif"
cond = self._get_conditional(self.outtypes, code, adverb)
body.append(2*tab + cond)
line = f"return {NAN_VALUE[code]}"
body.append(3*tab + line)
# Generate the head of the function
callvars, head = [], []
for n, (intype, _) in enumerate(self.intypes):
callvars.append(f"{intype} {self.invars[n]}")
(outtype, _) = self.outtypes[0]
dec = "cpdef {} {}({}) nogil".format(outtype, self.name, ", ".join(callvars))
head.append(dec + ":")
head.append(tab + f'"""{self.doc}"""')
src = "\n".join(head + body)
return dec, src, specs
def _generate_from_outargs_and_no_return(self):
tab = " "*4
all_tmpvars = set()
specs, body = [], []
for signum, sig in enumerate(self.signatures):
func_name, incodes, outcodes, retcode, intypes, outtypes, \
rettype, c, lines, sp = self._get_common(signum, sig)
body.extend(lines)
# Generate the call to the specialized function
callvars = self._get_incallvars(intypes, c)
outcallvars, tmpvars, casts = self._get_outcallvars(outtypes, c)
callvars.extend(outcallvars)
all_tmpvars.update(tmpvars)
call = "{}({})".format(func_name, ", ".join(callvars))
body.append(sp + call)
body.extend([sp + x for x in casts])
if len(outcodes) == 1:
sig = f"{incodes}->{outcodes}"
specs.append(sig)
else:
sig = f"{incodes}*{outcodes}->v"
specs.append(sig)
if len(specs) > 1:
lines = self._get_nan_decs()
body.extend(lines)
if len(self.outvars) == 1:
line = f"return {self.outvars[0]}[0]"
body.append(tab + line)
# Generate the head of the function
callvars, head = [], []
for invar, (intype, _) in zip(self.invars, self.intypes):
callvars.append(f"{intype} {invar}")
if len(self.outvars) > 1:
for outvar, (outtype, _) in zip(self.outvars, self.outtypes):
callvars.append(f"{outtype} *{outvar}")
if len(self.outvars) == 1:
outtype, _ = self.outtypes[0]
dec = "cpdef {} {}({}) noexcept nogil".format(outtype, self.name, ", ".join(callvars))
else:
dec = "cdef void {}({}) noexcept nogil".format(self.name, ", ".join(callvars))
head.append(dec + ":")
head.append(tab + f'"""{self.doc}"""')
if len(self.outvars) == 1:
outvar = self.outvars[0]
outtype, _ = self.outtypes[0]
line = f"cdef {outtype} {outvar}"
head.append(tab + line)
head.extend(self._get_tmp_decs(all_tmpvars))
src = "\n".join(head + body)
return dec, src, specs
def _generate_from_outargs_and_return(self):
tab = " "*4
all_tmpvars = set()
specs, body = [], []
for signum, sig in enumerate(self.signatures):
func_name, incodes, outcodes, retcode, intypes, outtypes, \
rettype, c, lines, sp = self._get_common(signum, sig)
body.extend(lines)
# Generate the call to the specialized function
callvars = self._get_incallvars(intypes, c)
outcallvars, tmpvars, casts = self._get_outcallvars(outtypes, c)
callvars.extend(outcallvars)
all_tmpvars.update(tmpvars)
call = "{}({})".format(func_name, ", ".join(callvars))
if c and rettype == "double complex":
call = double_complex_from_npy_cdouble(call)
call = f"{self.outvars[0]}[0] = {call}"
body.append(sp + call)
body.extend([sp + x for x in casts])
sig = f"{incodes}*{outcodes + retcode}->v"
specs.append(sig)
if len(specs) > 1:
lines = self._get_nan_decs()
body.extend(lines)
# Generate the head of the function
callvars, head = [], []
for invar, (intype, _) in zip(self.invars, self.intypes):
callvars.append(f"{intype} {invar}")
for outvar, (outtype, _) in zip(self.outvars, self.outtypes):
callvars.append(f"{outtype} *{outvar}")
dec = "cdef void {}({}) noexcept nogil".format(self.name, ", ".join(callvars))
head.append(dec + ":")
head.append(tab + f'"""{self.doc}"""')
head.extend(self._get_tmp_decs(all_tmpvars))
src = "\n".join(head + body)
return dec, src, specs
    def generate(self):
        """Generate the Cython declaration, source and signature specs
        for this fused function.

        Returns
        -------
        tuple
            ``(dec, src, specs, fused_types, wrap)`` where ``wrap`` is a
            Python wrapper (only produced for multi-output functions,
            otherwise ``None``).
        """
        _, _, outcodes, retcode, _ = self.signatures[0]
        # Drop everything from the first '*' onwards (header annotation);
        # an empty return code means void ('v').
        retcode = re.sub(r'\*.*', '', retcode)
        if not retcode:
            retcode = 'v'
        # Dispatch on the combination of output arguments / return value.
        if len(outcodes) == 0 and retcode != 'v':
            dec, src, specs = self._generate_from_return_and_no_outargs()
        elif len(outcodes) > 0 and retcode == 'v':
            dec, src, specs = self._generate_from_outargs_and_no_return()
        elif len(outcodes) > 0 and retcode != 'v':
            dec, src, specs = self._generate_from_outargs_and_return()
        else:
            raise ValueError("Invalid signature")
        if len(self.outvars) > 1:
            # Multi-output functions also get a Python-level wrapper.
            wrap = self._get_python_wrap()
        else:
            wrap = None
        return dec, src, specs, self.fused_types, wrap
def get_declaration(ufunc, c_name, c_proto, cy_proto, header,
                    proto_h_filename):
    """
    Construct a Cython declaration of a function coming either from a
    pxd or a header file. Do sufficient tricks to enable compile-time
    type checking against the signature expected by the ufunc.

    Returns a tuple ``(defs, defs_h, var_name)``: Cython declaration
    lines, C header declaration lines, and a sanitized identifier
    derived from `c_name`.
    """
    defs = []
    defs_h = []
    # Sanitize the C name into a valid identifier fragment.
    var_name = c_name.replace('[', '_').replace(']', '_').replace(' ', '_')
    if header.endswith('.pxd'):
        # Function comes from a Cython .pxd: cimport it under a mangled name.
        defs.append("from .{} cimport {} as {}".format(
            header[:-4], ufunc.cython_func_name(c_name, prefix=""),
            ufunc.cython_func_name(c_name)))
        # check function signature at compile time: taking the address of
        # the function into a typed pointer fails to compile on mismatch.
        proto_name = '_proto_%s_t' % var_name
        defs.append("ctypedef %s" % (cy_proto.replace('(*)', proto_name)))
        defs.append("cdef {} *{}_var = &{}".format(
            proto_name, proto_name, ufunc.cython_func_name(c_name, specialized=True)))
    else:
        # redeclare the function, so that the assumed
        # signature is checked at compile time
        new_name = f"{ufunc.cython_func_name(c_name)} \"{c_name}\""
        defs.append(f'cdef extern from r"{proto_h_filename}":')
        defs.append("    cdef %s" % (cy_proto.replace('(*)', new_name)))
        # Mirror the declaration into the generated C prototype header.
        defs_h.append(f'#include "{header}"')
        defs_h.append("%s;" % (c_proto.replace('(*)', c_name)))
    return defs, defs_h, var_name
def generate_ufuncs(fn_prefix, cxx_fn_prefix, ufuncs):
filename = fn_prefix + ".pyx"
proto_h_filename = fn_prefix + '_defs.h'
cxx_proto_h_filename = cxx_fn_prefix + '_defs.h'
cxx_pyx_filename = cxx_fn_prefix + ".pyx"
cxx_pxd_filename = cxx_fn_prefix + ".pxd"
toplevel = ""
# for _ufuncs*
defs = []
defs_h = []
all_loops = {}
# for _ufuncs_cxx*
cxx_defs = []
cxx_pxd_defs = [
"from . cimport sf_error",
"cdef void _set_action(sf_error.sf_error_t, sf_error.sf_action_t) noexcept nogil"
]
cxx_defs_h = []
ufuncs.sort(key=lambda u: u.name)
for ufunc in ufuncs:
# generate function declaration and type checking snippets
cfuncs = ufunc.get_prototypes()
for c_name, c_proto, cy_proto, header in cfuncs:
if header.endswith('++'):
header = header[:-2]
# for the CXX module
item_defs, item_defs_h, var_name = get_declaration(ufunc, c_name, c_proto, cy_proto,
header, cxx_proto_h_filename)
cxx_defs.extend(item_defs)
cxx_defs_h.extend(item_defs_h)
cxx_defs.append("cdef void *_export_{} = <void*>{}".format(
var_name, ufunc.cython_func_name(c_name, specialized=True, override=False)))
cxx_pxd_defs.append(f"cdef void *_export_{var_name}")
# let cython grab the function pointer from the c++ shared library
ufunc.function_name_overrides[c_name] = "scipy.special._ufuncs_cxx._export_" + var_name
else:
# usual case
item_defs, item_defs_h, _ = get_declaration(ufunc, c_name, c_proto, cy_proto, header,
proto_h_filename)
defs.extend(item_defs)
defs_h.extend(item_defs_h)
# ufunc creation code snippet
t = ufunc.generate(all_loops)
toplevel += t + "\n"
# Produce output
toplevel = "\n".join(sorted(all_loops.values()) + defs + [toplevel])
# Generate an `__all__` for the module
all_ufuncs = (
[
f"'{ufunc.name}'"
for ufunc in ufuncs if not ufunc.name.startswith('_')
]
+ ["'geterr'", "'seterr'", "'errstate'", "'jn'"]
)
module_all = '__all__ = [{}]'.format(', '.join(all_ufuncs))
with open(filename, 'w') as f:
f.write(UFUNCS_EXTRA_CODE_COMMON)
f.write(UFUNCS_EXTRA_CODE)
f.write(module_all)
f.write("\n")
f.write(toplevel)
f.write(UFUNCS_EXTRA_CODE_BOTTOM)
defs_h = unique(defs_h)
with open(proto_h_filename, 'w') as f:
f.write("#ifndef UFUNCS_PROTO_H\n#define UFUNCS_PROTO_H 1\n")
f.write("\n".join(defs_h))
f.write("\n#endif\n")
cxx_defs_h = unique(cxx_defs_h)
with open(cxx_proto_h_filename, 'w') as f:
f.write("#ifndef UFUNCS_PROTO_H\n#define UFUNCS_PROTO_H 1\n")
f.write("\n".join(cxx_defs_h))
f.write("\n#endif\n")
with open(cxx_pyx_filename, 'w') as f:
f.write(UFUNCS_EXTRA_CODE_COMMON)
f.write("\n")
f.write("\n".join(cxx_defs))
f.write("\n# distutils: language = c++\n")
with open(cxx_pxd_filename, 'w') as f:
f.write("\n".join(cxx_pxd_defs))
def generate_fused_funcs(modname, ufunc_fn_prefix, fused_funcs):
pxdfile = modname + ".pxd"
pyxfile = modname + ".pyx"
proto_h_filename = ufunc_fn_prefix + '_defs.h'
sources = []
declarations = []
# Code for benchmarks
bench_aux = []
fused_types = set()
# Parameters for the tests
doc = []
defs = []
for func in fused_funcs:
if func.name.startswith("_"):
# Don't try to deal with functions that have extra layers
# of wrappers.
continue
# Get the function declaration for the .pxd and the source
# code for the .pyx
dec, src, specs, func_fused_types, wrap = func.generate()
declarations.append(dec)
sources.append(src)
if wrap:
sources.append(wrap)
fused_types.update(func_fused_types)
# Declare the specializations
cfuncs = func.get_prototypes(nptypes_for_h=True)
for c_name, c_proto, cy_proto, header in cfuncs:
if header.endswith('++'):
# We grab the c++ functions from the c++ module
continue
item_defs, _, _ = get_declaration(func, c_name, c_proto,
cy_proto, header,
proto_h_filename)
defs.extend(item_defs)
# Add a line to the documentation
doc.append(generate_doc(func.name, specs))
# Generate code for benchmarks
if func.name in CYTHON_SPECIAL_BENCHFUNCS:
for codes in CYTHON_SPECIAL_BENCHFUNCS[func.name]:
pybench, cybench = generate_bench(func.name, codes)
bench_aux.extend([pybench, cybench])
fused_types = list(fused_types)
fused_types.sort()
with open(pxdfile, 'w') as f:
f.write(CYTHON_SPECIAL_PXD)
f.write("\n")
f.write("\n\n".join(fused_types))
f.write("\n\n")
f.write("\n".join(declarations))
with open(pyxfile, 'w') as f:
header = CYTHON_SPECIAL_PYX
header = header.replace("FUNCLIST", "\n".join(doc))
f.write(header)
f.write("\n")
f.write("\n".join(defs))
f.write("\n\n")
f.write("\n\n".join(sources))
f.write("\n\n")
f.write("\n\n".join(bench_aux))
def generate_ufuncs_type_stubs(module_name: str, ufuncs: List[Ufunc]):
    """Write a ``.pyi`` type-stub file declaring each ufunc as np.ufunc.

    Private ufuncs (leading underscore) get a stub entry but are left out
    of the generated ``__all__``.
    """
    stubs, module_all = [], []
    for ufunc in ufuncs:
        stubs.append(f'{ufunc.name}: np.ufunc')
        if not ufunc.name.startswith('_'):
            module_all.append(f"'{ufunc.name}'")
    # jn is an alias for jv.
    module_all.append("'jn'")
    stubs.append('jn: np.ufunc')
    module_all.sort()
    stubs.sort()
    # Fill the STUBS template (module-level constant) with the sorted lists.
    contents = STUBS.format(
        ALL=',\n    '.join(module_all),
        STUBS='\n'.join(stubs),
    )
    stubs_file = f'{module_name}.pyi'
    with open(stubs_file, 'w') as f:
        f.write(contents)
def unique(lst):
    """Return ``lst`` with duplicates removed, keeping the first
    occurrence of each entry and preserving the original order."""
    seen = set()
    out = []
    for item in lst:
        if item not in seen:
            seen.add(item)
            out.append(item)
    return out
def newer(source, target):
    """
    Return True if 'source' exists and is more recently modified than
    'target', or if 'source' exists and 'target' doesn't. Return False
    if both exist and 'target' is the same age or younger than 'source'.

    Raises
    ------
    ValueError
        If `source` does not exist.
    """
    if not os.path.exists(source):
        raise ValueError("file '%s' does not exist" % os.path.abspath(source))
    if not os.path.exists(target):
        # A missing target always needs (re)generating.
        return True
    # Compare modification times directly; only a strictly newer source
    # counts (equal mtimes -> not newer).
    return os.path.getmtime(source) > os.path.getmtime(target)
def all_newer(src_files, dst_files):
    """Return True iff every dst file exists and is newer than every src file."""
    for dst in dst_files:
        for src in src_files:
            if not (os.path.exists(dst) and newer(dst, src)):
                return False
    return True
def main(outdir):
pwd = os.path.dirname(__file__)
src_files = (os.path.abspath(__file__),
os.path.abspath(os.path.join(pwd, 'functions.json')),
os.path.abspath(os.path.join(pwd, '_add_newdocs.py')))
dst_files = ('_ufuncs.pyx',
'_ufuncs_defs.h',
'_ufuncs_cxx.pyx',
'_ufuncs_cxx.pxd',
'_ufuncs_cxx_defs.h',
'_ufuncs.pyi',
'cython_special.pyx',
'cython_special.pxd')
dst_files = (os.path.join(outdir, f) for f in dst_files)
os.chdir(BASE_DIR)
if all_newer(src_files, dst_files):
print("scipy/special/_generate_pyx.py: all files up-to-date")
return
ufuncs, fused_funcs = [], []
with open('functions.json') as data:
functions = json.load(data)
for f, sig in functions.items():
ufuncs.append(Ufunc(f, sig))
fused_funcs.append(FusedFunc(f, sig))
generate_ufuncs(os.path.join(outdir, "_ufuncs"),
os.path.join(outdir, "_ufuncs_cxx"),
ufuncs)
generate_ufuncs_type_stubs(os.path.join(outdir, "_ufuncs"),
ufuncs)
generate_fused_funcs(os.path.join(outdir, "cython_special"),
os.path.join(outdir, "_ufuncs"),
fused_funcs)
if __name__ == "__main__":
    # Command-line entry point: regenerate the Cython sources, writing
    # into --outdir when given, otherwise in-place next to this file.
    parser = argparse.ArgumentParser()
    parser.add_argument("-o", "--outdir", type=str,
                        help="Path to the output directory")
    args = parser.parse_args()
    if not args.outdir:
        #raise ValueError(f"Missing `--outdir` argument to _generate_pyx.py")
        # We're dealing with a distutils build here, write in-place:
        outdir_abs = os.path.abspath(os.path.dirname(__file__))
    else:
        # Interpret --outdir relative to the invocation directory.
        outdir_abs = os.path.join(os.getcwd(), args.outdir)
    main(outdir_abs)
| 52,068
| 33.255921
| 114
|
py
|
scipy
|
scipy-main/scipy/special/specfun.py
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.special` namespace for importing the functions
# included below.
import warnings
from . import _specfun # type: ignore
__all__ = [ # noqa: F822
'airyzo',
'bernob',
'cerzo',
'clpmn',
'clpn',
'clqmn',
'clqn',
'cpbdn',
'cyzo',
'eulerb',
'fcoef',
'fcszo',
'jdzo',
'jyzo',
'klvnzo',
'lamn',
'lamv',
'lpmn',
'lpn',
'lqmn',
'lqnb',
'pbdv',
'rctj',
'rcty',
'segv'
]
def __dir__():
    # Restrict dir() on this deprecated shim module to the re-exported names.
    return __all__
def __getattr__(name):
    """Module attribute hook: forward deprecated accesses to the private
    ``_specfun`` module, emitting a DeprecationWarning."""
    if name not in __all__:
        raise AttributeError(
            "scipy.special.specfun is deprecated and has no attribute "
            f"{name}. Try looking in scipy.special instead.")
    # stacklevel=2 points the warning at the caller's access site.
    warnings.warn(f"Please use `{name}` from the `scipy.special` namespace, "
                  "the `scipy.special.specfun` namespace is deprecated.",
                  category=DeprecationWarning, stacklevel=2)
    return getattr(_specfun, name)
| 1,059
| 19.384615
| 77
|
py
|
scipy
|
scipy-main/scipy/special/_mptestutils.py
|
import os
import sys
import time
from itertools import zip_longest
import numpy as np
from numpy.testing import assert_
import pytest
from scipy.special._testutils import assert_func_equal
try:
import mpmath
except ImportError:
pass
# ------------------------------------------------------------------------------
# Machinery for systematic tests with mpmath
# ------------------------------------------------------------------------------
class Arg:
"""Generate a set of numbers on the real axis, concentrating on
'interesting' regions and covering all orders of magnitude.
"""
def __init__(self, a=-np.inf, b=np.inf, inclusive_a=True, inclusive_b=True):
if a > b:
raise ValueError("a should be less than or equal to b")
if a == -np.inf:
a = -0.5*np.finfo(float).max
if b == np.inf:
b = 0.5*np.finfo(float).max
self.a, self.b = a, b
self.inclusive_a, self.inclusive_b = inclusive_a, inclusive_b
def _positive_values(self, a, b, n):
if a < 0:
raise ValueError("a should be positive")
# Try to put half of the points into a linspace between a and
# 10 the other half in a logspace.
if n % 2 == 0:
nlogpts = n//2
nlinpts = nlogpts
else:
nlogpts = n//2
nlinpts = nlogpts + 1
if a >= 10:
# Outside of linspace range; just return a logspace.
pts = np.logspace(np.log10(a), np.log10(b), n)
elif a > 0 and b < 10:
# Outside of logspace range; just return a linspace
pts = np.linspace(a, b, n)
elif a > 0:
# Linspace between a and 10 and a logspace between 10 and
# b.
linpts = np.linspace(a, 10, nlinpts, endpoint=False)
logpts = np.logspace(1, np.log10(b), nlogpts)
pts = np.hstack((linpts, logpts))
elif a == 0 and b <= 10:
# Linspace between 0 and b and a logspace between 0 and
# the smallest positive point of the linspace
linpts = np.linspace(0, b, nlinpts)
if linpts.size > 1:
right = np.log10(linpts[1])
else:
right = -30
logpts = np.logspace(-30, right, nlogpts, endpoint=False)
pts = np.hstack((logpts, linpts))
else:
# Linspace between 0 and 10, logspace between 0 and the
# smallest positive point of the linspace, and a logspace
# between 10 and b.
if nlogpts % 2 == 0:
nlogpts1 = nlogpts//2
nlogpts2 = nlogpts1
else:
nlogpts1 = nlogpts//2
nlogpts2 = nlogpts1 + 1
linpts = np.linspace(0, 10, nlinpts, endpoint=False)
if linpts.size > 1:
right = np.log10(linpts[1])
else:
right = -30
logpts1 = np.logspace(-30, right, nlogpts1, endpoint=False)
logpts2 = np.logspace(1, np.log10(b), nlogpts2)
pts = np.hstack((logpts1, linpts, logpts2))
return np.sort(pts)
def values(self, n):
"""Return an array containing n numbers."""
a, b = self.a, self.b
if a == b:
return np.zeros(n)
if not self.inclusive_a:
n += 1
if not self.inclusive_b:
n += 1
if n % 2 == 0:
n1 = n//2
n2 = n1
else:
n1 = n//2
n2 = n1 + 1
if a >= 0:
pospts = self._positive_values(a, b, n)
negpts = []
elif b <= 0:
pospts = []
negpts = -self._positive_values(-b, -a, n)
else:
pospts = self._positive_values(0, b, n1)
negpts = -self._positive_values(0, -a, n2 + 1)
# Don't want to get zero twice
negpts = negpts[1:]
pts = np.hstack((negpts[::-1], pospts))
if not self.inclusive_a:
pts = pts[1:]
if not self.inclusive_b:
pts = pts[:-1]
return pts
class FixedArg:
    """Argument specification yielding a fixed, caller-supplied set of points."""

    def __init__(self, values):
        self._vals = np.asarray(values)

    def values(self, n):
        # The requested count ``n`` is ignored; the fixed points are
        # returned as-is.
        return self._vals
class ComplexArg:
    """Generate complex sample points on a rectangle in the complex plane,
    formed as the outer sum of real and imaginary `Arg` grids."""

    def __init__(self, a=complex(-np.inf, -np.inf), b=complex(np.inf, np.inf)):
        self.real = Arg(a.real, b.real)
        self.imag = Arg(a.imag, b.imag)

    def values(self, n):
        """Return roughly ``n`` complex points (an m-by-(m+1) grid, flattened)."""
        side = int(np.floor(np.sqrt(n)))
        re_pts = self.real.values(side)
        im_pts = self.imag.values(side + 1)
        grid = re_pts[:, None] + 1j*im_pts[None, :]
        return grid.ravel()
class IntArg:
    """Generate unique integers in the half-open range [a, b), always
    including the small values near zero when they fit."""

    def __init__(self, a=-1000, b=1000):
        self.a = a
        self.b = b

    def values(self, n):
        """Return a sorted array of unique integers drawn from [a, b)."""
        # Sample real points over the range and truncate to integers ...
        coarse = Arg(self.a, self.b).values(max(1 + n//2, n-5)).astype(int)
        # ... and always consider the small integers around zero.
        small = np.arange(-5, 5)
        combined = np.unique(np.r_[coarse, small])
        # Keep only values inside the half-open interval [a, b).
        return combined[(combined >= self.a) & (combined < self.b)]
def get_args(argspec, n):
    # Build the array of argument tuples to evaluate: one row per call.
    if isinstance(argspec, np.ndarray):
        # Explicit array of argument rows: use it verbatim (copied).
        args = argspec.copy()
    else:
        nargs = len(argspec)
        # Weight complex args more heavily (1.5 vs 1.0) when splitting the
        # point budget n across arguments, then take the Cartesian product.
        ms = np.asarray([1.5 if isinstance(spec, ComplexArg) else 1.0 for spec in argspec])
        ms = (n**(ms/sum(ms))).astype(int) + 1
        args = [spec.values(m) for spec, m in zip(argspec, ms)]
        # np.ix_ + broadcast_arrays form the full product grid; reshape to
        # (num_combinations, nargs).
        args = np.array(np.broadcast_arrays(*np.ix_(*args))).reshape(nargs, -1).T
    return args
class MpmathData:
def __init__(self, scipy_func, mpmath_func, arg_spec, name=None,
dps=None, prec=None, n=None, rtol=1e-7, atol=1e-300,
ignore_inf_sign=False, distinguish_nan_and_inf=True,
nan_ok=True, param_filter=None):
# mpmath tests are really slow (see gh-6989). Use a small number of
# points by default, increase back to 5000 (old default) if XSLOW is
# set
if n is None:
try:
is_xslow = int(os.environ.get('SCIPY_XSLOW', '0'))
except ValueError:
is_xslow = False
n = 5000 if is_xslow else 500
self.scipy_func = scipy_func
self.mpmath_func = mpmath_func
self.arg_spec = arg_spec
self.dps = dps
self.prec = prec
self.n = n
self.rtol = rtol
self.atol = atol
self.ignore_inf_sign = ignore_inf_sign
self.nan_ok = nan_ok
if isinstance(self.arg_spec, np.ndarray):
self.is_complex = np.issubdtype(self.arg_spec.dtype, np.complexfloating)
else:
self.is_complex = any([isinstance(arg, ComplexArg) for arg in self.arg_spec])
self.ignore_inf_sign = ignore_inf_sign
self.distinguish_nan_and_inf = distinguish_nan_and_inf
if not name or name == '<lambda>':
name = getattr(scipy_func, '__name__', None)
if not name or name == '<lambda>':
name = getattr(mpmath_func, '__name__', None)
self.name = name
self.param_filter = param_filter
def check(self):
np.random.seed(1234)
# Generate values for the arguments
argarr = get_args(self.arg_spec, self.n)
# Check
old_dps, old_prec = mpmath.mp.dps, mpmath.mp.prec
try:
if self.dps is not None:
dps_list = [self.dps]
else:
dps_list = [20]
if self.prec is not None:
mpmath.mp.prec = self.prec
# Proper casting of mpmath input and output types. Using
# native mpmath types as inputs gives improved precision
# in some cases.
if np.issubdtype(argarr.dtype, np.complexfloating):
pytype = mpc2complex
def mptype(x):
return mpmath.mpc(complex(x))
else:
def mptype(x):
return mpmath.mpf(float(x))
def pytype(x):
if abs(x.imag) > 1e-16*(1 + abs(x.real)):
return np.nan
else:
return mpf2float(x.real)
# Try out different dps until one (or none) works
for j, dps in enumerate(dps_list):
mpmath.mp.dps = dps
try:
assert_func_equal(self.scipy_func,
lambda *a: pytype(self.mpmath_func(*map(mptype, a))),
argarr,
vectorized=False,
rtol=self.rtol, atol=self.atol,
ignore_inf_sign=self.ignore_inf_sign,
distinguish_nan_and_inf=self.distinguish_nan_and_inf,
nan_ok=self.nan_ok,
param_filter=self.param_filter)
break
except AssertionError:
if j >= len(dps_list)-1:
# reraise the Exception
tp, value, tb = sys.exc_info()
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
finally:
mpmath.mp.dps, mpmath.mp.prec = old_dps, old_prec
def __repr__(self):
if self.is_complex:
return f"<MpmathData: {self.name} (complex)>"
else:
return f"<MpmathData: {self.name}>"
def assert_mpmath_equal(*a, **kw):
    # Convenience wrapper: build an MpmathData comparison and run it.
    # All arguments are forwarded to MpmathData.__init__.
    d = MpmathData(*a, **kw)
    d.check()
def nonfunctional_tooslow(func):
    # Decorator marking known-nonfunctional (too slow) tests as skipped.
    return pytest.mark.skip(reason="    Test not yet functional (too slow), needs more work.")(func)
# ------------------------------------------------------------------------------
# Tools for dealing with mpmath quirks
# ------------------------------------------------------------------------------
def mpf2float(x):
    """
    Convert an mpf to the nearest floating point number. Just using
    float directly doesn't work because of results like this:
    with mp.workdps(50):
        float(mpf("0.99999999999999999")) = 0.9999999999999999
    """
    # Round-trip through a 17-significant-digit decimal string: 17 digits
    # uniquely identify the nearest IEEE double.
    return float(mpmath.nstr(x, 17, min_fixed=0, max_fixed=0))
def mpc2complex(x):
    # Convert an mpmath mpc to a Python complex, rounding each part to
    # the nearest double via mpf2float.
    return complex(mpf2float(x.real), mpf2float(x.imag))
def trace_args(func):
    """Decorator that prints each call's arguments (and result) to stderr,
    for debugging mpmath reference functions."""
    def tofloat(x):
        # mpmath values print more readably as native Python numbers.
        if isinstance(x, mpmath.mpc):
            return complex(x)
        else:
            return float(x)
    def wrap(*a, **kw):
        sys.stderr.write(f"{tuple(map(tofloat, a))!r}: ")
        sys.stderr.flush()
        try:
            r = func(*a, **kw)
            sys.stderr.write("-> %r" % r)
        finally:
            # Always terminate the trace line, even if func raised.
            sys.stderr.write("\n")
            sys.stderr.flush()
        return r
    return wrap
try:
import signal
POSIX = ('setitimer' in dir(signal))
except ImportError:
POSIX = False
class TimeoutError(Exception):
    # NOTE(review): this shadows the builtin TimeoutError; presumably kept
    # for backward compatibility with existing callers -- confirm before
    # renaming.
    pass
def time_limited(timeout=0.5, return_val=np.nan, use_sigalrm=True):
"""
Decorator for setting a timeout for pure-Python functions.
If the function does not return within `timeout` seconds, the
value `return_val` is returned instead.
On POSIX this uses SIGALRM by default. On non-POSIX, settrace is
used. Do not use this with threads: the SIGALRM implementation
does probably not work well. The settrace implementation only
traces the current thread.
The settrace implementation slows down execution speed. Slowdown
by a factor around 10 is probably typical.
"""
if POSIX and use_sigalrm:
def sigalrm_handler(signum, frame):
raise TimeoutError()
def deco(func):
def wrap(*a, **kw):
old_handler = signal.signal(signal.SIGALRM, sigalrm_handler)
signal.setitimer(signal.ITIMER_REAL, timeout)
try:
return func(*a, **kw)
except TimeoutError:
return return_val
finally:
signal.setitimer(signal.ITIMER_REAL, 0)
signal.signal(signal.SIGALRM, old_handler)
return wrap
else:
def deco(func):
def wrap(*a, **kw):
start_time = time.time()
def trace(frame, event, arg):
if time.time() - start_time > timeout:
raise TimeoutError()
return trace
sys.settrace(trace)
try:
return func(*a, **kw)
except TimeoutError:
sys.settrace(None)
return return_val
finally:
sys.settrace(None)
return wrap
return deco
def exception_to_nan(func):
    """Wrap ``func`` so that any raised exception yields ``nan`` instead."""
    def wrapper(*args, **kwargs):
        try:
            result = func(*args, **kwargs)
        except Exception:
            return np.nan
        return result
    return wrapper
def inf_to_nan(func):
    """Wrap ``func`` so that non-finite results (inf, -inf, nan) become ``nan``."""
    def wrapper(*args, **kwargs):
        value = func(*args, **kwargs)
        return value if np.isfinite(value) else np.nan
    return wrapper
def mp_assert_allclose(res, std, atol=0, rtol=1e-17):
    """
    Compare lists of mpmath.mpf's or mpmath.mpc's directly so that it
    can be done to higher precision than double.

    Raises an assertion error listing every mismatched index when any
    element of ``res`` differs from ``std`` by more than atol + rtol*|std|.
    """
    failures = []
    # NOTE(review): zip_longest pads the shorter input with None, used here
    # as a length-mismatch sentinel -- assumes neither input legitimately
    # contains None entries.
    for k, (resval, stdval) in enumerate(zip_longest(res, std)):
        if resval is None or stdval is None:
            raise ValueError('Lengths of inputs res and std are not equal.')
        if mpmath.fabs(resval - stdval) > atol + rtol*mpmath.fabs(stdval):
            failures.append((k, resval, stdval))
    nfail = len(failures)
    if nfail > 0:
        # Show enough digits to resolve the requested relative tolerance.
        ndigits = int(abs(np.log10(rtol)))
        msg = [""]
        msg.append("Bad results ({} out of {}) for the following points:"
                   .format(nfail, k + 1))
        for k, resval, stdval in failures:
            resrep = mpmath.nstr(resval, ndigits, min_fixed=0, max_fixed=0)
            stdrep = mpmath.nstr(stdval, ndigits, min_fixed=0, max_fixed=0)
            if stdval == 0:
                # Relative difference is undefined against a zero reference.
                rdiff = "inf"
            else:
                rdiff = mpmath.fabs((resval - stdval)/stdval)
                rdiff = mpmath.nstr(rdiff, 3)
            msg.append("{}: {} != {} (rdiff {})".format(k, resrep, stdrep,
                                                        rdiff))
        assert_(False, "\n".join(msg))
| 14,517
| 31.478747
| 100
|
py
|
scipy
|
scipy-main/scipy/special/setup.py
|
import os
import sys
from os.path import join, dirname
from distutils.sysconfig import get_python_inc
import subprocess
import numpy
from numpy.distutils.misc_util import get_numpy_include_dirs, get_info
from scipy._build_utils.compiler_helper import (set_c_flags_hook,
set_cxx_flags_hook)
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
from numpy.distutils.system_info import get_info as get_system_info
from scipy._build_utils import combine_dict, uses_blas64
config = Configuration('special', parent_package, top_path)
if uses_blas64():
lapack_opt = get_system_info('lapack_ilp64_opt')
else:
lapack_opt = get_system_info('lapack_opt')
define_macros = []
if sys.platform == 'win32':
# define_macros.append(('NOINFINITIES',None))
# define_macros.append(('NONANS',None))
define_macros.append(('_USE_MATH_DEFINES',None))
curdir = os.path.abspath(os.path.dirname(__file__))
python_inc_dirs = get_python_inc()
plat_specific_python_inc_dirs = get_python_inc(plat_specific=1)
inc_dirs = [get_numpy_include_dirs(), python_inc_dirs]
if python_inc_dirs != plat_specific_python_inc_dirs:
inc_dirs.append(plat_specific_python_inc_dirs)
inc_dirs.append(join(dirname(dirname(__file__)), '_lib'))
inc_dirs.append(join(dirname(dirname(__file__)), '_lib', 'boost_math',
'include'))
inc_dirs.append(join(dirname(dirname(__file__)), '_build_utils', 'src'))
# C libraries
cephes_src = [join('cephes','*.c')]
cephes_hdr = [join('cephes', '*.h')]
config.add_library('sc_cephes',sources=cephes_src,
include_dirs=[curdir] + inc_dirs,
depends=(cephes_hdr + ['*.h']),
macros=define_macros)
# Fortran/C++ libraries
mach_src = [join('mach','*.f')]
amos_src = [join('amos','*.f')]
cdf_src = [join('cdflib','*.f')]
specfun_src = [join('specfun','*.f')]
config.add_library('sc_mach',sources=mach_src,
config_fc={'noopt':(__file__,1)})
config.add_library('sc_amos',sources=amos_src)
config.add_library('sc_cdf',sources=cdf_src)
config.add_library('sc_specfun',sources=specfun_src)
# Extension specfun
config.add_extension('_specfun',
sources=['specfun.pyf'],
f2py_options=['--no-wrap-functions'],
depends=specfun_src,
define_macros=[],
libraries=['sc_specfun'])
# Extension _ufuncs
headers = ['*.h', join('cephes', '*.h')]
ufuncs_src = ['_ufuncs.c', 'sf_error.c',
'amos_wrappers.c', 'cdf_wrappers.c', 'specfun_wrappers.c',
'_cosine.c', 'scaled_exp1.c']
ufuncs_dep = (
headers
+ ufuncs_src
+ amos_src
+ cephes_src
+ mach_src
+ cdf_src
+ specfun_src
)
cfg = combine_dict(lapack_opt,
include_dirs=[curdir] + inc_dirs + [numpy.get_include()],
libraries=['sc_amos', 'sc_cephes', 'sc_mach',
'sc_cdf', 'sc_specfun'],
define_macros=define_macros)
_ufuncs = config.add_extension('_ufuncs',
depends=ufuncs_dep,
sources=ufuncs_src,
extra_info=get_info("npymath"),
**cfg)
_ufuncs._pre_build_hook = set_c_flags_hook
# Extension _ufuncs_cxx
ufuncs_cxx_src = ['_ufuncs_cxx.cxx', 'sf_error.cc',
'ellint_carlson_wrap.cxx',
'_faddeeva.cxx', 'Faddeeva.cc',
'_wright.cxx', 'wright.cc']
ufuncs_cxx_dep = (headers + ufuncs_cxx_src + cephes_src
+ ['*.hh', join('ellint_carlson_cpp_lite', '*.hh')])
ufuncs_cxx_ext = config.add_extension('_ufuncs_cxx',
sources=ufuncs_cxx_src,
depends=ufuncs_cxx_dep,
include_dirs=[curdir] + inc_dirs,
define_macros=define_macros,
extra_info=get_info("npymath"))
ufuncs_cxx_ext._pre_build_hook = set_cxx_flags_hook
cfg = combine_dict(lapack_opt, include_dirs=inc_dirs)
config.add_extension('_ellip_harm_2',
sources=['_ellip_harm_2.c', 'sf_error.c',],
**cfg)
# Cython API
config.add_data_files('cython_special.pxd')
cython_special_src = ['cython_special.c', 'sf_error.c',
'amos_wrappers.c', 'cdf_wrappers.c',
'specfun_wrappers.c', '_cosine.c']
cython_special_dep = (
headers
+ ufuncs_src
+ ufuncs_cxx_src
+ amos_src
+ cephes_src
+ mach_src
+ cdf_src
+ specfun_src
)
cfg = combine_dict(lapack_opt,
include_dirs=[curdir] + inc_dirs + [numpy.get_include()],
libraries=['sc_amos', 'sc_cephes', 'sc_mach',
'sc_cdf', 'sc_specfun'],
define_macros=define_macros)
cython_special = config.add_extension('cython_special',
depends=cython_special_dep,
sources=cython_special_src,
extra_info=get_info("npymath"),
**cfg)
cython_special._pre_build_hook = set_c_flags_hook
# combinatorics
config.add_extension('_comb',
sources=['_comb.c'])
# testing for _round.h and cephes/dd_real.c functions
config.add_extension('_test_internal',
sources=['_test_internal.c', 'cephes/dd_real.c'],
depends=['_round.h', 'cephes/dd_idefs.h',
'cephes/dd_real.h'],
include_dirs=[numpy.get_include()] + inc_dirs,
extra_info=get_info('npymath'))
config.add_data_files('tests/*.py')
config.add_data_files('tests/data/README')
# regenerate npz data files
makenpz = os.path.join(os.path.dirname(__file__),
'utils', 'makenpz.py')
data_dir = os.path.join(os.path.dirname(__file__),
'tests', 'data')
for name in ['boost', 'gsl', 'local']:
subprocess.check_call([sys.executable, makenpz,
'--use-timestamp',
os.path.join(data_dir, name)])
config.add_data_files('tests/data/*.npz')
config.add_subpackage('_precompute')
# Type stubs
config.add_data_files('*.pyi')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| 7,177
| 38.43956
| 80
|
py
|
scipy
|
scipy-main/scipy/special/_spfun_stats.py
|
# Last Change: Sat Mar 21 02:00 PM 2009 J
# Copyright (c) 2001, 2002 Enthought, Inc.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# a. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# b. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# c. Neither the name of the Enthought nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
"""Some more special functions which may be useful for multivariate statistical
analysis."""
import numpy as np
from scipy.special import gammaln as loggam
__all__ = ['multigammaln']
def multigammaln(a, d):
r"""Returns the log of multivariate gamma, also sometimes called the
generalized gamma.
Parameters
----------
a : ndarray
The multivariate gamma is computed for each item of `a`.
d : int
The dimension of the space of integration.
Returns
-------
res : ndarray
The values of the log multivariate gamma at the given points `a`.
Notes
-----
The formal definition of the multivariate gamma of dimension d for a real
`a` is
.. math::
\Gamma_d(a) = \int_{A>0} e^{-tr(A)} |A|^{a - (d+1)/2} dA
with the condition :math:`a > (d-1)/2`, and :math:`A > 0` being the set of
all the positive definite matrices of dimension `d`. Note that `a` is a
scalar: the integrand only is multivariate, the argument is not (the
function is defined over a subset of the real set).
This can be proven to be equal to the much friendlier equation
.. math::
\Gamma_d(a) = \pi^{d(d-1)/4} \prod_{i=1}^{d} \Gamma(a - (i-1)/2).
References
----------
R. J. Muirhead, Aspects of multivariate statistical theory (Wiley Series in
probability and mathematical statistics).
Examples
--------
>>> import numpy as np
>>> from scipy.special import multigammaln, gammaln
>>> a = 23.5
>>> d = 10
>>> multigammaln(a, d)
454.1488605074416
Verify that the result agrees with the logarithm of the equation
shown above:
>>> d*(d-1)/4*np.log(np.pi) + gammaln(a - 0.5*np.arange(0, d)).sum()
454.1488605074416
"""
a = np.asarray(a)
if not np.isscalar(d) or (np.floor(d) != d):
raise ValueError("d should be a positive integer (dimension)")
if np.any(a <= 0.5 * (d - 1)):
raise ValueError("condition a (%f) > 0.5 * (d-1) (%f) not met"
% (a, 0.5 * (d-1)))
res = (d * (d-1) * 0.25) * np.log(np.pi)
res += np.sum(loggam([(a - (j - 1.)/2) for j in range(1, d+1)]), axis=0)
return res
| 3,806
| 34.25
| 79
|
py
|
scipy
|
scipy-main/scipy/special/spfun_stats.py
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.special` namespace for importing the functions
# included below.
import warnings
from . import _spfun_stats
__all__ = ['multigammaln', 'loggam'] # noqa: F822
def __dir__():
    # Restrict dir() on this deprecated shim module to the re-exported names.
    return __all__
def __getattr__(name):
    """Module attribute hook: forward deprecated accesses to the private
    ``_spfun_stats`` module, emitting a DeprecationWarning."""
    if name not in __all__:
        raise AttributeError(
            "scipy.special.spfun_stats is deprecated and has no attribute "
            f"{name}. Try looking in scipy.special instead.")
    # stacklevel=2 points the warning at the caller's access site.
    warnings.warn(f"Please use `{name}` from the `scipy.special` namespace, "
                  "the `scipy.special.spfun_stats` namespace is deprecated.",
                  category=DeprecationWarning, stacklevel=2)
    return getattr(_spfun_stats, name)
| 770
| 28.653846
| 77
|
py
|
scipy
|
scipy-main/scipy/special/_basic.py
|
#
# Author: Travis Oliphant, 2002
#
import operator
import numpy as np
import math
import warnings
from numpy import (pi, asarray, floor, isscalar, iscomplex, real,
imag, sqrt, where, mgrid, sin, place, issubdtype,
extract, inexact, nan, zeros, sinc)
from . import _ufuncs
from ._ufuncs import (mathieu_a, mathieu_b, iv, jv, gamma,
psi, hankel1, hankel2, yv, kv, poch, binom)
from . import _specfun
from ._comb import _comb_int
from scipy._lib.deprecation import _NoValue
__all__ = [
'ai_zeros',
'assoc_laguerre',
'bei_zeros',
'beip_zeros',
'ber_zeros',
'bernoulli',
'berp_zeros',
'bi_zeros',
'clpmn',
'comb',
'digamma',
'diric',
'erf_zeros',
'euler',
'factorial',
'factorial2',
'factorialk',
'fresnel_zeros',
'fresnelc_zeros',
'fresnels_zeros',
'h1vp',
'h2vp',
'ivp',
'jn_zeros',
'jnjnp_zeros',
'jnp_zeros',
'jnyn_zeros',
'jvp',
'kei_zeros',
'keip_zeros',
'kelvin_zeros',
'ker_zeros',
'kerp_zeros',
'kvp',
'lmbda',
'lpmn',
'lpn',
'lqmn',
'lqn',
'mathieu_even_coef',
'mathieu_odd_coef',
'obl_cv_seq',
'pbdn_seq',
'pbdv_seq',
'pbvv_seq',
'perm',
'polygamma',
'pro_cv_seq',
'riccati_jn',
'riccati_yn',
'sinc',
'y0_zeros',
'y1_zeros',
'y1p_zeros',
'yn_zeros',
'ynp_zeros',
'yvp',
'zeta'
]
# mapping k to last n such that factorialk(n, k) < np.iinfo(np.int64).max
_FACTORIALK_LIMITS_64BITS = {1: 20, 2: 33, 3: 44, 4: 54, 5: 65,
6: 74, 7: 84, 8: 93, 9: 101}
# mapping k to last n such that factorialk(n, k) < np.iinfo(np.int32).max
_FACTORIALK_LIMITS_32BITS = {1: 12, 2: 19, 3: 25, 4: 31, 5: 37,
6: 43, 7: 47, 8: 51, 9: 56}
def _nonneg_int_or_fail(n, var_name, strict=True):
try:
if strict:
# Raises an exception if float
n = operator.index(n)
elif n == floor(n):
n = int(n)
else:
raise ValueError()
if n < 0:
raise ValueError()
except (ValueError, TypeError) as err:
raise err.__class__(f"{var_name} must be a non-negative integer") from err
return n
def diric(x, n):
    """Periodic sinc function, also called the Dirichlet function.

    The Dirichlet function is defined as::

        diric(x, n) = sin(x * n/2) / (n * sin(x / 2)),

    where `n` is a positive integer.

    Parameters
    ----------
    x : array_like
        Input data
    n : int
        Integer defining the periodicity. Non-integer or non-positive
        values of `n` yield ``nan`` in the output.

    Returns
    -------
    diric : ndarray

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import special
    >>> special.diric(np.linspace(0, 2*np.pi, 5), 3)
    array([ 1.        , -0.33333333,  0.33333333, -0.33333333,  1.        ])
    """
    # Broadcast x and n against each other so the masks below line up
    # elementwise.
    x, n = asarray(x), asarray(n)
    n = asarray(n + (x - x))
    x = asarray(x + (n - n))
    out_dtype = x.dtype if issubdtype(x.dtype, inexact) else float
    out = zeros(x.shape, out_dtype)

    # Near sin(x/2) == 0 the quotient is numerically unstable; below this
    # empirical threshold (for 32-, 64- or 128-bit floats) the exact limit
    # (+1 or -1) is substituted instead.
    eps = np.finfo(out_dtype).eps
    if eps < 1e-18:
        tiny = 1e-11
    elif eps < 1e-15:
        tiny = 1e-7
    else:
        tiny = 1e-3

    # Invalid periodicity: n must be a positive integer.
    bad_n = (n <= 0) | (n != floor(n))
    place(out, bad_n, nan)

    half = x / 2
    half_sin = sin(half)

    # Where the denominator vanishes (x/2 near a multiple k*pi) the limit
    # of the quotient is (-1)**(k*(n-1)).
    near_zero = (1 - bad_n) & (abs(half_sin) < tiny)
    hsub = extract(near_zero, half)
    nsub = extract(near_zero, n)
    place(out, near_zero, pow(-1, np.round(hsub / pi) * (nsub - 1)))

    # Everywhere else, evaluate the defining formula directly.
    regular = (1 - bad_n) & (1 - near_zero)
    hsub = extract(regular, half)
    nsub = extract(regular, n)
    dsub = extract(regular, half_sin)
    place(out, regular, sin(nsub * hsub) / (nsub * dsub))
    return out
def jnjnp_zeros(nt):
    """Compute zeros of integer-order Bessel functions Jn and Jn'.

    Results are arranged in order of the magnitudes of the zeros.

    Parameters
    ----------
    nt : int
        Number (<=1200) of zeros to compute

    Returns
    -------
    zo[l-1] : ndarray
        Value of the lth zero of Jn(x) and Jn'(x). Of length `nt`.
    n[l-1] : ndarray
        Order of the Jn(x) or Jn'(x) associated with lth zero. Of length `nt`.
    m[l-1] : ndarray
        Serial number of the zeros of Jn(x) or Jn'(x) associated
        with lth zero. Of length `nt`.
    t[l-1] : ndarray
        0 if lth zero in zo is zero of Jn(x), 1 if it is a zero of Jn'(x). Of
        length `nt`.

    See Also
    --------
    jn_zeros, jnp_zeros : to get separated arrays of zeros.

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996, chapter 5.
           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
    """
    if not isscalar(nt) or floor(nt) != nt or nt > 1200:
        raise ValueError("Number must be integer <= 1200.")
    count = int(nt)
    order, serial, kind, zo = _specfun.jdzo(count)
    # The zeros array starts at index 1; slice all outputs to `count` entries.
    return zo[1:count + 1], order[:count], serial[:count], kind[:count]
def jnyn_zeros(n, nt):
    """Compute nt zeros of Bessel functions Jn(x), Jn'(x), Yn(x), and Yn'(x).

    Returns 4 arrays of length `nt`, corresponding to the first `nt`
    zeros of Jn(x), Jn'(x), Yn(x), and Yn'(x), respectively. The zeros
    are returned in ascending order.

    Parameters
    ----------
    n : int
        Order of the Bessel functions
    nt : int
        Number (<=1200) of zeros to compute

    Returns
    -------
    Jn : ndarray
        First `nt` zeros of Jn
    Jnp : ndarray
        First `nt` zeros of Jn'
    Yn : ndarray
        First `nt` zeros of Yn
    Ynp : ndarray
        First `nt` zeros of Yn'

    See Also
    --------
    jn_zeros, jnp_zeros, yn_zeros, ynp_zeros

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996, chapter 5.
           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html

    Examples
    --------
    >>> from scipy.special import jnyn_zeros
    >>> jn_roots, jnp_roots, yn_roots, ynp_roots = jnyn_zeros(1, 3)
    >>> jn_roots, yn_roots
    (array([ 3.83170597,  7.01558667, 10.17346814]),
     array([2.19714133, 5.42968104, 8.59600587]))
    """
    # Validate scalar-ness, then integrality, then positivity, so each
    # failure mode produces its own message.
    if not (isscalar(n) and isscalar(nt)):
        raise ValueError("Arguments must be scalars.")
    if floor(n) != n or floor(nt) != nt:
        raise ValueError("Arguments must be integers.")
    if nt <= 0:
        raise ValueError("nt > 0")
    # The order is passed in absolute value to the underlying routine.
    return _specfun.jyzo(abs(n), nt)
def jn_zeros(n, nt):
    r"""Compute zeros of integer-order Bessel functions Jn.

    Compute `nt` zeros of the Bessel functions :math:`J_n(x)` on the
    interval :math:`(0, \infty)`. The zeros are returned in ascending
    order. Note that this interval excludes the zero at :math:`x = 0`
    that exists for :math:`n > 0`.

    Parameters
    ----------
    n : int
        Order of Bessel function
    nt : int
        Number of zeros to return

    Returns
    -------
    ndarray
        First `nt` zeros of the Bessel function.

    See Also
    --------
    jv: Real-order Bessel functions of the first kind
    jnp_zeros: Zeros of :math:`Jn'`

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996, chapter 5.
           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html

    Examples
    --------
    >>> from scipy.special import jn_zeros
    >>> jn_zeros(3, 4)
    array([ 6.3801619 ,  9.76102313, 13.01520072, 16.22346616])
    """
    # jnyn_zeros computes zeros of Jn, Jn', Yn and Yn' at once; keep only
    # the Jn array.
    zeros_jn, _, _, _ = jnyn_zeros(n, nt)
    return zeros_jn
def jnp_zeros(n, nt):
    r"""Compute zeros of integer-order Bessel function derivatives Jn'.

    Compute `nt` zeros of the functions :math:`J_n'(x)` on the
    interval :math:`(0, \infty)`. The zeros are returned in ascending
    order. Note that this interval excludes the zero at :math:`x = 0`
    that exists for :math:`n > 1`.

    Parameters
    ----------
    n : int
        Order of Bessel function
    nt : int
        Number of zeros to return

    Returns
    -------
    ndarray
        First `nt` zeros of the Bessel function.

    See Also
    --------
    jvp: Derivatives of integer-order Bessel functions of the first kind
    jv: Float-order Bessel functions of the first kind

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996, chapter 5.
           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html

    Examples
    --------
    >>> from scipy.special import jnp_zeros
    >>> jnp_zeros(2, 4)
    array([ 3.05423693,  6.70613319,  9.96946782, 13.17037086])
    """
    # Keep only the Jn' component of the combined computation.
    _, zeros_jnp, _, _ = jnyn_zeros(n, nt)
    return zeros_jnp
def yn_zeros(n, nt):
    r"""Compute zeros of integer-order Bessel function Yn(x).

    Compute `nt` zeros of the functions :math:`Y_n(x)` on the interval
    :math:`(0, \infty)`. The zeros are returned in ascending order.

    Parameters
    ----------
    n : int
        Order of Bessel function
    nt : int
        Number of zeros to return

    Returns
    -------
    ndarray
        First `nt` zeros of the Bessel function.

    See Also
    --------
    yn: Bessel function of the second kind for integer order
    yv: Bessel function of the second kind for real order

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996, chapter 5.
           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html

    Examples
    --------
    >>> from scipy.special import yn_zeros
    >>> yn_zeros(2, 4)
    array([ 3.38424177,  6.79380751, 10.02347798, 13.20998671])
    """
    # Yn zeros are the third array of the combined computation.
    all_zeros = jnyn_zeros(n, nt)
    return all_zeros[2]
def ynp_zeros(n, nt):
    r"""Compute zeros of integer-order Bessel function derivatives Yn'(x).

    Compute `nt` zeros of the functions :math:`Y_n'(x)` on the
    interval :math:`(0, \infty)`. The zeros are returned in ascending
    order.

    Parameters
    ----------
    n : int
        Order of Bessel function
    nt : int
        Number of zeros to return

    Returns
    -------
    ndarray
        First `nt` zeros of the Bessel derivative function.

    See Also
    --------
    yvp

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996, chapter 5.
           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html

    Examples
    --------
    >>> from scipy.special import ynp_zeros
    >>> ynp_zeros(0, 4)
    array([ 2.19714133,  5.42968104,  8.59600587, 11.74915483])
    """
    # Yn' zeros are the last of the four arrays computed together.
    _, _, _, zeros_ynp = jnyn_zeros(n, nt)
    return zeros_ynp
def y0_zeros(nt, complex=False):
    """Compute nt zeros of Bessel function Y0(z), and derivative at each zero.

    The derivatives are given by Y0'(z0) = -Y1(z0) at each zero z0.

    Parameters
    ----------
    nt : int
        Number of zeros to return
    complex : bool, default False
        Set to False to return only the real zeros; set to True to return only
        the complex zeros with negative real part and positive imaginary part.
        Note that the complex conjugates of the latter are also zeros of the
        function, but are not returned by this routine.

    Returns
    -------
    z0n : ndarray
        Location of nth zero of Y0(z)
    y0pz0n : ndarray
        Value of derivative Y0'(z0) for nth zero

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996, chapter 5.
           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.special import y0_zeros
    >>> zeros, grads = y0_zeros(4)
    >>> with np.printoptions(precision=5):
    ...     print(f"Roots: {zeros}")
    Roots: [ 0.89358+0.j  3.95768+0.j  7.08605+0.j 10.22235+0.j]
    """
    if not isscalar(nt) or floor(nt) != nt or nt <= 0:
        raise ValueError("Arguments must be scalar positive integer.")
    # kf=0 selects Y0; kc=1 requests real zeros, kc=0 the complex ones.
    return _specfun.cyzo(nt, 0, not complex)
def y1_zeros(nt, complex=False):
    """Compute nt zeros of Bessel function Y1(z), and derivative at each zero.

    The derivatives are given by Y1'(z1) = Y0(z1) at each zero z1.

    Parameters
    ----------
    nt : int
        Number of zeros to return
    complex : bool, default False
        Set to False to return only the real zeros; set to True to return only
        the complex zeros with negative real part and positive imaginary part.
        Note that the complex conjugates of the latter are also zeros of the
        function, but are not returned by this routine.

    Returns
    -------
    z1n : ndarray
        Location of nth zero of Y1(z)
    y1pz1n : ndarray
        Value of derivative Y1'(z1) for nth zero

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996, chapter 5.
           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.special import y1_zeros
    >>> zeros, grads = y1_zeros(4)
    >>> with np.printoptions(precision=5):
    ...     print(f"Roots: {zeros}")
    Roots: [ 2.19714+0.j  5.42968+0.j  8.59601+0.j 11.74915+0.j]
    """
    if not isscalar(nt) or floor(nt) != nt or nt <= 0:
        raise ValueError("Arguments must be scalar positive integer.")
    # kf=1 selects Y1; kc=1 requests real zeros, kc=0 the complex ones.
    return _specfun.cyzo(nt, 1, not complex)
def y1p_zeros(nt, complex=False):
    """Compute nt zeros of Bessel derivative Y1'(z), and value at each zero.

    The values are given by Y1(z1) at each z1 where Y1'(z1)=0.

    Parameters
    ----------
    nt : int
        Number of zeros to return
    complex : bool, default False
        Set to False to return only the real zeros; set to True to return only
        the complex zeros with negative real part and positive imaginary part.
        Note that the complex conjugates of the latter are also zeros of the
        function, but are not returned by this routine.

    Returns
    -------
    z1pn : ndarray
        Location of nth zero of Y1'(z)
    y1z1pn : ndarray
        Value of derivative Y1(z1) for nth zero

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996, chapter 5.
           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.special import y1p_zeros
    >>> y1grad_roots, y1_values = y1p_zeros(4)
    >>> with np.printoptions(precision=5):
    ...     print(f"Y1' Roots: {y1grad_roots}")
    Y1' Roots: [ 3.68302+0.j  6.9415 +0.j 10.1234 +0.j 13.28576+0.j]
    """
    if not isscalar(nt) or floor(nt) != nt or nt <= 0:
        raise ValueError("Arguments must be scalar positive integer.")
    # kf=2 selects Y1'; kc=1 requests real zeros, kc=0 the complex ones.
    return _specfun.cyzo(nt, 2, not complex)
def _bessel_diff_formula(v, z, n, L, phase):
# from AMS55.
# L(v, z) = J(v, z), Y(v, z), H1(v, z), H2(v, z), phase = -1
# L(v, z) = I(v, z) or exp(v*pi*i)K(v, z), phase = 1
# For K, you can pull out the exp((v-k)*pi*i) into the caller
v = asarray(v)
p = 1.0
s = L(v-n, z)
for i in range(1, n+1):
p = phase * (p * (n-i+1)) / i # = choose(k, i)
s += p*L(v-n + i*2, z)
return s / (2.**n)
def jvp(v, z, n=1):
    """Compute derivatives of Bessel functions of the first kind.

    Compute the nth derivative of the Bessel function `Jv` with
    respect to `z`.

    Parameters
    ----------
    v : array_like or float
        Order of Bessel function
    z : complex
        Argument at which to evaluate the derivative; can be real or
        complex.
    n : int, default 1
        Order of derivative. For 0 returns the Bessel function `jv` itself.

    Returns
    -------
    scalar or ndarray
        Values of the derivative of the Bessel function.

    Notes
    -----
    The derivative is computed using the relation DLMF 10.6.7 [2]_.

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996, chapter 5.
           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
    .. [2] NIST Digital Library of Mathematical Functions.
           https://dlmf.nist.gov/10.6.E7

    Examples
    --------
    >>> from scipy.special import jvp
    >>> jvp(0, 1, 0), jvp(0, 1, 1), jvp(0, 1, 2)
    (0.7651976865579666, -0.44005058574493355, -0.3251471008130331)
    """
    order = _nonneg_int_or_fail(n, 'n')
    if order == 0:
        # Zeroth derivative is the function itself.
        return jv(v, z)
    return _bessel_diff_formula(v, z, order, jv, -1)
def yvp(v, z, n=1):
    """Compute derivatives of Bessel functions of the second kind.

    Compute the nth derivative of the Bessel function `Yv` with
    respect to `z`.

    Parameters
    ----------
    v : array_like of float
        Order of Bessel function
    z : complex
        Argument at which to evaluate the derivative
    n : int, default 1
        Order of derivative. For 0 returns the Bessel function `yv` itself.

    Returns
    -------
    scalar or ndarray
        nth derivative of the Bessel function.

    See Also
    --------
    yv

    Notes
    -----
    The derivative is computed using the relation DLMF 10.6.7 [2]_.

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996, chapter 5.
           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
    .. [2] NIST Digital Library of Mathematical Functions.
           https://dlmf.nist.gov/10.6.E7

    Examples
    --------
    >>> from scipy.special import yvp
    >>> yvp(0, 1, 0), yvp(0, 1, 1), yvp(0, 1, 2)
    (0.088256964215677, 0.7812128213002889, -0.8694697855159659)
    """
    order = _nonneg_int_or_fail(n, 'n')
    if order == 0:
        # Zeroth derivative is the function itself.
        return yv(v, z)
    return _bessel_diff_formula(v, z, order, yv, -1)
def kvp(v, z, n=1):
    """Compute derivatives of real-order modified Bessel function Kv(z)

    Kv(z) is the modified Bessel function of the second kind.
    Derivative is calculated with respect to `z`.

    Parameters
    ----------
    v : array_like of float
        Order of Bessel function
    z : array_like of complex
        Argument at which to evaluate the derivative
    n : int, default 1
        Order of derivative. For 0 returns the Bessel function `kv` itself.

    Returns
    -------
    out : ndarray
        The results

    See Also
    --------
    kv

    Notes
    -----
    The derivative is computed using the relation DLMF 10.29.5 [2]_.

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996, chapter 6.
           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
    .. [2] NIST Digital Library of Mathematical Functions.
           https://dlmf.nist.gov/10.29.E5

    Examples
    --------
    >>> from scipy.special import kvp
    >>> kvp(0, 1, 0), kvp(0, 1, 1), kvp(0, 1, 2)
    (0.42102443824070834, -0.6019072301972346, 1.0229316684379428)
    """
    order = _nonneg_int_or_fail(n, 'n')
    if order == 0:
        # Zeroth derivative is the function itself.
        return kv(v, z)
    # The (-1)**n factor accounts for K's sign in the derivative recurrence.
    return (-1) ** order * _bessel_diff_formula(v, z, order, kv, 1)
def ivp(v, z, n=1):
    """Compute derivatives of modified Bessel functions of the first kind.

    Compute the nth derivative of the modified Bessel function `Iv`
    with respect to `z`.

    Parameters
    ----------
    v : array_like or float
        Order of Bessel function
    z : array_like
        Argument at which to evaluate the derivative; can be real or
        complex.
    n : int, default 1
        Order of derivative. For 0, returns the Bessel function `iv` itself.

    Returns
    -------
    scalar or ndarray
        nth derivative of the modified Bessel function.

    See Also
    --------
    iv

    Notes
    -----
    The derivative is computed using the relation DLMF 10.29.5 [2]_.

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996, chapter 6.
           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
    .. [2] NIST Digital Library of Mathematical Functions.
           https://dlmf.nist.gov/10.29.E5

    Examples
    --------
    >>> from scipy.special import ivp
    >>> ivp(0, 1, 0), ivp(0, 1, 1), ivp(0, 1, 2)
    (1.2660658777520084, 0.565159103992485, 0.7009067737595233)
    """
    order = _nonneg_int_or_fail(n, 'n')
    if order == 0:
        # Zeroth derivative is the function itself.
        return iv(v, z)
    return _bessel_diff_formula(v, z, order, iv, 1)
def h1vp(v, z, n=1):
    """Compute derivatives of Hankel function H1v(z) with respect to `z`.

    Parameters
    ----------
    v : array_like
        Order of Hankel function
    z : array_like
        Argument at which to evaluate the derivative. Can be real or
        complex.
    n : int, default 1
        Order of derivative. For 0 returns the Hankel function `h1v` itself.

    Returns
    -------
    scalar or ndarray
        Values of the derivative of the Hankel function.

    See Also
    --------
    hankel1

    Notes
    -----
    The derivative is computed using the relation DLMF 10.6.7 [2]_.

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996, chapter 5.
           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
    .. [2] NIST Digital Library of Mathematical Functions.
           https://dlmf.nist.gov/10.6.E7

    Examples
    --------
    >>> from scipy.special import h1vp
    >>> h1vp(0, 1, 1)
    (-0.44005058574493355+0.7812128213002889j)
    """
    order = _nonneg_int_or_fail(n, 'n')
    if order == 0:
        # Zeroth derivative is the function itself.
        return hankel1(v, z)
    return _bessel_diff_formula(v, z, order, hankel1, -1)
def h2vp(v, z, n=1):
    """Compute derivatives of Hankel function H2v(z) with respect to `z`.

    Evaluates the nth derivative of the Hankel function of the second kind
    of order `v` at the point `z`.

    Parameters
    ----------
    v : array_like
        Order of Hankel function
    z : array_like
        Argument at which to evaluate the derivative. Can be real or
        complex.
    n : int, default 1
        Order of derivative. For 0 returns the Hankel function `h2v` itself.

    Returns
    -------
    scalar or ndarray
        Values of the derivative of the Hankel function.

    See Also
    --------
    hankel2

    Notes
    -----
    The derivative is computed using the relation DLFM 10.6.7 [2]_.

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996, chapter 5.
           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
    .. [2] NIST Digital Library of Mathematical Functions.
           https://dlmf.nist.gov/10.6.E7

    Examples
    --------
    >>> from scipy.special import h2vp
    >>> h2vp(0, 1, 0), h2vp(0, 1, 1), h2vp(0, 1, 2)
    ((0.7651976865579664-0.088256964215677j),
    (-0.44005058574493355-0.7812128213002889j),
    (-0.3251471008130329+0.8694697855159659j))
    """
    n = _nonneg_int_or_fail(n, 'n')
    if n == 0:
        # The zeroth derivative is the function itself.
        return hankel2(v, z)
    return _bessel_diff_formula(v, z, n, hankel2, -1)
def riccati_jn(n, x):
    r"""Compute Ricatti-Bessel function of the first kind and its derivative.

    The Ricatti-Bessel function of the first kind is defined as :math:`x
    j_n(x)`, where :math:`j_n` is the spherical Bessel function of the first
    kind of order :math:`n`.

    Values and first derivatives are returned for every order from 0 up to
    and including `n`.

    Parameters
    ----------
    n : int
        Maximum order of function to compute
    x : float
        Argument at which to evaluate

    Returns
    -------
    jn : ndarray
        Value of j0(x), ..., jn(x)
    jnp : ndarray
        First derivative j0'(x), ..., jn'(x)

    Notes
    -----
    Computed via backward recurrence using the relation DLMF 10.51.1 [2]_,
    through a Fortran routine by Shanjie Zhang and Jianming Jin [1]_.

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
    .. [2] NIST Digital Library of Mathematical Functions.
           https://dlmf.nist.gov/10.51.E1
    """
    if not (isscalar(n) and isscalar(x)):
        raise ValueError("arguments must be scalars.")
    n = _nonneg_int_or_fail(n, 'n', strict=False)
    # The backing routine requires an order of at least 1.
    order = max(n, 1)
    _, values, derivatives = _specfun.rctj(order, x)
    return values[:n + 1], derivatives[:n + 1]
def riccati_yn(n, x):
    """Compute Ricatti-Bessel function of the second kind and its derivative.

    The Ricatti-Bessel function of the second kind is defined as :math:`x
    y_n(x)`, where :math:`y_n` is the spherical Bessel function of the second
    kind of order :math:`n`.

    Values and first derivatives are returned for every order from 0 up to
    and including `n`.

    Parameters
    ----------
    n : int
        Maximum order of function to compute
    x : float
        Argument at which to evaluate

    Returns
    -------
    yn : ndarray
        Value of y0(x), ..., yn(x)
    ynp : ndarray
        First derivative y0'(x), ..., yn'(x)

    Notes
    -----
    Computed via ascending recurrence using the relation DLMF 10.51.1 [2]_,
    through a Fortran routine by Shanjie Zhang and Jianming Jin [1]_.

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
    .. [2] NIST Digital Library of Mathematical Functions.
           https://dlmf.nist.gov/10.51.E1
    """
    if not (isscalar(n) and isscalar(x)):
        raise ValueError("arguments must be scalars.")
    n = _nonneg_int_or_fail(n, 'n', strict=False)
    # The backing routine requires an order of at least 1.
    order = max(n, 1)
    _, values, derivatives = _specfun.rcty(order, x)
    return values[:n + 1], derivatives[:n + 1]
def erf_zeros(nt):
    """Compute the first nt zeros of erf(z) in the first quadrant, ordered by absolute value.

    Zeros in the other quadrants can be obtained by using the symmetries
    erf(-z) = -erf(z) and erf(conj(z)) = conj(erf(z)).

    Parameters
    ----------
    nt : int
        The number of zeros to compute

    Returns
    -------
    The locations of the zeros of erf : ndarray (complex)
        Complex values at which zeros of erf(z)

    Examples
    --------
    >>> from scipy import special
    >>> special.erf_zeros(1)
    array([1.45061616+1.880943j])

    Check that erf is (close to) zero for the value returned by erf_zeros

    >>> special.erf(special.erf_zeros(1))
    array([4.95159469e-14-1.16407394e-16j])

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
    """
    bad_input = (floor(nt) != nt) or (nt <= 0) or not isscalar(nt)
    if bad_input:
        raise ValueError("Argument must be positive scalar integer.")
    return _specfun.cerzo(nt)
def fresnelc_zeros(nt):
    """Compute nt complex zeros of cosine Fresnel integral C(z).

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
    """
    bad_input = (floor(nt) != nt) or (nt <= 0) or not isscalar(nt)
    if bad_input:
        raise ValueError("Argument must be positive scalar integer.")
    # Selector 1 picks C(z) in the backing routine.
    return _specfun.fcszo(1, nt)
def fresnels_zeros(nt):
    """Compute nt complex zeros of sine Fresnel integral S(z).

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
    """
    bad_input = (floor(nt) != nt) or (nt <= 0) or not isscalar(nt)
    if bad_input:
        raise ValueError("Argument must be positive scalar integer.")
    # Selector 2 picks S(z) in the backing routine.
    return _specfun.fcszo(2, nt)
def fresnel_zeros(nt):
    """Compute nt complex zeros of sine and cosine Fresnel integrals S(z) and C(z).

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
    """
    bad_input = (floor(nt) != nt) or (nt <= 0) or not isscalar(nt)
    if bad_input:
        raise ValueError("Argument must be positive scalar integer.")
    # Zeros of S(z) (selector 2) first, then zeros of C(z) (selector 1).
    return _specfun.fcszo(2, nt), _specfun.fcszo(1, nt)
def assoc_laguerre(x, n, k=0.0):
    """Compute the generalized (associated) Laguerre polynomial of degree n and order k.

    The polynomial :math:`L^{(k)}_n(x)` is orthogonal over ``[0, inf)``
    with weighting function ``exp(-x) * x**k``, where ``k > -1``.

    Notes
    -----
    This is a thin wrapper around `eval_genlaguerre`; note that the
    argument order is reversed: ``(x, n, k=0.0) --> (n, k, x)``.
    """
    return _ufuncs.eval_genlaguerre(n, k, x)
# Public alias: `digamma` is the same ufunc as `psi`.
digamma = psi
def polygamma(n, x):
    r"""Polygamma functions.

    Defined as :math:`\psi^{(n)}(x)` where :math:`\psi` is the
    `digamma` function. See [dlmf]_ for details.

    Parameters
    ----------
    n : array_like
        The order of the derivative of the digamma function; must be
        integral
    x : array_like
        Real valued input

    Returns
    -------
    ndarray
        Function results

    See Also
    --------
    digamma

    References
    ----------
    .. [dlmf] NIST, Digital Library of Mathematical Functions,
        https://dlmf.nist.gov/5.15

    Examples
    --------
    >>> from scipy import special
    >>> x = [2, 3, 25.5]
    >>> special.polygamma(1, x)
    array([ 0.64493407,  0.39493407,  0.03999467])
    """
    n = asarray(n)
    x = asarray(x)
    # DLMF 5.15: for n >= 1, psi^(n)(x) = (-1)**(n+1) * n! * zeta(n+1, x).
    higher_order = (-1.0)**(n+1) * gamma(n+1.0) * zeta(n+1, x)
    # For n == 0 fall back to the digamma function itself.
    return where(n == 0, psi(x), higher_order)
def mathieu_even_coef(m, q):
    r"""Fourier coefficients for even Mathieu and modified Mathieu functions.

    The Fourier series of the even solutions of the Mathieu differential
    equation are of the form

    .. math:: \mathrm{ce}_{2n}(z, q) = \sum_{k=0}^{\infty} A_{(2n)}^{(2k)} \cos 2kz

    .. math:: \mathrm{ce}_{2n+1}(z, q) = \sum_{k=0}^{\infty} A_{(2n+1)}^{(2k+1)} \cos (2k+1)z

    For even input m=2n the coefficients :math:`A_{(2n)}^{(2k)}` are
    returned; for odd input m=2n+1, the coefficients
    :math:`A_{(2n+1)}^{(2k+1)}`.

    Parameters
    ----------
    m : int
        Order of Mathieu functions. Must be non-negative.
    q : float (>=0)
        Parameter of Mathieu functions. Must be non-negative.

    Returns
    -------
    Ak : ndarray
        Even or odd Fourier coefficients, corresponding to even or odd m.

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
    .. [2] NIST Digital Library of Mathematical Functions
           https://dlmf.nist.gov/28.4#i
    """
    if not (isscalar(m) and isscalar(q)):
        raise ValueError("m and q must be scalars.")
    if (q < 0):
        raise ValueError("q >=0")
    if (m != floor(m)) or (m < 0):
        raise ValueError("m must be an integer >=0.")

    # Empirical estimate of how many coefficients are significant.
    root_q = sqrt(q)
    if q <= 1:
        qm = 7.5 + 56.1*root_q - 134.7*q + 90.7*root_q*q
    else:
        qm = 17.0 + 3.1*root_q - .126*q + .0037*root_q*q
    km = int(qm + 0.5*m)
    if km > 251:
        warnings.warn("Too many predicted coefficients.", RuntimeWarning, 2)
    m = int(floor(m))
    # kd selects the even-cosine (1) or odd-cosine (2) expansion.
    kd = 2 if m % 2 else 1
    a = mathieu_a(m, q)
    fc = _specfun.fcoef(kd, m, q, a)
    return fc[:km]
def mathieu_odd_coef(m, q):
    r"""Fourier coefficients for odd Mathieu and modified Mathieu functions.

    The Fourier series of the odd solutions of the Mathieu differential
    equation are of the form

    .. math:: \mathrm{se}_{2n+1}(z, q) = \sum_{k=0}^{\infty} B_{(2n+1)}^{(2k+1)} \sin (2k+1)z

    .. math:: \mathrm{se}_{2n+2}(z, q) = \sum_{k=0}^{\infty} B_{(2n+2)}^{(2k+2)} \sin (2k+2)z

    For even input m=2n+2 the coefficients :math:`B_{(2n+2)}^{(2k+2)}` are
    returned; for odd input m=2n+1, the coefficients
    :math:`B_{(2n+1)}^{(2k+1)}`.

    Parameters
    ----------
    m : int
        Order of Mathieu functions. Must be non-negative.
    q : float (>=0)
        Parameter of Mathieu functions. Must be non-negative.

    Returns
    -------
    Bk : ndarray
        Even or odd Fourier coefficients, corresponding to even or odd m.

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
    """
    if not (isscalar(m) and isscalar(q)):
        raise ValueError("m and q must be scalars.")
    if (q < 0):
        raise ValueError("q >=0")
    if (m != floor(m)) or (m <= 0):
        raise ValueError("m must be an integer > 0")

    # Empirical estimate of how many coefficients are significant.
    root_q = sqrt(q)
    if q <= 1:
        qm = 7.5 + 56.1*root_q - 134.7*q + 90.7*root_q*q
    else:
        qm = 17.0 + 3.1*root_q - .126*q + .0037*root_q*q
    km = int(qm + 0.5*m)
    if km > 251:
        warnings.warn("Too many predicted coefficients.", RuntimeWarning, 2)
    m = int(floor(m))
    # kd selects the odd-sine (3) or even-sine (4) expansion.
    kd = 3 if m % 2 else 4
    b = mathieu_b(m, q)
    fc = _specfun.fcoef(kd, m, q, b)
    return fc[:km]
def lpmn(m, n, z):
    """Sequence of associated Legendre functions of the first kind.

    Computes the associated Legendre function of the first kind of order m and
    degree n, ``Pmn(z)`` = :math:`P_n^m(z)`, and its derivative, ``Pmn'(z)``.
    Returns two arrays of size ``(m+1, n+1)`` containing ``Pmn(z)`` and
    ``Pmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``.

    This function takes a real argument ``z``. For complex arguments ``z``
    use clpmn instead.

    Parameters
    ----------
    m : int
       ``|m| <= n``; the order of the Legendre function.
    n : int
       where ``n >= 0``; the degree of the Legendre function.  Often
       called ``l`` (lower case L) in descriptions of the associated
       Legendre function
    z : float
        Input value.

    Returns
    -------
    Pmn_z : (m+1, n+1) array
       Values for all orders 0..m and degrees 0..n
    Pmn_d_z : (m+1, n+1) array
       Derivatives for all orders 0..m and degrees 0..n

    See Also
    --------
    clpmn: associated Legendre functions of the first kind for complex z

    Notes
    -----
    In the interval (-1, 1), Ferrer's function of the first kind is
    returned. The phase convention used for the intervals (1, inf)
    and (-inf, -1) is such that the result is always real.

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
    .. [2] NIST Digital Library of Mathematical Functions
           https://dlmf.nist.gov/14.3
    """
    if not isscalar(m) or (abs(m) > n):
        raise ValueError("m must be <= n.")
    if not isscalar(n) or (n < 0):
        raise ValueError("n must be a non-negative integer.")
    if not isscalar(z):
        raise ValueError("z must be scalar.")
    if iscomplex(z):
        raise ValueError("Argument must be real. Use clpmn instead.")
    negative_order = m < 0
    if negative_order:
        mp = -m
        mf, nf = mgrid[0:mp+1, 0:n+1]
        # Conversion factor from positive-order values (may hit gamma poles,
        # hence the suppressed errstate).
        with _ufuncs.errstate(all='ignore'):
            if abs(z) < 1:
                # Ferrer function; DLMF 14.9.3
                fixarr = where(mf > nf, 0.0,
                               (-1)**mf * gamma(nf-mf+1) / gamma(nf+mf+1))
            else:
                # Match to clpmn; DLMF 14.9.13
                fixarr = where(mf > nf, 0.0, gamma(nf-mf+1) / gamma(nf+mf+1))
    else:
        mp = m
    p, pd = _specfun.lpmn(mp, n, z)
    if negative_order:
        p = p * fixarr
        pd = pd * fixarr
    return p, pd
def clpmn(m, n, z, type=3):
    """Associated Legendre function of the first kind for complex arguments.

    Computes the associated Legendre function of the first kind of order m and
    degree n, ``Pmn(z)`` = :math:`P_n^m(z)`, and its derivative, ``Pmn'(z)``.
    Returns two arrays of size ``(m+1, n+1)`` containing ``Pmn(z)`` and
    ``Pmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``.

    Parameters
    ----------
    m : int
       ``|m| <= n``; the order of the Legendre function.
    n : int
       where ``n >= 0``; the degree of the Legendre function.  Often
       called ``l`` (lower case L) in descriptions of the associated
       Legendre function
    z : float or complex
        Input value.
    type : int, optional
       takes values 2 or 3
       2: cut on the real axis ``|x| > 1``
       3: cut on the real axis ``-1 < x < 1`` (default)

    Returns
    -------
    Pmn_z : (m+1, n+1) array
       Values for all orders ``0..m`` and degrees ``0..n``
    Pmn_d_z : (m+1, n+1) array
       Derivatives for all orders ``0..m`` and degrees ``0..n``

    See Also
    --------
    lpmn: associated Legendre functions of the first kind for real z

    Notes
    -----
    By default, i.e. for ``type=3``, phase conventions are chosen according
    to [1]_ such that the function is analytic. The cut lies on the interval
    (-1, 1). Approaching the cut from above or below in general yields a phase
    factor with respect to Ferrer's function of the first kind
    (cf. `lpmn`).

    For ``type=2`` a cut at ``|x| > 1`` is chosen. Approaching the real values
    on the interval (-1, 1) in the complex plane yields Ferrer's function
    of the first kind.

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
    .. [2] NIST Digital Library of Mathematical Functions
           https://dlmf.nist.gov/14.21
    """
    if not isscalar(m) or (abs(m) > n):
        raise ValueError("m must be <= n.")
    if not isscalar(n) or (n < 0):
        raise ValueError("n must be a non-negative integer.")
    if not isscalar(z):
        raise ValueError("z must be scalar.")
    if not (type == 2 or type == 3):
        raise ValueError("type must be either 2 or 3.")
    negative_order = m < 0
    if negative_order:
        mp = -m
        mf, nf = mgrid[0:mp+1, 0:n+1]
        # Conversion factor from positive-order values (may hit gamma poles,
        # hence the suppressed errstate).
        with _ufuncs.errstate(all='ignore'):
            if type == 2:
                fixarr = where(mf > nf, 0.0,
                               (-1)**mf * gamma(nf-mf+1) / gamma(nf+mf+1))
            else:
                fixarr = where(mf > nf, 0.0, gamma(nf-mf+1) / gamma(nf+mf+1))
    else:
        mp = m
    p, pd = _specfun.clpmn(mp, n, real(z), imag(z), type)
    if negative_order:
        p = p * fixarr
        pd = pd * fixarr
    return p, pd
def lqmn(m, n, z):
    """Sequence of associated Legendre functions of the second kind.

    Computes the associated Legendre function of the second kind of order m and
    degree n, ``Qmn(z)`` = :math:`Q_n^m(z)`, and its derivative, ``Qmn'(z)``.
    Returns two arrays of size ``(m+1, n+1)`` containing ``Qmn(z)`` and
    ``Qmn'(z)`` for all orders from ``0..m`` and degrees from ``0..n``.

    Parameters
    ----------
    m : int
       ``|m| <= n``; the order of the Legendre function.
    n : int
       where ``n >= 0``; the degree of the Legendre function.  Often
       called ``l`` (lower case L) in descriptions of the associated
       Legendre function
    z : complex
        Input value.

    Returns
    -------
    Qmn_z : (m+1, n+1) array
       Values for all orders 0..m and degrees 0..n
    Qmn_d_z : (m+1, n+1) array
       Derivatives for all orders 0..m and degrees 0..n

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
    """
    if not isscalar(m) or (m < 0):
        raise ValueError("m must be a non-negative integer.")
    if not isscalar(n) or (n < 0):
        raise ValueError("n must be a non-negative integer.")
    if not isscalar(z):
        raise ValueError("z must be scalar.")
    m = int(m)
    n = int(n)
    # The backing routines need order and degree of at least 1.
    mm = max(1, m)
    nn = max(1, n)
    solver = _specfun.clqmn if iscomplex(z) else _specfun.lqmn
    q, qd = solver(mm, nn, z)
    return q[:(m+1), :(n+1)], qd[:(m+1), :(n+1)]
def bernoulli(n):
    """Bernoulli numbers B0..Bn (inclusive).

    Parameters
    ----------
    n : int
        Indicated the number of terms in the Bernoulli series to generate.

    Returns
    -------
    ndarray
        The Bernoulli numbers ``[B(0), B(1), ..., B(n)]``.

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
    .. [2] "Bernoulli number", Wikipedia, https://en.wikipedia.org/wiki/Bernoulli_number

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.special import bernoulli, zeta
    >>> bernoulli(4)
    array([ 1.        , -0.5       ,  0.16666667,  0.        , -0.03333333])

    Note that `bernoulli` uses the convention that ``B(1)`` is -1/2
    (``B_n^-`` in the notation of [2]_), while the zeta-function relation
    ``B_n^+ = -n * zeta(1 - n)`` for ``n > 0`` yields ``B(1)`` = +1/2.
    """
    if not isscalar(n) or (n < 0):
        raise ValueError("n must be a non-negative integer.")
    n = int(n)
    # The backing routine requires at least two terms; extras are sliced off.
    return _specfun.bernob(int(max(n, 2)))[:(n+1)]
def euler(n):
    """Euler numbers E(0), E(1), ..., E(n).

    The Euler numbers [1]_ are also known as the secant numbers.

    Because ``euler(n)`` returns floating point values, it does not give
    exact values for large `n`.  The first inexact value is E(22).

    Parameters
    ----------
    n : int
        The highest index of the Euler number to be returned.

    Returns
    -------
    ndarray
        The Euler numbers [E(0), E(1), ..., E(n)].
        The odd Euler numbers, which are all zero, are included.

    References
    ----------
    .. [1] Sequence A122045, The On-Line Encyclopedia of Integer Sequences,
           https://oeis.org/A122045
    .. [2] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html

    Examples
    --------
    >>> from scipy.special import euler
    >>> euler(6)
    array([  1.,   0.,  -1.,   0.,   5.,   0., -61.])
    """
    if not isscalar(n) or (n < 0):
        raise ValueError("n must be a non-negative integer.")
    n = int(n)
    # The backing routine requires at least two terms; extras are sliced off.
    return _specfun.eulerb(max(n, 2))[:(n+1)]
def lpn(n, z):
    """Legendre function of the first kind.

    Compute sequence of Legendre functions of the first kind (polynomials),
    Pn(z) and derivatives for all degrees from 0 to n (inclusive).

    See also special.legendre for polynomial class.

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
    """
    if not (isscalar(n) and isscalar(z)):
        raise ValueError("arguments must be scalars.")
    n = _nonneg_int_or_fail(n, 'n', strict=False)
    # The backing routines require a degree of at least 1.
    degree = max(n, 1)
    if iscomplex(z):
        pn, pd = _specfun.clpn(degree, z)
    else:
        pn, pd = _specfun.lpn(degree, z)
    return pn[:(n+1)], pd[:(n+1)]
def lqn(n, z):
    """Legendre function of the second kind.

    Compute sequence of Legendre functions of the second kind, Qn(z) and
    derivatives for all degrees from 0 to n (inclusive).

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
    """
    if not (isscalar(n) and isscalar(z)):
        raise ValueError("arguments must be scalars.")
    n = _nonneg_int_or_fail(n, 'n', strict=False)
    # The backing routines require a degree of at least 1.
    degree = max(n, 1)
    if iscomplex(z):
        qn, qd = _specfun.clqn(degree, z)
    else:
        qn, qd = _specfun.lqnb(degree, z)
    return qn[:(n+1)], qd[:(n+1)]
def ai_zeros(nt):
    """
    Compute `nt` zeros and values of the Airy function Ai and its derivative.

    Computes the first `nt` zeros, `a`, of the Airy function Ai(x);
    first `nt` zeros, `ap`, of the derivative of the Airy function Ai'(x);
    the corresponding values Ai(a');
    and the corresponding values Ai'(a).

    Parameters
    ----------
    nt : int
        Number of zeros to compute

    Returns
    -------
    a : ndarray
        First `nt` zeros of Ai(x)
    ap : ndarray
        First `nt` zeros of Ai'(x)
    ai : ndarray
        Values of Ai(x) evaluated at first `nt` zeros of Ai'(x)
    aip : ndarray
        Values of Ai'(x) evaluated at first `nt` zeros of Ai(x)

    Examples
    --------
    >>> from scipy import special
    >>> a, ap, ai, aip = special.ai_zeros(3)
    >>> a
    array([-2.33810741, -4.08794944, -5.52055983])

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
    """
    if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
        raise ValueError("nt must be a positive integer scalar.")
    # kf=1 selects the Ai branch of the backing routine.
    return _specfun.airyzo(nt, 1)
def bi_zeros(nt):
    """
    Compute `nt` zeros and values of the Airy function Bi and its derivative.

    Computes the first `nt` zeros, b, of the Airy function Bi(x);
    first `nt` zeros, b', of the derivative of the Airy function Bi'(x);
    the corresponding values Bi(b');
    and the corresponding values Bi'(b).

    Parameters
    ----------
    nt : int
        Number of zeros to compute

    Returns
    -------
    b : ndarray
        First `nt` zeros of Bi(x)
    bp : ndarray
        First `nt` zeros of Bi'(x)
    bi : ndarray
        Values of Bi(x) evaluated at first `nt` zeros of Bi'(x)
    bip : ndarray
        Values of Bi'(x) evaluated at first `nt` zeros of Bi(x)

    Examples
    --------
    >>> from scipy import special
    >>> b, bp, bi, bip = special.bi_zeros(3)
    >>> b
    array([-1.17371322, -3.2710933 , -4.83073784])

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
    """
    if not isscalar(nt) or (floor(nt) != nt) or (nt <= 0):
        raise ValueError("nt must be a positive integer scalar.")
    # kf=2 selects the Bi branch of the backing routine.
    return _specfun.airyzo(nt, 2)
def lmbda(v, x):
    r"""Jahnke-Emden Lambda function, Lambdav(x).

    This function is defined as [2]_,

    .. math:: \Lambda_v(x) = \Gamma(v+1) \frac{J_v(x)}{(x/2)^v},

    where :math:`\Gamma` is the gamma function and :math:`J_v` is the
    Bessel function of the first kind.

    Parameters
    ----------
    v : float
        Order of the Lambda function
    x : float
        Value at which to evaluate the function and derivatives

    Returns
    -------
    vl : ndarray
        Values of Lambda_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
    dl : ndarray
        Derivatives Lambda_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
    .. [2] Jahnke, E. and Emde, F. "Tables of Functions with Formulae and
           Curves" (4th ed.), Dover, 1945
    """
    if not (isscalar(v) and isscalar(x)):
        raise ValueError("arguments must be scalars.")
    if (v < 0):
        # Only negative orders are rejected; v == 0 is valid, so the
        # message states ">= 0" (previously it incorrectly said "> 0").
        raise ValueError("argument must be >= 0.")
    n = int(v)
    v0 = v - n
    # The backing routines require at least one integer order.
    n1 = max(n, 1)
    v1 = n1 + v0
    if (v != floor(v)):
        # Fractional order: general routine.
        vm, vl, dl = _specfun.lamv(v1, x)
    else:
        # Integer order: specialized routine.
        vm, vl, dl = _specfun.lamn(v1, x)
    return vl[:(n+1)], dl[:(n+1)]
def pbdv_seq(v, x):
    """Parabolic cylinder functions Dv(x) and derivatives.

    Parameters
    ----------
    v : float
        Order of the parabolic cylinder function
    x : float
        Value at which to evaluate the function and derivatives

    Returns
    -------
    dv : ndarray
        Values of D_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
    dp : ndarray
        Derivatives D_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996, chapter 13.
           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
    """
    if not (isscalar(v) and isscalar(x)):
        raise ValueError("arguments must be scalars.")
    n = int(v)
    fractional = v - n
    # The backing routine requires at least one integer step in the order.
    n1 = max(n, 1)
    values, derivatives, pdf, pdd = _specfun.pbdv(n1 + fractional, x)
    return values[:n1+1], derivatives[:n1+1]
def pbvv_seq(v, x):
    """Parabolic cylinder functions Vv(x) and derivatives.

    Parameters
    ----------
    v : float
        Order of the parabolic cylinder function
    x : float
        Value at which to evaluate the function and derivatives

    Returns
    -------
    dv : ndarray
        Values of V_vi(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.
    dp : ndarray
        Derivatives V_vi'(x), for vi=v-int(v), vi=1+v-int(v), ..., vi=v.

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996, chapter 13.
           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
    """
    if not (isscalar(v) and isscalar(x)):
        raise ValueError("arguments must be scalars.")
    n = int(v)
    fractional = v - n
    # The backing routine requires at least one integer step in the order.
    n1 = max(n, 1)
    values, derivatives, pdf, pdd = _specfun.pbvv(n1 + fractional, x)
    return values[:n1+1], derivatives[:n1+1]
def pbdn_seq(n, z):
    """Parabolic cylinder functions Dn(z) and derivatives.

    Parameters
    ----------
    n : int
        Order of the parabolic cylinder function
    z : complex
        Value at which to evaluate the function and derivatives

    Returns
    -------
    dv : ndarray
        Values of D_i(z), for i=0, ..., i=n.
    dp : ndarray
        Derivatives D_i'(z), for i=0, ..., i=n.

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996, chapter 13.
           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
    """
    if not (isscalar(n) and isscalar(z)):
        raise ValueError("arguments must be scalars.")
    if (floor(n) != n):
        raise ValueError("n must be an integer.")
    # The backing routine requires |order| of at least 1.
    n1 = 1 if abs(n) <= 1 else n
    cpb, cpd = _specfun.cpbdn(n1, z)
    return cpb[:n1+1], cpd[:n1+1]
def ber_zeros(nt):
    """Compute nt zeros of the Kelvin function ber.

    Parameters
    ----------
    nt : int
        Number of zeros to compute. Must be positive.

    Returns
    -------
    ndarray
        First `nt` zeros of the Kelvin function.

    See Also
    --------
    ber

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
    """
    valid = isscalar(nt) and (floor(nt) == nt) and (nt > 0)
    if not valid:
        raise ValueError("nt must be positive integer scalar.")
    # Selector 1 picks the ber branch of the Kelvin-zero routine.
    return _specfun.klvnzo(nt, 1)
def bei_zeros(nt):
    """Compute nt zeros of the Kelvin function bei.

    Parameters
    ----------
    nt : int
        Number of zeros to compute. Must be positive.

    Returns
    -------
    ndarray
        First `nt` zeros of the Kelvin function.

    See Also
    --------
    bei

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
    """
    valid = isscalar(nt) and (floor(nt) == nt) and (nt > 0)
    if not valid:
        raise ValueError("nt must be positive integer scalar.")
    # Selector 2 picks the bei branch of the Kelvin-zero routine.
    return _specfun.klvnzo(nt, 2)
def ker_zeros(nt):
    """Compute nt zeros of the Kelvin function ker.

    Parameters
    ----------
    nt : int
        Number of zeros to compute. Must be positive.

    Returns
    -------
    ndarray
        First `nt` zeros of the Kelvin function.

    See Also
    --------
    ker

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
    """
    valid = isscalar(nt) and (floor(nt) == nt) and (nt > 0)
    if not valid:
        raise ValueError("nt must be positive integer scalar.")
    # Selector 3 picks the ker branch of the Kelvin-zero routine.
    return _specfun.klvnzo(nt, 3)
def kei_zeros(nt):
    """Compute nt zeros of the Kelvin function kei.

    Parameters
    ----------
    nt : int
        Number of zeros to compute. Must be positive.

    Returns
    -------
    ndarray
        First `nt` zeros of the Kelvin function.

    See Also
    --------
    kei

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
    """
    valid = isscalar(nt) and (floor(nt) == nt) and (nt > 0)
    if not valid:
        raise ValueError("nt must be positive integer scalar.")
    # Selector 4 picks the kei branch of the Kelvin-zero routine.
    return _specfun.klvnzo(nt, 4)
def berp_zeros(nt):
    """Compute nt zeros of the derivative of the Kelvin function ber.

    Parameters
    ----------
    nt : int
        Number of zeros to compute. Must be positive.

    Returns
    -------
    ndarray
        First `nt` zeros of the derivative of the Kelvin function.

    See Also
    --------
    ber, berp

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
    """
    valid = isscalar(nt) and (floor(nt) == nt) and (nt > 0)
    if not valid:
        raise ValueError("nt must be positive integer scalar.")
    # Selector 5 picks the ber' branch of the Kelvin-zero routine.
    return _specfun.klvnzo(nt, 5)
def beip_zeros(nt):
    """Compute nt zeros of the derivative of the Kelvin function bei.

    Parameters
    ----------
    nt : int
        Number of zeros to compute. Must be positive.

    Returns
    -------
    ndarray
        First `nt` zeros of the derivative of the Kelvin function.

    See Also
    --------
    bei, beip

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
    """
    valid = isscalar(nt) and (floor(nt) == nt) and (nt > 0)
    if not valid:
        raise ValueError("nt must be positive integer scalar.")
    # Selector 6 picks the bei' branch of the Kelvin-zero routine.
    return _specfun.klvnzo(nt, 6)
def kerp_zeros(nt):
    """Compute nt zeros of the derivative of the Kelvin function ker.

    Parameters
    ----------
    nt : int
        Number of zeros to compute. Must be positive.

    Returns
    -------
    ndarray
        First `nt` zeros of the derivative of the Kelvin function.

    See Also
    --------
    ker, kerp

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
    """
    valid = isscalar(nt) and (floor(nt) == nt) and (nt > 0)
    if not valid:
        raise ValueError("nt must be positive integer scalar.")
    # Selector 7 picks the ker' branch of the Kelvin-zero routine.
    return _specfun.klvnzo(nt, 7)
def keip_zeros(nt):
    """Compute nt zeros of the derivative of the Kelvin function kei.

    Parameters
    ----------
    nt : int
        Number of zeros to compute. Must be positive.

    Returns
    -------
    ndarray
        First `nt` zeros of the derivative of the Kelvin function.

    See Also
    --------
    kei, keip

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html
    """
    valid = isscalar(nt) and (floor(nt) == nt) and (nt > 0)
    if not valid:
        raise ValueError("nt must be positive integer scalar.")
    # Selector 8 picks the kei' branch of the Kelvin-zero routine.
    return _specfun.klvnzo(nt, 8)
def kelvin_zeros(nt):
    """Compute nt zeros of all Kelvin functions.

    Returned in a length-8 tuple of arrays of length nt.  The tuple
    contains the arrays of zeros of (ber, bei, ker, kei, ber', bei',
    ker', kei').

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html

    """
    if not (isscalar(nt) and floor(nt) == nt and nt > 0):
        raise ValueError("nt must be positive integer scalar.")
    # flags 1..8 map, in order, to ber, bei, ker, kei, ber', bei', ker', kei'
    return tuple(_specfun.klvnzo(nt, flag) for flag in range(1, 9))
def pro_cv_seq(m, n, c):
    """Characteristic values for prolate spheroidal wave functions.

    Compute a sequence of characteristic values for the prolate
    spheroidal wave functions for mode m and n'=m..n and spheroidal
    parameter c.

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html

    """
    if not (isscalar(m) and isscalar(n) and isscalar(c)):
        raise ValueError("Arguments must be scalars.")
    if floor(n) != n or floor(m) != m:
        raise ValueError("Modes must be integers.")
    if n - m > 199:
        raise ValueError("Difference between n and m is too large.")
    # segv's second output holds the characteristic values; the final
    # argument +1 selects the prolate variant (obl_cv_seq passes -1)
    num = n - m + 1
    return _specfun.segv(m, n, c, 1)[1][:num]
def obl_cv_seq(m, n, c):
    """Characteristic values for oblate spheroidal wave functions.

    Compute a sequence of characteristic values for the oblate
    spheroidal wave functions for mode m and n'=m..n and spheroidal
    parameter c.

    References
    ----------
    .. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
           Functions", John Wiley and Sons, 1996.
           https://people.sc.fsu.edu/~jburkardt/f77_src/special_functions/special_functions.html

    """
    if not (isscalar(m) and isscalar(n) and isscalar(c)):
        raise ValueError("Arguments must be scalars.")
    if floor(n) != n or floor(m) != m:
        raise ValueError("Modes must be integers.")
    if n - m > 199:
        raise ValueError("Difference between n and m is too large.")
    # segv's second output holds the characteristic values; the final
    # argument -1 selects the oblate variant (pro_cv_seq passes +1)
    num = n - m + 1
    return _specfun.segv(m, n, c, -1)[1][:num]
def comb(N, k, exact=False, repetition=False, legacy=_NoValue):
    """The number of combinations of N things taken k at a time.

    This is often expressed as "N choose k".

    Parameters
    ----------
    N : int, ndarray
        Number of things.
    k : int, ndarray
        Number of elements taken.
    exact : bool, optional
        For integers, if `exact` is False, then floating point precision is
        used, otherwise the result is computed exactly. For non-integers, if
        `exact` is True, is disregarded.
    repetition : bool, optional
        If `repetition` is True, then the number of combinations with
        repetition is computed.
    legacy : bool, optional
        If `legacy` is True and `exact` is True, then non-integral arguments
        are cast to ints; if `legacy` is False, the result for non-integral
        arguments is unaffected by the value of `exact`.

        .. deprecated:: 1.9.0
            Using `legacy` is deprecated and will be removed by
            Scipy 1.14.0. If you want to keep the legacy behaviour, cast
            your inputs directly, e.g.
            ``comb(int(your_N), int(your_k), exact=True)``.

    Returns
    -------
    val : int, float, ndarray
        The total number of combinations.

    See Also
    --------
    binom : Binomial coefficient considered as a function of two real
            variables.

    Notes
    -----
    - Array arguments accepted only for exact=False case.
    - If N < 0, or k < 0, then 0 is returned.
    - If k > N and repetition=False, then 0 is returned.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.special import comb
    >>> k = np.array([3, 4])
    >>> n = np.array([10, 10])
    >>> comb(n, k, exact=False)
    array([ 120., 210.])
    >>> comb(10, 3, exact=True)
    120
    >>> comb(10, 3, exact=True, repetition=True)
    220

    """
    if legacy is not _NoValue:
        warnings.warn(
            "Using 'legacy' keyword is deprecated and will be removed by "
            "Scipy 1.14.0. If you want to keep the legacy behaviour, cast "
            "your inputs directly, e.g. "
            "'comb(int(your_N), int(your_k), exact=True)'.",
            DeprecationWarning,
            stacklevel=2
        )
    if repetition:
        # combinations with repetition reduce to C(N + k - 1, k)
        return comb(N + k - 1, k, exact, legacy=legacy)
    if exact:
        if int(N) == N and int(k) == k:
            # _comb_int casts inputs to integers, which is safe & intended here
            return _comb_int(N, k)
        elif legacy:
            # here at least one number is not an integer; legacy behavior uses
            # lossy casts to int
            return _comb_int(N, k)
        # otherwise, we disregard `exact=True`; it makes no sense for
        # non-integral arguments
        return comb(N, k)
    else:
        k, N = asarray(k), asarray(N)
        # out-of-domain entries (k > N, or negative N/k) are zeroed below
        cond = (k <= N) & (N >= 0) & (k >= 0)
        vals = binom(N, k)
        if isinstance(vals, np.ndarray):
            vals[~cond] = 0
        elif not cond:
            # scalar input outside the valid domain
            vals = np.float64(0)
        return vals
def perm(N, k, exact=False):
    """Permutations of N things taken k at a time, i.e., k-permutations of N.

    It's also known as "partial permutations".

    Parameters
    ----------
    N : int, ndarray
        Number of things.
    k : int, ndarray
        Number of elements taken.
    exact : bool, optional
        If `exact` is False, then floating point precision is used, otherwise
        exact long integer is computed.

    Returns
    -------
    val : int, ndarray
        The number of k-permutations of N.

    Notes
    -----
    - Array arguments accepted only for exact=False case.
    - If k > N, N < 0, or k < 0, then a 0 is returned.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.special import perm
    >>> k = np.array([3, 4])
    >>> n = np.array([10, 10])
    >>> perm(n, k)
    array([ 720., 5040.])
    >>> perm(10, 3, exact=True)
    720

    """
    if exact:
        # exact integer arithmetic: N * (N - 1) * ... * (N - k + 1)
        if (k > N) or (N < 0) or (k < 0):
            return 0
        result = 1
        for factor in range(N - k + 1, N + 1):
            result *= factor
        return result
    # floating-point path via the Pochhammer symbol; invalid inputs map to 0
    k, N = asarray(k), asarray(N)
    valid = (k <= N) & (N >= 0) & (k >= 0)
    vals = poch(N - k + 1, k)
    if isinstance(vals, np.ndarray):
        vals[~valid] = 0
    elif not valid:
        vals = np.float64(0)
    return vals
# https://stackoverflow.com/a/16327037
def _range_prod(lo, hi, k=1):
"""
Product of a range of numbers spaced k apart (from hi).
For k=1, this returns the product of
lo * (lo+1) * (lo+2) * ... * (hi-2) * (hi-1) * hi
= hi! / (lo-1)!
For k>1, it correspond to taking only every k'th number when
counting down from hi - e.g. 18!!!! = _range_prod(1, 18, 4).
Breaks into smaller products first for speed:
_range_prod(2, 9) = ((2*3)*(4*5))*((6*7)*(8*9))
"""
if lo + k < hi:
mid = (hi + lo) // 2
if k > 1:
# make sure mid is a multiple of k away from hi
mid = mid - ((mid - hi) % k)
return _range_prod(lo, mid, k) * _range_prod(mid + k, hi, k)
elif lo + k == hi:
return lo * hi
else:
return hi
def _exact_factorialx_array(n, k=1):
    """
    Exact computation of factorial for an array.

    The factorials are computed in incremental fashion, by taking
    the sorted unique values of n and multiplying the intervening
    numbers between the different unique values.

    In other words, the factorial for the largest input is only
    computed once, with each other result computed in the process.

    k > 1 corresponds to the multifactorial.
    """
    un = np.unique(n)
    # numpy changed nan-sorting behaviour with 1.21, see numpy/numpy#18070;
    # to unify the behaviour, we remove the nan's here; the respective
    # values will be set separately at the end
    un = un[~np.isnan(un)]
    # Convert to object array if np.int64 can't handle size
    if np.isnan(n).any():
        # NaNs force a floating-point result dtype
        dt = float
    elif k in _FACTORIALK_LIMITS_64BITS.keys():
        if un[-1] > _FACTORIALK_LIMITS_64BITS[k]:
            # e.g. k=1: 21! > np.iinfo(np.int64).max
            dt = object
        elif un[-1] > _FACTORIALK_LIMITS_32BITS[k]:
            # e.g. k=3: 26!!! > np.iinfo(np.int32).max
            dt = np.int64
        else:
            dt = np.int_
    else:
        # for k >= 10, we always use object
        dt = object
    out = np.empty_like(n, dtype=dt)
    # Handle invalid/trivial values
    # (order matters: n < 0 is a subset of n < 2, so the zero assignment
    # for negative inputs must come second to override the 1)
    un = un[un > 1]
    out[n < 2] = 1
    out[n < 0] = 0
    # Calculate products of each range of numbers
    # we can only multiply incrementally if the values are k apart;
    # therefore we partition `un` into "lanes", i.e. its residues modulo k
    for lane in range(0, k):
        ul = un[(un % k) == lane] if k > 1 else un
        if ul.size:
            # after np.unique, un resp. ul are sorted, ul[0] is the smallest;
            # cast to python ints to avoid overflow with np.int-types
            val = _range_prod(1, int(ul[0]), k=k)
            out[n == ul[0]] = val
            for i in range(len(ul) - 1):
                # by the filtering above, we have ensured that prev & current
                # are a multiple of k apart
                prev = ul[i]
                current = ul[i + 1]
                # we already multiplied all factors until prev; continue
                # building the full factorial from the following (`prev + 1`);
                # use int() for the same reason as above
                val *= _range_prod(int(prev + 1), int(current), k=k)
                out[n == current] = val
    if np.isnan(n).any():
        # propagate NaNs from the input into the (float) result
        out = out.astype(np.float64)
        out[np.isnan(n)] = np.nan
    return out
def factorial(n, exact=False):
    """
    The factorial of a number or array of numbers.

    The factorial of non-negative integer `n` is the product of all
    positive integers less than or equal to `n`::

        n! = n * (n - 1) * (n - 2) * ... * 1

    Parameters
    ----------
    n : int or array_like of ints
        Input values.  If ``n < 0``, the return value is 0.
    exact : bool, optional
        If True, calculate the answer exactly using long integer arithmetic.
        If False, result is approximated in floating point rapidly using the
        `gamma` function.
        Default is False.

    Returns
    -------
    nf : float or int or ndarray
        Factorial of `n`, as integer or float depending on `exact`.

    Raises
    ------
    ValueError
        If `n` has a dtype other than integer or floating point, or if
        ``exact=True`` is combined with a non-integral array whose values
        are not merely integers mixed with NaNs (the latter is deprecated).

    Notes
    -----
    For arrays with ``exact=True``, the factorial is computed only once, for
    the largest input, with each other result computed in the process.
    The output dtype is increased to ``int64`` or ``object`` if necessary.

    With ``exact=False`` the factorial is approximated using the gamma
    function:

    .. math:: n! = \\Gamma(n+1)

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.special import factorial
    >>> arr = np.array([3, 4, 5])
    >>> factorial(arr, exact=False)
    array([ 6., 24., 120.])
    >>> factorial(arr, exact=True)
    array([ 6, 24, 120])
    >>> factorial(5, exact=True)
    120

    """
    # don't use isscalar due to numpy/numpy#23574; 0-dim arrays treated below
    if np.ndim(n) == 0 and not isinstance(n, np.ndarray):
        # scalar cases
        if n is None or np.isnan(n):
            return np.nan
        elif not (np.issubdtype(type(n), np.integer)
                  or np.issubdtype(type(n), np.floating)):
            raise ValueError(
                f"Unsupported datatype for factorial: {type(n)}\n"
                "Permitted data types are integers and floating point numbers"
            )
        elif n < 0:
            return 0
        elif exact and np.issubdtype(type(n), np.integer):
            return math.factorial(n)
        # we do not raise for non-integers with exact=True due to
        # historical reasons, though deprecation would be possible
        return _ufuncs._factorial(n)
    # arrays & array-likes
    n = asarray(n)
    if n.size == 0:
        # return empty arrays unchanged
        return n
    if not (np.issubdtype(n.dtype, np.integer)
            or np.issubdtype(n.dtype, np.floating)):
        raise ValueError(
            f"Unsupported datatype for factorial: {n.dtype}\n"
            "Permitted data types are integers and floating point numbers"
        )
    if exact and not np.issubdtype(n.dtype, np.integer):
        # legacy behaviour is to support mixed integers/NaNs;
        # deprecate this for exact=True
        n_flt = n[~np.isnan(n)]
        if np.allclose(n_flt, n_flt.astype(np.int64)):
            # message typo fixed: previously read "that the the array"
            warnings.warn(
                "Non-integer arrays (e.g. due to presence of NaNs) "
                "together with exact=True are deprecated. Either ensure "
                "that the array has integer dtype or use exact=False.",
                DeprecationWarning,
                stacklevel=2
            )
        else:
            msg = ("factorial with exact=True does not "
                   "support non-integral arrays")
            raise ValueError(msg)
    if exact:
        return _exact_factorialx_array(n)
    # we do not raise for non-integers with exact=True due to
    # historical reasons, though deprecation would be possible
    res = _ufuncs._factorial(n)
    if isinstance(n, np.ndarray):
        # _ufuncs._factorial does not maintain 0-dim arrays
        return np.array(res)
    return res
def factorial2(n, exact=False):
    """Double factorial.

    This is the factorial with every second value skipped. E.g., ``7!! = 7 * 5
    * 3 * 1``. It can be approximated numerically as::

        n!! = 2 ** (n / 2) * gamma(n / 2 + 1) * sqrt(2 / pi)  n odd
            = 2 ** (n / 2) * gamma(n / 2 + 1)                 n even
            = 2 ** (n / 2) * (n / 2)!                         n even

    Parameters
    ----------
    n : int or array_like
        Calculate ``n!!``.  If ``n < 0``, the return value is 0.
    exact : bool, optional
        The result can be approximated rapidly using the gamma-formula
        above (default). If `exact` is set to True, calculate the
        answer exactly using integer arithmetic.

    Returns
    -------
    nff : float or int
        Double factorial of `n`, as an int or a float depending on
        `exact`.

    Examples
    --------
    >>> from scipy.special import factorial2
    >>> factorial2(7, exact=False)
    array(105.00000000000001)
    >>> factorial2(7, exact=True)
    105
    """
    def _approx(n):
        # main factor that both even/odd approximations share
        val = np.power(2, n / 2) * gamma(n / 2 + 1)
        mask = np.ones_like(n, dtype=np.float64)
        mask[n % 2 == 1] = sqrt(2 / pi)
        # analytical continuation (based on odd integers)
        # is scaled down by a factor of sqrt(2 / pi)
        # compared to the value of even integers.
        return val * mask
    # don't use isscalar due to numpy/numpy#23574; 0-dim arrays treated below
    if np.ndim(n) == 0 and not isinstance(n, np.ndarray):
        # scalar cases
        if n is None or np.isnan(n):
            return np.nan
        elif not np.issubdtype(type(n), np.integer):
            msg = "factorial2 does not support non-integral scalar arguments"
            raise ValueError(msg)
        elif n < 0:
            return 0
        elif n in {0, 1}:
            # 0!! == 1!! == 1 by convention
            return 1
        # general integer case
        if exact:
            # step size 2 yields the double factorial
            return _range_prod(1, n, k=2)
        return _approx(n)
    # arrays & array-likes
    n = asarray(n)
    if n.size == 0:
        # return empty arrays unchanged
        return n
    if not np.issubdtype(n.dtype, np.integer):
        raise ValueError("factorial2 does not support non-integral arrays")
    if exact:
        return _exact_factorialx_array(n, k=2)
    # approximation; negative entries are defined to yield 0
    vals = zeros(n.shape)
    cond = (n >= 0)
    n_to_compute = extract(cond, n)
    place(vals, cond, _approx(n_to_compute))
    return vals
def factorialk(n, k, exact=True):
    """Multifactorial of n of order k, n(!!...!).

    This is the multifactorial of n skipping k values.  For example,

      factorialk(17, 4) = 17!!!! = 17 * 13 * 9 * 5 * 1

    In particular, for any integer ``n``, we have

      factorialk(n, 1) = factorial(n)

      factorialk(n, 2) = factorial2(n)

    Parameters
    ----------
    n : int or array_like
        Calculate multifactorial. If `n` < 0, the return value is 0.
    k : int
        Order of multifactorial.
    exact : bool, optional
        If exact is set to True, calculate the answer exactly using
        integer arithmetic.

    Returns
    -------
    val : int
        Multifactorial of `n`.

    Raises
    ------
    NotImplementedError
        Raises when exact is False

    Examples
    --------
    >>> from scipy.special import factorialk
    >>> factorialk(5, 1, exact=True)
    120
    >>> factorialk(5, 3, exact=True)
    10

    """
    if not np.issubdtype(type(k), np.integer) or k < 1:
        raise ValueError(f"k must be a positive integer, received: {k}")
    if not exact:
        # no floating-point approximation is implemented for general k
        raise NotImplementedError
    helpmsg = ""
    if k in {1, 2}:
        # point users to the dedicated functions, which also support
        # exact=False
        func = "factorial" if k == 1 else "factorial2"
        helpmsg = f"\nYou can try to use {func} instead"
    # don't use isscalar due to numpy/numpy#23574; 0-dim arrays treated below
    if np.ndim(n) == 0 and not isinstance(n, np.ndarray):
        # scalar cases
        if n is None or np.isnan(n):
            return np.nan
        elif not np.issubdtype(type(n), np.integer):
            msg = "factorialk does not support non-integral scalar arguments!"
            raise ValueError(msg + helpmsg)
        elif n < 0:
            return 0
        elif n in {0, 1}:
            return 1
        return _range_prod(1, n, k=k)
    # arrays & array-likes
    n = asarray(n)
    if n.size == 0:
        # return empty arrays unchanged
        return n
    if not np.issubdtype(n.dtype, np.integer):
        msg = "factorialk does not support non-integral arrays!"
        raise ValueError(msg + helpmsg)
    return _exact_factorialx_array(n, k=k)
def zeta(x, q=None, out=None):
    r"""
    Riemann or Hurwitz zeta function.

    Parameters
    ----------
    x : array_like of float
        Input data, must be real
    q : array_like of float, optional
        Input data, must be real.  Defaults to Riemann zeta.
    out : ndarray, optional
        Output array for the computed values.

    Returns
    -------
    out : array_like
        Values of zeta(x).

    Notes
    -----
    The two-argument version is the Hurwitz zeta function

    .. math::

        \zeta(x, q) = \sum_{k=0}^{\infty} \frac{1}{(k + q)^x};

    see [dlmf]_ for details. The Riemann zeta function corresponds to
    the case when ``q = 1``.

    See Also
    --------
    zetac

    References
    ----------
    .. [dlmf] NIST, Digital Library of Mathematical Functions,
        https://dlmf.nist.gov/25.11#i

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.special import zeta, polygamma, factorial

    Some specific values:

    >>> zeta(2), np.pi**2/6
    (1.6449340668482266, 1.6449340668482264)

    >>> zeta(4), np.pi**4/90
    (1.0823232337111381, 1.082323233711138)

    Relation to the `polygamma` function:

    >>> m = 3
    >>> x = 1.25
    >>> polygamma(m, x)
    array(2.782144009188397)
    >>> (-1)**(m+1) * factorial(m) * zeta(m+1, x)
    2.7821440091883969

    """
    if q is None:
        # one-argument form: Riemann zeta, i.e. Hurwitz zeta with q = 1
        return _ufuncs._riemann_zeta(x, out)
    else:
        return _ufuncs._zeta(x, q, out)
| 95,701
| 29.29503
| 96
|
py
|
scipy
|
scipy-main/scipy/special/_sf_error.py
|
"""Warnings and Exceptions that can be raised by special functions."""
import warnings
class SpecialFunctionWarning(Warning):
    """Warning that can be emitted by special functions."""
# Always emit SpecialFunctionWarning, rather than the default behaviour of
# showing each distinct warning only once per call site.
warnings.simplefilter("always", category=SpecialFunctionWarning)
class SpecialFunctionError(Exception):
    """Exception that can be raised by special functions."""
| 375
| 22.5
| 70
|
py
|
scipy
|
scipy-main/scipy/special/_orthogonal.py
|
"""
A collection of functions to find the weights and abscissas for
Gaussian Quadrature.
These calculations are done by finding the eigenvalues of a
tridiagonal matrix whose entries are dependent on the coefficients
in the recursion formula for the orthogonal polynomials with the
corresponding weighting function over the interval.
Many recursion relations for orthogonal polynomials are given:
.. math::
a1n f_{n+1} (x) = (a2n + a3n x ) f_n (x) - a4n f_{n-1} (x)
The recursion relation of interest is
.. math::
P_{n+1} (x) = (x - A_n) P_n (x) - B_n P_{n-1} (x)
where :math:`P` has a different normalization than :math:`f`.
The coefficients can be found as:
.. math::
A_n = -a2n / a3n
\\qquad
B_n = ( a4n / a3n \\sqrt{h_n-1 / h_n})^2
where
.. math::
h_n = \\int_a^b w(x) f_n(x)^2
assume:
.. math::
P_0 (x) = 1
\\qquad
P_{-1} (x) == 0
For the mathematical background, see [golub.welsch-1969-mathcomp]_ and
[abramowitz.stegun-1965]_.
References
----------
.. [golub.welsch-1969-mathcomp]
Golub, Gene H, and John H Welsch. 1969. Calculation of Gauss
Quadrature Rules. *Mathematics of Computation* 23, 221-230+s1--s10.
.. [abramowitz.stegun-1965]
Abramowitz, Milton, and Irene A Stegun. (1965) *Handbook of
Mathematical Functions: with Formulas, Graphs, and Mathematical
Tables*. Gaithersburg, MD: National Bureau of Standards.
http://www.math.sfu.ca/~cbm/aands/
.. [townsend.trogdon.olver-2014]
Townsend, A. and Trogdon, T. and Olver, S. (2014)
*Fast computation of Gauss quadrature nodes and
weights on the whole real line*. :arXiv:`1410.5286`.
.. [townsend.trogdon.olver-2015]
Townsend, A. and Trogdon, T. and Olver, S. (2015)
*Fast computation of Gauss quadrature nodes and
weights on the whole real line*.
IMA Journal of Numerical Analysis
:doi:`10.1093/imanum/drv002`.
"""
#
# Author: Travis Oliphant 2000
# Updated Sep. 2003 (fixed bugs --- tested to be accurate)
# SciPy imports.
import numpy as np
from numpy import (exp, inf, pi, sqrt, floor, sin, cos, around,
hstack, arccos, arange)
from scipy import linalg
from scipy.special import airy
# Local imports.
# There is no .pyi file for _specfun
from . import _specfun # type: ignore
from . import _ufuncs
_gam = _ufuncs.gamma
# Names of the orthogonal-polynomial constructor functions exported from
# this module.
_polyfuns = ['legendre', 'chebyt', 'chebyu', 'chebyc', 'chebys',
             'jacobi', 'laguerre', 'genlaguerre', 'hermite',
             'hermitenorm', 'gegenbauer', 'sh_legendre', 'sh_chebyt',
             'sh_chebyu', 'sh_jacobi']
# Correspondence between new and old names of root functions
_rootfuns_map = {'roots_legendre': 'p_roots',
                 'roots_chebyt': 't_roots',
                 'roots_chebyu': 'u_roots',
                 'roots_chebyc': 'c_roots',
                 'roots_chebys': 's_roots',
                 'roots_jacobi': 'j_roots',
                 'roots_laguerre': 'l_roots',
                 'roots_genlaguerre': 'la_roots',
                 'roots_hermite': 'h_roots',
                 'roots_hermitenorm': 'he_roots',
                 'roots_gegenbauer': 'cg_roots',
                 'roots_sh_legendre': 'ps_roots',
                 'roots_sh_chebyt': 'ts_roots',
                 'roots_sh_chebyu': 'us_roots',
                 'roots_sh_jacobi': 'js_roots'}
# Public API: the constructors plus the new-style roots_* quadrature names.
__all__ = _polyfuns + list(_rootfuns_map.keys())
class orthopoly1d(np.poly1d):
    """A 1-D polynomial of an orthogonal family, with quadrature data.

    Extends `numpy.poly1d` with the Gauss-quadrature roots/weights
    (``weights``), the family's weight function (``weight_func``),
    integration limits (``limits``) and normalization constant
    (``normcoef``), plus an optional fast evaluator ``_eval_func`` used
    instead of coefficient-based evaluation for numeric arguments.
    """

    def __init__(self, roots, weights=None, hn=1.0, kn=1.0, wfunc=None,
                 limits=None, monic=False, eval_func=None):
        # weights for integrating a plain f(x), i.e. with the weight
        # function divided back out at each node
        equiv_weights = [weights[k] / wfunc(roots[k]) for
                         k in range(len(roots))]
        # norm of the polynomial: sqrt(h_n)
        mu = sqrt(hn)
        if monic:
            evf = eval_func
            if evf:
                knn = kn

                # rescale the fast evaluator to match the monic
                # (leading coefficient 1) polynomial
                def eval_func(x):
                    return evf(x) / knn
            mu = mu / abs(kn)
            kn = 1.0
        # compute coefficients from roots, then scale
        poly = np.poly1d(roots, r=True)
        np.poly1d.__init__(self, poly.coeffs * float(kn))
        # rows of (root, quadrature weight, equivalent weight)
        self.weights = np.array(list(zip(roots, weights, equiv_weights)))
        self.weight_func = wfunc
        self.limits = limits
        self.normcoef = mu
        # Note: eval_func will be discarded on arithmetic
        self._eval_func = eval_func

    def __call__(self, v):
        # prefer the fast evaluator for numeric arguments; composition
        # with another poly1d must go through coefficient evaluation
        if self._eval_func and not isinstance(v, np.poly1d):
            return self._eval_func(v)
        else:
            return np.poly1d.__call__(self, v)

    def _scale(self, p):
        # multiply the polynomial (coefficients, evaluator and norm) by
        # the scalar p, in place
        if p == 1.0:
            return
        self._coeffs *= p
        evf = self._eval_func
        if evf:
            self._eval_func = lambda x: evf(x) * p
        self.normcoef *= p
def _gen_roots_and_weights(n, mu0, an_func, bn_func, f, df, symmetrize, mu):
    """[x,w] = gen_roots_and_weights(n,an_func,sqrt_bn_func,mu)

    Returns the roots (x) of an nth order orthogonal polynomial,
    and weights (w) to use in appropriate Gaussian quadrature with that
    orthogonal polynomial.

    The polynomials have the recurrence relation
        P_n+1(x) = (x - A_n) P_n(x) - B_n P_n-1(x)

    an_func(n)          should return A_n
    sqrt_bn_func(n)     should return sqrt(B_n)
    mu ( = h_0 )        is the integral of the weight over the orthogonal
    interval
    """
    k = np.arange(n, dtype='d')
    # Golub-Welsch: the roots are the eigenvalues of the symmetric
    # tridiagonal Jacobi matrix (diagonal A_k, off-diagonal sqrt(B_k)),
    # stored here in banded form
    c = np.zeros((2, n))
    c[0,1:] = bn_func(k[1:])
    c[1,:] = an_func(k)
    x = linalg.eigvals_banded(c, overwrite_a_band=True)
    # improve roots by one application of Newton's method
    y = f(n, x)
    dy = df(n, x)
    x -= y/dy
    # fm and dy may contain very large/small values, so we
    # log-normalize them to maintain precision in the product fm*dy
    fm = f(n-1, x)
    log_fm = np.log(np.abs(fm))
    log_dy = np.log(np.abs(dy))
    fm /= np.exp((log_fm.max() + log_fm.min()) / 2.)
    dy /= np.exp((log_dy.max() + log_dy.min()) / 2.)
    # weights are proportional to 1 / (P_{n-1}(x_i) * P_n'(x_i));
    # the absolute scale is fixed by the normalization below
    w = 1.0 / (fm * dy)
    if symmetrize:
        # enforce the exact root/weight symmetry of even weight functions
        w = (w + w[::-1]) / 2
        x = (x - x[::-1]) / 2
    # normalize so the weights sum to mu0, the integral of the weight
    w *= mu0 / w.sum()
    if mu:
        return x, w, mu0
    else:
        return x, w
# Jacobi Polynomials 1 P^(alpha,beta)_n(x)
def roots_jacobi(n, alpha, beta, mu=False):
    r"""Gauss-Jacobi quadrature.

    Compute the sample points and weights for Gauss-Jacobi
    quadrature. The sample points are the roots of the nth degree
    Jacobi polynomial, :math:`P^{\alpha, \beta}_n(x)`. These sample
    points and weights correctly integrate polynomials of degree
    :math:`2n - 1` or less over the interval :math:`[-1, 1]` with
    weight function :math:`w(x) = (1 - x)^{\alpha} (1 +
    x)^{\beta}`. See 22.2.1 in [AS]_ for details.

    Parameters
    ----------
    n : int
        quadrature order
    alpha : float
        alpha must be > -1
    beta : float
        beta must be > -1
    mu : bool, optional
        If True, return the sum of the weights, optional.

    Returns
    -------
    x : ndarray
        Sample points
    w : ndarray
        Weights
    mu : float
        Sum of the weights

    See Also
    --------
    scipy.integrate.quadrature
    scipy.integrate.fixed_quad

    References
    ----------
    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
        Handbook of Mathematical Functions with Formulas,
        Graphs, and Mathematical Tables. New York: Dover, 1972.

    """
    m = int(n)
    if n < 1 or n != m:
        raise ValueError("n must be a positive integer.")
    if alpha <= -1 or beta <= -1:
        raise ValueError("alpha and beta must be greater than -1.")
    # special cases with dedicated (more robust) implementations
    if alpha == 0.0 and beta == 0.0:
        # Legendre: alpha = beta = 0
        return roots_legendre(m, mu)
    if alpha == beta:
        # symmetric weight reduces to Gegenbauer
        return roots_gegenbauer(m, alpha+0.5, mu)
    # mu0 = integral of the weight function over [-1, 1]
    if (alpha + beta) <= 1000:
        mu0 = 2.0**(alpha+beta+1) * _ufuncs.beta(alpha+1, beta+1)
    else:
        # Avoid overflows in pow and beta for very large parameters
        mu0 = np.exp((alpha + beta + 1) * np.log(2.0)
                     + _ufuncs.betaln(alpha+1, beta+1))
    a = alpha
    b = beta
    # three-term recurrence coefficients A_k (an_func) and sqrt(B_k)
    # (bn_func) fed to the Golub-Welsch solver
    if a + b == 0.0:
        def an_func(k):
            return np.where(k == 0, (b - a) / (2 + a + b), 0.0)
    else:
        def an_func(k):
            return np.where(k == 0, (b - a) / (2 + a + b), (b * b - a * a) / ((2.0 * k + a + b) * (2.0 * k + a + b + 2)))
    def bn_func(k):
        return 2.0 / (2.0 * k + a + b) * np.sqrt((k + a) * (k + b) / (2 * k + a + b + 1)) * np.where(k == 1, 1.0, np.sqrt(k * (k + a + b) / (2.0 * k + a + b - 1)))
    def f(n, x):
        return _ufuncs.eval_jacobi(n, a, b, x)
    def df(n, x):
        # d/dx P_n^(a,b)(x) = (n + a + b + 1)/2 * P_{n-1}^(a+1,b+1)(x)
        return 0.5 * (n + a + b + 1) * _ufuncs.eval_jacobi(n - 1, a + 1, b + 1, x)
    return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, False, mu)
def jacobi(n, alpha, beta, monic=False):
    r"""Jacobi polynomial.

    Defined to be the solution of

    .. math::

        (1 - x^2)\frac{d^2}{dx^2}P_n^{(\alpha, \beta)}
          + (\beta - \alpha - (\alpha + \beta + 2)x)
            \frac{d}{dx}P_n^{(\alpha, \beta)}
          + n(n + \alpha + \beta + 1)P_n^{(\alpha, \beta)} = 0

    for :math:`\alpha, \beta > -1`; :math:`P_n^{(\alpha, \beta)}` is a
    polynomial of degree :math:`n`.

    Parameters
    ----------
    n : int
        Degree of the polynomial.
    alpha : float
        Parameter, must be greater than -1.
    beta : float
        Parameter, must be greater than -1.
    monic : bool, optional
        If `True`, scale the leading coefficient to be 1. Default is
        `False`.

    Returns
    -------
    P : orthopoly1d
        Jacobi polynomial.

    Notes
    -----
    For fixed :math:`\alpha, \beta`, the polynomials
    :math:`P_n^{(\alpha, \beta)}` are orthogonal over :math:`[-1, 1]`
    with weight function :math:`(1 - x)^\alpha(1 + x)^\beta`.

    References
    ----------
    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
        Handbook of Mathematical Functions with Formulas,
        Graphs, and Mathematical Tables. New York: Dover, 1972.

    Examples
    --------
    The Jacobi polynomials satisfy the recurrence relation:

    .. math::

        P_n^{(\alpha, \beta-1)}(x) - P_n^{(\alpha-1, \beta)}(x)
          = P_{n-1}^{(\alpha, \beta)}(x)

    This can be verified, for example, for :math:`\alpha = \beta = 2`
    and :math:`n = 1` over the interval :math:`[-1, 1]`:

    >>> import numpy as np
    >>> from scipy.special import jacobi
    >>> x = np.arange(-1.0, 1.0, 0.01)
    >>> np.allclose(jacobi(0, 2, 2)(x),
    ...             jacobi(1, 2, 1)(x) - jacobi(1, 1, 2)(x))
    True

    Plot of the Jacobi polynomial :math:`P_5^{(\alpha, -0.5)}` for
    different values of :math:`\alpha`:

    >>> import matplotlib.pyplot as plt
    >>> x = np.arange(-1.0, 1.0, 0.01)
    >>> fig, ax = plt.subplots()
    >>> ax.set_ylim(-2.0, 2.0)
    >>> ax.set_title(r'Jacobi polynomials $P_5^{(\alpha, -0.5)}$')
    >>> for alpha in np.arange(0, 4, 1):
    ...     ax.plot(x, jacobi(5, alpha, -0.5)(x), label=rf'$\alpha={alpha}$')
    >>> plt.legend(loc='best')
    >>> plt.show()

    """
    if n < 0:
        raise ValueError("n must be nonnegative.")

    def wfunc(x):
        return (1 - x) ** alpha * (1 + x) ** beta

    if n == 0:
        # degree 0: the constant polynomial 1
        return orthopoly1d([], [], 1.0, 1.0, wfunc, (-1, 1), monic,
                           eval_func=np.ones_like)
    x, w, mu = roots_jacobi(n, alpha, beta, mu=True)
    ab1 = alpha + beta + 1.0
    # hn: squared norm of P_n under the Jacobi weight
    hn = 2**ab1 / (2 * n + ab1) * _gam(n + alpha + 1)
    hn *= _gam(n + beta + 1.0) / _gam(n + 1) / _gam(n + ab1)
    kn = _gam(2 * n + ab1) / 2.0**n / _gam(n + 1) / _gam(n + ab1)
    # here kn = coefficient on x^n term
    p = orthopoly1d(x, w, hn, kn, wfunc, (-1, 1), monic,
                    lambda x: _ufuncs.eval_jacobi(n, alpha, beta, x))
    return p
# Jacobi Polynomials shifted G_n(p,q,x)
def roots_sh_jacobi(n, p1, q1, mu=False):
    """Gauss-Jacobi (shifted) quadrature.

    Compute the sample points and weights for Gauss-Jacobi (shifted)
    quadrature. The sample points are the roots of the nth degree
    shifted Jacobi polynomial, :math:`G^{p,q}_n(x)`. These sample
    points and weights correctly integrate polynomials of degree
    :math:`2n - 1` or less over the interval :math:`[0, 1]` with
    weight function :math:`w(x) = (1 - x)^{p-q} x^{q-1}`. See 22.2.2
    in [AS]_ for details.

    Parameters
    ----------
    n : int
        quadrature order
    p1 : float
        (p1 - q1) must be > -1
    q1 : float
        q1 must be > 0
    mu : bool, optional
        If True, return the sum of the weights, optional.

    Returns
    -------
    x : ndarray
        Sample points
    w : ndarray
        Weights
    mu : float
        Sum of the weights

    See Also
    --------
    scipy.integrate.quadrature
    scipy.integrate.fixed_quad

    References
    ----------
    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
        Handbook of Mathematical Functions with Formulas,
        Graphs, and Mathematical Tables. New York: Dover, 1972.

    """
    if (p1 - q1) <= -1 or q1 <= 0:
        raise ValueError("(p - q) must be greater than -1, and q must be greater than 0.")
    # start from the ordinary Gauss-Jacobi rule on [-1, 1] ...
    nodes, weights, total = roots_jacobi(n, p1 - q1, q1 - 1, True)
    # ... then map its nodes onto [0, 1] and rescale the weights to match
    nodes = (nodes + 1) / 2
    scale = 2.0**p1
    weights /= scale
    total /= scale
    if mu:
        return nodes, weights, total
    return nodes, weights
def sh_jacobi(n, p, q, monic=False):
    r"""Shifted Jacobi polynomial.

    Defined by

    .. math::

        G_n^{(p, q)}(x)
          = \binom{2n + p - 1}{n}^{-1}P_n^{(p - q, q - 1)}(2x - 1),

    where :math:`P_n^{(\cdot, \cdot)}` is the nth Jacobi polynomial.

    Parameters
    ----------
    n : int
        Degree of the polynomial.
    p : float
        Parameter, must have :math:`p > q - 1`.
    q : float
        Parameter, must be greater than 0.
    monic : bool, optional
        If `True`, scale the leading coefficient to be 1. Default is
        `False`.

    Returns
    -------
    G : orthopoly1d
        Shifted Jacobi polynomial.

    Notes
    -----
    For fixed :math:`p, q`, the polynomials :math:`G_n^{(p, q)}` are
    orthogonal over :math:`[0, 1]` with weight function :math:`(1 -
    x)^{p - q}x^{q - 1}`.

    """
    if n < 0:
        raise ValueError("n must be nonnegative.")

    def wfunc(x):
        return (1.0 - x) ** (p - q) * x ** (q - 1.0)

    if n == 0:
        # degree 0: the constant polynomial 1.  The orthogonality
        # interval of the *shifted* family is (0, 1), matching the n > 0
        # branch below (this previously, and inconsistently, said (-1, 1)).
        return orthopoly1d([], [], 1.0, 1.0, wfunc, (0, 1), monic,
                           eval_func=np.ones_like)
    n1 = n
    x, w = roots_sh_jacobi(n1, p, q)
    # hn: squared norm of G_n under the shifted Jacobi weight
    hn = _gam(n + 1) * _gam(n + q) * _gam(n + p) * _gam(n + p - q + 1)
    hn /= (2 * n + p) * (_gam(2 * n + p)**2)
    # kn = 1.0 in standard form so monic is redundant. Kept for compatibility.
    kn = 1.0
    pp = orthopoly1d(x, w, hn, kn, wfunc=wfunc, limits=(0, 1), monic=monic,
                     eval_func=lambda x: _ufuncs.eval_sh_jacobi(n, p, q, x))
    return pp
# Generalized Laguerre L^(alpha)_n(x)
def roots_genlaguerre(n, alpha, mu=False):
    r"""Gauss-generalized Laguerre quadrature.

    Compute the sample points and weights for Gauss-generalized
    Laguerre quadrature. The sample points are the roots of the nth
    degree generalized Laguerre polynomial, :math:`L^{\alpha}_n(x)`.
    These sample points and weights correctly integrate polynomials of
    degree :math:`2n - 1` or less over the interval :math:`[0,
    \infty]` with weight function :math:`w(x) = x^{\alpha}
    e^{-x}`. See 22.3.9 in [AS]_ for details.

    Parameters
    ----------
    n : int
        quadrature order
    alpha : float
        alpha must be > -1
    mu : bool, optional
        If True, return the sum of the weights, optional.

    Returns
    -------
    x : ndarray
        Sample points
    w : ndarray
        Weights
    mu : float
        Sum of the weights

    See Also
    --------
    scipy.integrate.quadrature
    scipy.integrate.fixed_quad

    References
    ----------
    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
        Handbook of Mathematical Functions with Formulas,
        Graphs, and Mathematical Tables. New York: Dover, 1972.

    """
    m = int(n)
    if n < 1 or n != m:
        raise ValueError("n must be a positive integer.")
    # NOTE(review): this check permits alpha == -1 although the message
    # (and the docstring) say "greater than -1"; gamma(alpha + 1) is then
    # infinite -- confirm whether `alpha <= -1` was intended.
    if alpha < -1:
        raise ValueError("alpha must be greater than -1.")
    # mu0 = integral of the weight x**alpha * exp(-x) over [0, inf)
    mu0 = _ufuncs.gamma(alpha + 1)
    if m == 1:
        # closed form for the single-node rule
        x = np.array([alpha+1.0], 'd')
        w = np.array([mu0], 'd')
        if mu:
            return x, w, mu0
        else:
            return x, w
    # three-term recurrence coefficients A_k and sqrt(B_k) for the
    # Golub-Welsch solver
    def an_func(k):
        return 2 * k + alpha + 1
    def bn_func(k):
        return -np.sqrt(k * (k + alpha))
    def f(n, x):
        return _ufuncs.eval_genlaguerre(n, alpha, x)
    def df(n, x):
        return (n * _ufuncs.eval_genlaguerre(n, alpha, x) - (n + alpha) * _ufuncs.eval_genlaguerre(n - 1, alpha, x)) / x
    return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, False, mu)
def genlaguerre(n, alpha, monic=False):
    r"""Generalized (associated) Laguerre polynomial.

    Returns the polynomial :math:`L_n^{(\alpha)}` of degree :math:`n`,
    defined as the solution of

    .. math::

        x\frac{d^2}{dx^2}L_n^{(\alpha)}
          + (\alpha + 1 - x)\frac{d}{dx}L_n^{(\alpha)}
          + nL_n^{(\alpha)} = 0,

    where :math:`\alpha > -1`.

    Parameters
    ----------
    n : int
        Degree of the polynomial.
    alpha : float
        Parameter, must be greater than -1.
    monic : bool, optional
        If `True`, scale the leading coefficient to be 1. Default is
        `False`.

    Returns
    -------
    L : orthopoly1d
        Generalized Laguerre polynomial.

    See Also
    --------
    laguerre : Laguerre polynomial.
    hyp1f1 : confluent hypergeometric function

    Notes
    -----
    For fixed :math:`\alpha`, the polynomials :math:`L_n^{(\alpha)}`
    are orthogonal over :math:`[0, \infty)` with weight function
    :math:`e^{-x}x^\alpha`.  The ordinary Laguerre polynomials are the
    special case :math:`\alpha = 0`, and :math:`L_n^{(\alpha)} =
    \binom{n + \alpha}{n} {}_1F_1(-n, \alpha + 1, x)` relates them to
    the confluent hypergeometric function.

    References
    ----------
    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
        Handbook of Mathematical Functions with Formulas,
        Graphs, and Mathematical Tables. New York: Dover, 1972.
    """
    if alpha <= -1:
        raise ValueError("alpha must be > -1")
    if n < 0:
        raise ValueError("n must be nonnegative.")

    # Quadrature nodes/weights are attached to the returned polynomial;
    # a degree-0 polynomial carries none.
    if n == 0:
        x, w = [], []
    else:
        x, w = roots_genlaguerre(n, alpha)
    # Squared norm h_n and leading coefficient k_n (AS 22.2.12).
    hn = _gam(n + alpha + 1) / _gam(n + 1)
    kn = (-1) ** n / _gam(n + 1)
    return orthopoly1d(x, w, hn, kn,
                       wfunc=lambda u: exp(-u) * u ** alpha,
                       limits=(0, inf), monic=monic,
                       eval_func=lambda u: _ufuncs.eval_genlaguerre(n, alpha, u))
# Laguerre L_n(x)
def roots_laguerre(n, mu=False):
    r"""Gauss-Laguerre quadrature.

    Compute the sample points and weights for Gauss-Laguerre
    quadrature.  The sample points are the roots of the nth degree
    Laguerre polynomial :math:`L_n(x)`; together with the weights they
    integrate polynomials of degree :math:`2n - 1` or less exactly
    over :math:`[0, \infty]` against the weight :math:`w(x) = e^{-x}`.
    See 22.2.13 in [AS]_ for details.

    Parameters
    ----------
    n : int
        quadrature order
    mu : bool, optional
        If True, return the sum of the weights, optional.

    Returns
    -------
    x : ndarray
        Sample points
    w : ndarray
        Weights
    mu : float
        Sum of the weights

    See Also
    --------
    scipy.integrate.quadrature
    scipy.integrate.fixed_quad
    numpy.polynomial.laguerre.laggauss

    References
    ----------
    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
        Handbook of Mathematical Functions with Formulas,
        Graphs, and Mathematical Tables. New York: Dover, 1972.
    """
    # Plain Laguerre is generalized Laguerre with alpha = 0.
    return roots_genlaguerre(n, 0.0, mu=mu)
def laguerre(n, monic=False):
    r"""Laguerre polynomial.

    Returns the polynomial :math:`L_n` of degree :math:`n`, defined as
    the solution of

    .. math::

        x\frac{d^2}{dx^2}L_n + (1 - x)\frac{d}{dx}L_n + nL_n = 0.

    Parameters
    ----------
    n : int
        Degree of the polynomial.
    monic : bool, optional
        If `True`, scale the leading coefficient to be 1. Default is
        `False`.

    Returns
    -------
    L : orthopoly1d
        Laguerre Polynomial.

    See Also
    --------
    genlaguerre : Generalized (associated) Laguerre polynomial.

    Notes
    -----
    The polynomials :math:`L_n` are orthogonal over :math:`[0,
    \infty)` with weight function :math:`e^{-x}`.  They satisfy the
    recurrence :math:`(n + 1)L_{n+1}(x) = (2n + 1 - x)L_n(x) -
    nL_{n-1}(x)`.

    References
    ----------
    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
        Handbook of Mathematical Functions with Formulas,
        Graphs, and Mathematical Tables. New York: Dover, 1972.
    """
    if n < 0:
        raise ValueError("n must be nonnegative.")

    # A degree-0 polynomial carries no quadrature information.
    if n == 0:
        x, w = [], []
    else:
        x, w = roots_laguerre(n)
    # Squared norm is 1 in the standard normalization; leading
    # coefficient is (-1)**n / n!  (AS 22.2.13).
    hn = 1.0
    kn = (-1) ** n / _gam(n + 1)
    return orthopoly1d(x, w, hn, kn, lambda u: exp(-u), (0, inf), monic,
                       lambda u: _ufuncs.eval_laguerre(n, u))
# Hermite 1 H_n(x)
def roots_hermite(n, mu=False):
    r"""Gauss-Hermite (physicist's) quadrature.

    Compute the sample points and weights for Gauss-Hermite
    quadrature.  The sample points are the roots of the nth degree
    Hermite polynomial :math:`H_n(x)`; together with the weights they
    integrate polynomials of degree :math:`2n - 1` or less exactly
    over :math:`[-\infty, \infty]` against the weight :math:`w(x) =
    e^{-x^2}`. See 22.2.14 in [AS]_ for details.

    Parameters
    ----------
    n : int
        quadrature order
    mu : bool, optional
        If True, return the sum of the weights, optional.

    Returns
    -------
    x : ndarray
        Sample points
    w : ndarray
        Weights
    mu : float
        Sum of the weights

    Notes
    -----
    For n up to 150 a modified Golub-Welsch algorithm is used: nodes
    come from an eigenvalue problem and are polished by one Newton
    step, while the weights follow from the analytical formula.  For
    larger n an asymptotic algorithm with linear runtime computes
    nodes and weights in a numerically stable manner, making very
    large orders (several thousand or more) feasible.

    See Also
    --------
    scipy.integrate.quadrature
    scipy.integrate.fixed_quad
    numpy.polynomial.hermite.hermgauss
    roots_hermitenorm

    References
    ----------
    .. [townsend.trogdon.olver-2014]
        Townsend, A. and Trogdon, T. and Olver, S. (2014)
        *Fast computation of Gauss quadrature nodes and
        weights on the whole real line*. :arXiv:`1410.5286`.
    .. [townsend.trogdon.olver-2015]
        Townsend, A. and Trogdon, T. and Olver, S. (2015)
        *Fast computation of Gauss quadrature nodes and
        weights on the whole real line*.
        IMA Journal of Numerical Analysis
        :doi:`10.1093/imanum/drv002`.
    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
        Handbook of Mathematical Functions with Formulas,
        Graphs, and Mathematical Tables. New York: Dover, 1972.
    """
    m = int(n)
    if n < 1 or n != m:
        raise ValueError("n must be a positive integer.")

    # Zeroth moment of exp(-x**2).
    mu0 = np.sqrt(np.pi)

    if m > 150:
        # Large order: O(n) asymptotic algorithm.
        nodes, weights = _roots_hermite_asy(m)
        return (nodes, weights, mu0) if mu else (nodes, weights)

    # Small order: recurrence coefficients for Golub-Welsch; the nodes
    # are symmetric about zero (hence symmetrize=True below).
    def an_func(k):
        return 0.0 * k

    def bn_func(k):
        return np.sqrt(k / 2.0)

    def df(k, x):
        # d/dx H_k(x) = 2 k H_{k-1}(x)
        return 2.0 * k * _ufuncs.eval_hermite(k - 1, x)

    return _gen_roots_and_weights(m, mu0, an_func, bn_func,
                                  _ufuncs.eval_hermite, df, True, mu)
def _compute_tauk(n, k, maxit=5):
"""Helper function for Tricomi initial guesses
For details, see formula 3.1 in lemma 3.1 in the
original paper.
Parameters
----------
n : int
Quadrature order
k : ndarray of type int
Index of roots :math:`\tau_k` to compute
maxit : int
Number of Newton maxit performed, the default
value of 5 is sufficient.
Returns
-------
tauk : ndarray
Roots of equation 3.1
See Also
--------
initial_nodes_a
roots_hermite_asy
"""
a = n % 2 - 0.5
c = (4.0*floor(n/2.0) - 4.0*k + 3.0)*pi / (4.0*floor(n/2.0) + 2.0*a + 2.0)
def f(x):
return x - sin(x) - c
def df(x):
return 1.0 - cos(x)
xi = 0.5*pi
for i in range(maxit):
xi = xi - f(xi)/df(xi)
return xi
def _initial_nodes_a(n, k):
    r"""Tricomi initial guesses

    Computes an initial approximation to the square of the `k`-th
    (positive) root :math:`x_k` of the Hermite polynomial :math:`H_n`
    of order :math:`n`. The formula is the one from lemma 3.1 in the
    original paper. The guesses are accurate except in the region
    near :math:`\sqrt{2n + 1}`.

    Parameters
    ----------
    n : int
        Quadrature order
    k : ndarray of type int
        Index of roots to compute

    Returns
    -------
    xksq : ndarray
        Square of the approximate roots

    See Also
    --------
    initial_nodes
    roots_hermite_asy
    """
    # Angles tau_k from Newton-solving x - sin(x) = c (lemma 3.1).
    tauk = _compute_tauk(n, k)
    sigk = cos(0.5*tauk)**2
    # a = -1/2 for even n, +1/2 for odd n; with this, nu = 2n + 1.
    a = n % 2 - 0.5
    nu = 4.0*floor(n/2.0) + 2.0*a + 2.0
    # Initial approximation of Hermite roots (square)
    xksq = nu*sigk - 1.0/(3.0*nu) * (5.0/(4.0*(1.0-sigk)**2) - 1.0/(1.0-sigk) - 0.25)
    return xksq
def _initial_nodes_b(n, k):
    r"""Gatteschi initial guesses

    Computes an initial approximation to the square of the kth
    (positive) root :math:`x_k` of the Hermite polynomial :math:`H_n`
    of order :math:`n`. The formula is the one from lemma 3.2 in the
    original paper. The guesses are accurate in the region just
    below :math:`\sqrt{2n + 1}`.

    Parameters
    ----------
    n : int
        Quadrature order
    k : ndarray of type int
        Index of roots to compute

    Returns
    -------
    xksq : ndarray
        Square of the approximate root

    See Also
    --------
    initial_nodes
    roots_hermite_asy
    """
    # a = -1/2 for even n, +1/2 for odd n; with this, nu = 2n + 1.
    a = n % 2 - 0.5
    nu = 4.0*floor(n/2.0) + 2.0*a + 2.0
    # Airy roots by approximation
    # (airyzo returns the first k.max() negative zeros of Ai;
    # reversed so ak lines up with the descending index array k).
    ak = _specfun.airyzo(k.max(), 1)[0][::-1]
    # Initial approximation of Hermite roots (square):
    # asymptotic expansion in powers of nu**(-1/3) (lemma 3.2).
    xksq = (nu +
            2.0**(2.0/3.0) * ak * nu**(1.0/3.0) +
            1.0/5.0 * 2.0**(4.0/3.0) * ak**2 * nu**(-1.0/3.0) +
            (9.0/140.0 - 12.0/175.0 * ak**3) * nu**(-1.0) +
            (16.0/1575.0 * ak + 92.0/7875.0 * ak**4) * 2.0**(2.0/3.0) * nu**(-5.0/3.0) -
            (15152.0/3031875.0 * ak**5 + 1088.0/121275.0 * ak**2) * 2.0**(1.0/3.0) * nu**(-7.0/3.0))
    return xksq
def _initial_nodes(n):
    """Initial guesses for the Hermite roots

    Computes an initial approximation to the non-negative
    roots :math:`x_k` of the Hermite polynomial :math:`H_n`
    of order :math:`n`. The Tricomi and Gatteschi initial
    guesses are used in the region where they are accurate.

    Parameters
    ----------
    n : int
        Quadrature order

    Returns
    -------
    xk : ndarray
        Approximate roots, in increasing order

    See Also
    --------
    roots_hermite_asy
    """
    # Turnover point
    # linear polynomial fit to error of 10, 25, 40, ..., 1000 point rules
    fit = 0.49082003*n - 4.37859653
    turnover = around(fit).astype(int)
    # Compute all approximations
    ia = arange(1, int(floor(n*0.5)+1))
    ib = ia[::-1]
    # Tricomi guesses cover the part of the spectrum away from
    # sqrt(2n+1); Gatteschi guesses cover the part near sqrt(2n+1).
    # The split index `turnover` selects whichever is more accurate.
    xasq = _initial_nodes_a(n, ia[:turnover+1])
    xbsq = _initial_nodes_b(n, ib[turnover+1:])
    # Combine the squared guesses and take the square root.
    iv = sqrt(hstack([xasq, xbsq]))
    # Central node is always zero
    if n % 2 == 1:
        iv = hstack([0.0, iv])
    return iv
def _pbcf(n, theta):
    r"""Asymptotic series expansion of parabolic cylinder function

    The implementation is based on sections 3.2 and 3.3 from the
    original paper. Compared to the published version this code
    adds one more term to the asymptotic series. The detailed
    formulas can be found at [parabolic-asymptotics]_. The evaluation
    is done in a transformed variable :math:`\theta := \arccos(t)`
    where :math:`t := x / \mu` and :math:`\mu := \sqrt{2n + 1}`.

    Parameters
    ----------
    n : int
        Quadrature order
    theta : ndarray
        Transformed position variable

    Returns
    -------
    U : ndarray
        Value of the parabolic cylinder function :math:`U(a, \theta)`.
    Ud : ndarray
        Value of the derivative :math:`U^{\prime}(a, \theta)` of
        the parabolic cylinder function.

    See Also
    --------
    roots_hermite_asy

    References
    ----------
    .. [parabolic-asymptotics]
       https://dlmf.nist.gov/12.10#vii
    """
    st = sin(theta)
    ct = cos(theta)
    # https://dlmf.nist.gov/12.10#vii
    mu = 2.0*n + 1.0
    # https://dlmf.nist.gov/12.10#E23
    eta = 0.5*theta - 0.5*st*ct
    # https://dlmf.nist.gov/12.10#E39
    zeta = -(3.0*eta/2.0) ** (2.0/3.0)
    # https://dlmf.nist.gov/12.10#E40
    phi = (-zeta / st**2) ** (0.25)
    # Coefficients
    # https://dlmf.nist.gov/12.10#E43
    a0 = 1.0
    a1 = 0.10416666666666666667
    a2 = 0.08355034722222222222
    a3 = 0.12822657455632716049
    a4 = 0.29184902646414046425
    a5 = 0.88162726744375765242
    b0 = 1.0
    b1 = -0.14583333333333333333
    b2 = -0.09874131944444444444
    b3 = -0.14331205391589506173
    b4 = -0.31722720267841354810
    b5 = -0.94242914795712024914
    # Polynomials u_k(t), v_k(t) evaluated at t = cos(theta);
    # ctp[j, :] holds ct**j for the powers used below.
    # https://dlmf.nist.gov/12.10#E9
    # https://dlmf.nist.gov/12.10#E10
    ctp = ct ** arange(16).reshape((-1,1))
    u0 = 1.0
    u1 = (1.0*ctp[3,:] - 6.0*ct) / 24.0
    u2 = (-9.0*ctp[4,:] + 249.0*ctp[2,:] + 145.0) / 1152.0
    u3 = (-4042.0*ctp[9,:] + 18189.0*ctp[7,:] - 28287.0*ctp[5,:] - 151995.0*ctp[3,:] - 259290.0*ct) / 414720.0
    u4 = (72756.0*ctp[10,:] - 321339.0*ctp[8,:] - 154982.0*ctp[6,:] + 50938215.0*ctp[4,:] + 122602962.0*ctp[2,:] + 12773113.0) / 39813120.0
    u5 = (82393456.0*ctp[15,:] - 617950920.0*ctp[13,:] + 1994971575.0*ctp[11,:] - 3630137104.0*ctp[9,:] + 4433574213.0*ctp[7,:]
          - 37370295816.0*ctp[5,:] - 119582875013.0*ctp[3,:] - 34009066266.0*ct) / 6688604160.0
    v0 = 1.0
    v1 = (1.0*ctp[3,:] + 6.0*ct) / 24.0
    v2 = (15.0*ctp[4,:] - 327.0*ctp[2,:] - 143.0) / 1152.0
    v3 = (-4042.0*ctp[9,:] + 18189.0*ctp[7,:] - 36387.0*ctp[5,:] + 238425.0*ctp[3,:] + 259290.0*ct) / 414720.0
    v4 = (-121260.0*ctp[10,:] + 551733.0*ctp[8,:] - 151958.0*ctp[6,:] - 57484425.0*ctp[4,:] - 132752238.0*ctp[2,:] - 12118727) / 39813120.0
    v5 = (82393456.0*ctp[15,:] - 617950920.0*ctp[13,:] + 2025529095.0*ctp[11,:] - 3750839308.0*ctp[9,:] + 3832454253.0*ctp[7,:]
          + 35213253348.0*ctp[5,:] + 130919230435.0*ctp[3,:] + 34009066266*ct) / 6688604160.0
    # Airy Evaluation (Bi and Bip unused)
    Ai, Aip, Bi, Bip = airy(mu**(4.0/6.0) * zeta)
    # Prefactor for U
    P = 2.0*sqrt(pi) * mu**(1.0/6.0) * phi
    # Terms for U: A_k and B_k combine the u-polynomials with powers
    # of phi, divided by the appropriate power of zeta.
    # https://dlmf.nist.gov/12.10#E42
    phip = phi ** arange(6, 31, 6).reshape((-1,1))
    A0 = b0*u0
    A1 = (b2*u0 + phip[0,:]*b1*u1 + phip[1,:]*b0*u2) / zeta**3
    A2 = (b4*u0 + phip[0,:]*b3*u1 + phip[1,:]*b2*u2 + phip[2,:]*b1*u3 + phip[3,:]*b0*u4) / zeta**6
    B0 = -(a1*u0 + phip[0,:]*a0*u1) / zeta**2
    B1 = -(a3*u0 + phip[0,:]*a2*u1 + phip[1,:]*a1*u2 + phip[2,:]*a0*u3) / zeta**5
    B2 = -(a5*u0 + phip[0,:]*a4*u1 + phip[1,:]*a3*u2 + phip[2,:]*a2*u3 + phip[3,:]*a1*u4 + phip[4,:]*a0*u5) / zeta**8
    # U
    # https://dlmf.nist.gov/12.10#E35
    U = P * (Ai * (A0 + A1/mu**2.0 + A2/mu**4.0) +
             Aip * (B0 + B1/mu**2.0 + B2/mu**4.0) / mu**(8.0/6.0))
    # Prefactor for derivative of U
    Pd = sqrt(2.0*pi) * mu**(2.0/6.0) / phi
    # Terms for derivative of U (C_k, D_k use the v-polynomials)
    # https://dlmf.nist.gov/12.10#E46
    C0 = -(b1*v0 + phip[0,:]*b0*v1) / zeta
    C1 = -(b3*v0 + phip[0,:]*b2*v1 + phip[1,:]*b1*v2 + phip[2,:]*b0*v3) / zeta**4
    C2 = -(b5*v0 + phip[0,:]*b4*v1 + phip[1,:]*b3*v2 + phip[2,:]*b2*v3 + phip[3,:]*b1*v4 + phip[4,:]*b0*v5) / zeta**7
    D0 = a0*v0
    D1 = (a2*v0 + phip[0,:]*a1*v1 + phip[1,:]*a0*v2) / zeta**3
    D2 = (a4*v0 + phip[0,:]*a3*v1 + phip[1,:]*a2*v2 + phip[2,:]*a1*v3 + phip[3,:]*a0*v4) / zeta**6
    # Derivative of U
    # https://dlmf.nist.gov/12.10#E36
    Ud = Pd * (Ai * (C0 + C1/mu**2.0 + C2/mu**4.0) / mu**(4.0/6.0) +
               Aip * (D0 + D1/mu**2.0 + D2/mu**4.0))
    return U, Ud
def _newton(n, x_initial, maxit=5):
    """Newton iteration for polishing the asymptotic approximation
    to the zeros of the Hermite polynomials.

    Parameters
    ----------
    n : int
        Quadrature order
    x_initial : ndarray
        Initial guesses for the roots
    maxit : int
        Maximal number of Newton iterations.
        The default 5 is sufficient, usually
        only one or two steps are needed.

    Returns
    -------
    nodes : ndarray
        Quadrature nodes (non-negative half only)
    weights : ndarray
        Quadrature weights for those nodes

    See Also
    --------
    roots_hermite_asy
    """
    # Variable transformation: iterate in theta = arccos(x / mu),
    # mu = sqrt(2n + 1), where the parabolic cylinder expansion lives.
    mu = sqrt(2.0*n + 1.0)
    t = x_initial / mu
    theta = arccos(t)
    # Newton iteration on U(n, theta) = 0; the update uses
    # dx/dtheta = -mu*sin(theta) to convert U/U' into a theta step.
    for i in range(maxit):
        u, ud = _pbcf(n, theta)
        dtheta = u / (sqrt(2.0) * mu * sin(theta) * ud)
        theta = theta + dtheta
        # All nodes converged; stop early.
        if max(abs(dtheta)) < 1e-14:
            break
    # Undo variable transformation
    x = mu * cos(theta)
    # Central node is always zero
    if n % 2 == 1:
        x[0] = 0.0
    # Compute weights from the derivative values of the final
    # iteration (ud is still in the transformed variable).
    w = exp(-x**2) / (2.0*ud**2)
    return x, w
def _roots_hermite_asy(n):
    r"""Gauss-Hermite (physicist's) quadrature for large n.

    Computes the sample points and weights for Gauss-Hermite quadrature.
    The sample points are the roots of the nth degree Hermite polynomial,
    :math:`H_n(x)`. These sample points and weights correctly integrate
    polynomials of degree :math:`2n - 1` or less over the interval
    :math:`[-\infty, \infty]` with weight function :math:`f(x) = e^{-x^2}`.
    This method relies on asymptotic expansions which work best for n > 150.
    The algorithm has linear runtime making computation for very large n
    feasible.

    Parameters
    ----------
    n : int
        quadrature order

    Returns
    -------
    nodes : ndarray
        Quadrature nodes
    weights : ndarray
        Quadrature weights

    See Also
    --------
    roots_hermite

    References
    ----------
    .. [townsend.trogdon.olver-2014]
        Townsend, A. and Trogdon, T. and Olver, S. (2014)
        *Fast computation of Gauss quadrature nodes and
        weights on the whole real line*. :arXiv:`1410.5286`.
    .. [townsend.trogdon.olver-2015]
        Townsend, A. and Trogdon, T. and Olver, S. (2015)
        *Fast computation of Gauss quadrature nodes and
        weights on the whole real line*.
        IMA Journal of Numerical Analysis
        :doi:`10.1093/imanum/drv002`.
    """
    # Non-negative nodes: asymptotic initial guesses polished by Newton.
    iv = _initial_nodes(n)
    nodes, weights = _newton(n, iv)
    # Combine with negative parts (nodes are symmetric about zero).
    if n % 2 == 0:
        nodes = hstack([-nodes[::-1], nodes])
        weights = hstack([weights[::-1], weights])
    else:
        # Odd n: index 0 holds the central zero node; the slice
        # [-1:0:-1] mirrors everything except that node so zero
        # appears exactly once.
        nodes = hstack([-nodes[-1:0:-1], nodes])
        weights = hstack([weights[-1:0:-1], weights])
    # Scale weights so they sum to the zeroth moment sqrt(pi).
    weights *= sqrt(pi) / sum(weights)
    return nodes, weights
def hermite(n, monic=False):
    r"""Physicist's Hermite polynomial.

    Returns the polynomial :math:`H_n` of degree :math:`n`, defined by

    .. math::

        H_n(x) = (-1)^ne^{x^2}\frac{d^n}{dx^n}e^{-x^2}.

    Parameters
    ----------
    n : int
        Degree of the polynomial.
    monic : bool, optional
        If `True`, scale the leading coefficient to be 1. Default is
        `False`.

    Returns
    -------
    H : orthopoly1d
        Hermite polynomial.

    Notes
    -----
    The polynomials :math:`H_n` are orthogonal over :math:`(-\infty,
    \infty)` with weight function :math:`e^{-x^2}`.

    Examples
    --------
    >>> from scipy import special
    >>> p_monic = special.hermite(3, monic=True)
    >>> p_monic
    poly1d([ 1. ,  0. , -1.5,  0. ])
    >>> p_monic(1)
    -0.49999999999999983
    """
    if n < 0:
        raise ValueError("n must be nonnegative.")

    # A degree-0 polynomial carries no quadrature information.
    if n == 0:
        x, w = [], []
    else:
        x, w = roots_hermite(n)
    # Squared norm and leading coefficient (AS 22.2.14).
    hn = 2 ** n * _gam(n + 1) * sqrt(pi)
    kn = 2 ** n
    return orthopoly1d(x, w, hn, kn, lambda u: exp(-u * u), (-inf, inf),
                       monic, lambda u: _ufuncs.eval_hermite(n, u))
# Hermite 2 He_n(x)
def roots_hermitenorm(n, mu=False):
    r"""Gauss-Hermite (statistician's) quadrature.

    Compute the sample points and weights for Gauss-Hermite
    quadrature.  The sample points are the roots of the nth degree
    Hermite polynomial :math:`He_n(x)`; together with the weights they
    integrate polynomials of degree :math:`2n - 1` or less exactly
    over :math:`[-\infty, \infty]` against the weight :math:`w(x) =
    e^{-x^2/2}`. See 22.2.15 in [AS]_ for more details.

    Parameters
    ----------
    n : int
        quadrature order
    mu : bool, optional
        If True, return the sum of the weights, optional.

    Returns
    -------
    x : ndarray
        Sample points
    w : ndarray
        Weights
    mu : float
        Sum of the weights

    Notes
    -----
    For n up to 150 a modified Golub-Welsch algorithm is used: nodes
    come from an eigenvalue problem and are polished by one Newton
    step, while the weights follow from the analytical formula.  For
    larger n an asymptotic algorithm with linear runtime computes
    nodes and weights in a numerically stable manner, making very
    large orders (several thousand or more) feasible.

    See Also
    --------
    scipy.integrate.quadrature
    scipy.integrate.fixed_quad
    numpy.polynomial.hermite_e.hermegauss

    References
    ----------
    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
        Handbook of Mathematical Functions with Formulas,
        Graphs, and Mathematical Tables. New York: Dover, 1972.
    """
    m = int(n)
    if n < 1 or n != m:
        raise ValueError("n must be a positive integer.")

    # Zeroth moment of exp(-x**2/2).
    mu0 = np.sqrt(2.0 * np.pi)

    if m > 150:
        # Large order: reuse the physicist's asymptotic algorithm and
        # rescale both nodes and weights by sqrt(2) to convert from the
        # exp(-x**2) weight to exp(-x**2/2).
        nodes, weights = _roots_hermite_asy(m)
        nodes *= sqrt(2)
        weights *= sqrt(2)
        return (nodes, weights, mu0) if mu else (nodes, weights)

    # Small order: recurrence coefficients for Golub-Welsch; nodes are
    # symmetric about zero (symmetrize=True below).
    def an_func(k):
        return 0.0 * k

    def bn_func(k):
        return np.sqrt(k)

    def df(k, x):
        # d/dx He_k(x) = k He_{k-1}(x)
        return k * _ufuncs.eval_hermitenorm(k - 1, x)

    return _gen_roots_and_weights(m, mu0, an_func, bn_func,
                                  _ufuncs.eval_hermitenorm, df, True, mu)
def hermitenorm(n, monic=False):
    r"""Normalized (probabilist's) Hermite polynomial.

    Returns the polynomial :math:`He_n` of degree :math:`n`, defined by

    .. math::

        He_n(x) = (-1)^ne^{x^2/2}\frac{d^n}{dx^n}e^{-x^2/2}.

    Parameters
    ----------
    n : int
        Degree of the polynomial.
    monic : bool, optional
        If `True`, scale the leading coefficient to be 1. Default is
        `False`.

    Returns
    -------
    He : orthopoly1d
        Hermite polynomial.

    Notes
    -----
    The polynomials :math:`He_n` are orthogonal over :math:`(-\infty,
    \infty)` with weight function :math:`e^{-x^2/2}`.
    """
    if n < 0:
        raise ValueError("n must be nonnegative.")

    # A degree-0 polynomial carries no quadrature information.
    if n == 0:
        x, w = [], []
    else:
        x, w = roots_hermitenorm(n)
    # Squared norm; He_n is already monic (leading coefficient 1).
    hn = sqrt(2 * pi) * _gam(n + 1)
    kn = 1.0
    return orthopoly1d(x, w, hn, kn,
                       wfunc=lambda u: exp(-u * u / 2.0),
                       limits=(-inf, inf), monic=monic,
                       eval_func=lambda u: _ufuncs.eval_hermitenorm(n, u))
# The remainder of the polynomials can be derived from the ones above.
# Ultraspherical (Gegenbauer) C^(alpha)_n(x)
def roots_gegenbauer(n, alpha, mu=False):
    r"""Gauss-Gegenbauer quadrature.

    Compute the sample points and weights for Gauss-Gegenbauer
    quadrature. The sample points are the roots of the nth degree
    Gegenbauer polynomial, :math:`C^{\alpha}_n(x)`. These sample
    points and weights correctly integrate polynomials of degree
    :math:`2n - 1` or less over the interval :math:`[-1, 1]` with
    weight function :math:`w(x) = (1 - x^2)^{\alpha - 1/2}`. See
    22.2.3 in [AS]_ for more details.

    Parameters
    ----------
    n : int
        quadrature order
    alpha : float
        alpha must be > -0.5
    mu : bool, optional
        If True, return the sum of the weights, optional.

    Returns
    -------
    x : ndarray
        Sample points
    w : ndarray
        Weights
    mu : float
        Sum of the weights

    See Also
    --------
    scipy.integrate.quadrature
    scipy.integrate.fixed_quad

    References
    ----------
    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
        Handbook of Mathematical Functions with Formulas,
        Graphs, and Mathematical Tables. New York: Dover, 1972.
    """
    m = int(n)
    if n < 1 or n != m:
        raise ValueError("n must be a positive integer.")
    if alpha < -0.5:
        raise ValueError("alpha must be greater than -0.5.")
    elif alpha == 0.0:
        # C(n,0,x) == 0 uniformly, however, as alpha->0, C(n,alpha,x)->T(n,x)
        # strictly, we should just error out here, since the roots are not
        # really defined, but we used to return something useful, so let's
        # keep doing so.
        return roots_chebyt(n, mu)

    if alpha <= 170:
        # Zeroth moment: sqrt(pi)*Gamma(alpha + 1/2)/Gamma(alpha + 1),
        # evaluated directly while the gammas are representable.
        mu0 = (np.sqrt(np.pi) * _ufuncs.gamma(alpha + 0.5)) \
              / _ufuncs.gamma(alpha + 1)
    else:
        # For large alpha we use a Taylor series expansion around inf,
        # expressed as a 6th order polynomial of a^-1 and using Horner's
        # method to minimize computation and maximize precision
        inv_alpha = 1. / alpha
        coeffs = np.array([0.000207186, -0.00152206, -0.000640869,
                           0.00488281, 0.0078125, -0.125, 1.])
        mu0 = coeffs[0]
        for term in range(1, len(coeffs)):
            mu0 = mu0 * inv_alpha + coeffs[term]
        mu0 = mu0 * np.sqrt(np.pi / alpha)

    # Three-term recurrence coefficients (symmetric weight, so the
    # diagonal an is identically zero) for Golub-Welsch.
    def an_func(k):
        return 0.0 * k

    def bn_func(k):
        return np.sqrt(k * (k + 2 * alpha - 1) / (4 * (k + alpha) * (k + alpha - 1)))

    def f(n, x):
        return _ufuncs.eval_gegenbauer(n, alpha, x)

    def df(n, x):
        # Derivative via the standard recurrence; |x| < 1 at every
        # interior node, so the division is safe.
        return (-n * x * _ufuncs.eval_gegenbauer(n, alpha, x) + (n + 2 * alpha - 1) * _ufuncs.eval_gegenbauer(n - 1, alpha, x)) / (1 - x ** 2)

    return _gen_roots_and_weights(m, mu0, an_func, bn_func, f, df, True, mu)
def gegenbauer(n, alpha, monic=False):
    r"""Gegenbauer (ultraspherical) polynomial.

    Defined to be the solution of

    .. math::

        (1 - x^2)\frac{d^2}{dx^2}C_n^{(\alpha)}
          - (2\alpha + 1)x\frac{d}{dx}C_n^{(\alpha)}
          + n(n + 2\alpha)C_n^{(\alpha)} = 0

    for :math:`\alpha > -1/2`; :math:`C_n^{(\alpha)}` is a polynomial
    of degree :math:`n`.

    Parameters
    ----------
    n : int
        Degree of the polynomial.
    alpha : float
        Parameter, must be greater than -0.5.
    monic : bool, optional
        If `True`, scale the leading coefficient to be 1. Default is
        `False`.

    Returns
    -------
    C : orthopoly1d
        Gegenbauer polynomial.

    Notes
    -----
    The polynomials :math:`C_n^{(\alpha)}` are orthogonal over
    :math:`[-1,1]` with weight function :math:`(1 - x^2)^{(\alpha -
    1/2)}`.

    Built by rescaling the Jacobi polynomial :math:`P_n^{(\alpha -
    1/2, \alpha - 1/2)}`, which satisfies the same weight up to
    normalization.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import special

    We can initialize a variable ``p`` as a Gegenbauer polynomial using the
    `gegenbauer` function and evaluate at a point ``x = 1``.

    >>> p = special.gegenbauer(3, 0.5, monic=False)
    >>> p
    poly1d([ 2.5,  0. , -1.5,  0. ])
    >>> p(1)
    1.0
    """
    base = jacobi(n, alpha - 0.5, alpha - 0.5, monic=monic)
    if monic:
        # Monic normalization is identical for Jacobi and Gegenbauer;
        # no rescaling needed.
        return base
    #  Abramowitz and Stegun 22.5.20
    factor = (_gam(2*alpha + n) * _gam(alpha + 0.5) /
              _gam(2*alpha) / _gam(alpha + 0.5 + n))
    base._scale(factor)
    # Replace the Jacobi evaluator installed by jacobi() with the
    # direct Gegenbauer evaluator; assigned through __dict__ to bypass
    # poly1d's attribute handling.
    base.__dict__['_eval_func'] = lambda x: _ufuncs.eval_gegenbauer(float(n),
                                                                    alpha, x)
    return base
# Chebyshev of the first kind: T_n(x) =
# n! sqrt(pi) / _gam(n+1./2)* P^(-1/2,-1/2)_n(x)
# Computed anew.
def roots_chebyt(n, mu=False):
    r"""Gauss-Chebyshev (first kind) quadrature.

    Computes the sample points and weights for Gauss-Chebyshev
    quadrature.  The sample points are the roots of the nth degree
    Chebyshev polynomial of the first kind, :math:`T_n(x)`; together
    with the weights they integrate polynomials of degree :math:`2n -
    1` or less exactly over :math:`[-1, 1]` against the weight
    :math:`w(x) = 1/\sqrt{1 - x^2}`. See 22.2.4 in [AS]_ for more
    details.

    Parameters
    ----------
    n : int
        quadrature order
    mu : bool, optional
        If True, return the sum of the weights, optional.

    Returns
    -------
    x : ndarray
        Sample points
    w : ndarray
        Weights
    mu : float
        Sum of the weights

    See Also
    --------
    scipy.integrate.quadrature
    scipy.integrate.fixed_quad
    numpy.polynomial.chebyshev.chebgauss

    References
    ----------
    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
        Handbook of Mathematical Functions with Formulas,
        Graphs, and Mathematical Tables. New York: Dover, 1972.
    """
    m = int(n)
    if n < 1 or n != m:
        raise ValueError('n must be a positive integer.')
    # Closed-form nodes, generated in increasing order through _sinpi
    # for accuracy near the interval endpoints; all weights equal pi/m
    # and the zeroth moment is pi.
    x = _ufuncs._sinpi(np.arange(-m + 1, m, 2) / (2*m))
    w = np.full_like(x, pi/m)
    if mu:
        return x, w, pi
    return x, w
def chebyt(n, monic=False):
    r"""Chebyshev polynomial of the first kind.

    Returns the polynomial :math:`T_n` of degree :math:`n`, defined as
    the solution of

    .. math::

        (1 - x^2)\frac{d^2}{dx^2}T_n - x\frac{d}{dx}T_n + n^2T_n = 0.

    Parameters
    ----------
    n : int
        Degree of the polynomial.
    monic : bool, optional
        If `True`, scale the leading coefficient to be 1. Default is
        `False`.

    Returns
    -------
    T : orthopoly1d
        Chebyshev polynomial of the first kind.

    See Also
    --------
    chebyu : Chebyshev polynomial of the second kind.

    Notes
    -----
    The polynomials :math:`T_n` are orthogonal over :math:`[-1, 1]`
    with weight function :math:`(1 - x^2)^{-1/2}`.  They are related
    to the Jacobi polynomials through :math:`P_n^{(-1/2, -1/2)}(x) =
    \frac{1}{4^n} \binom{2n}{n} T_n(x)`.

    References
    ----------
    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
        Handbook of Mathematical Functions with Formulas,
        Graphs, and Mathematical Tables. New York: Dover, 1972.

    Examples
    --------
    >>> from scipy.special import chebyt
    >>> chebyt(3)(0.5)
    -1.0
    """
    if n < 0:
        raise ValueError("n must be nonnegative.")

    def wfunc(x):
        return 1.0 / sqrt(1 - x * x)

    # Degree 0 has no quadrature nodes and squared norm pi; higher
    # degrees have squared norm pi/2 and leading coefficient 2**(n-1)
    # (AS 22.2.4).
    if n == 0:
        x, w, hn, kn = [], [], pi, 1.0
    else:
        x, w = roots_chebyt(n)
        hn, kn = pi / 2, 2 ** (n - 1)
    return orthopoly1d(x, w, hn, kn, wfunc, (-1, 1), monic,
                       lambda u: _ufuncs.eval_chebyt(n, u))
# Chebyshev of the second kind
# U_n(x) = (n+1)! sqrt(pi) / (2*_gam(n+3./2)) * P^(1/2,1/2)_n(x)
def roots_chebyu(n, mu=False):
    r"""Gauss-Chebyshev (second kind) quadrature.

    Computes the sample points and weights for Gauss-Chebyshev
    quadrature.  The sample points are the roots of the nth degree
    Chebyshev polynomial of the second kind, :math:`U_n(x)`; together
    with the weights they integrate polynomials of degree :math:`2n -
    1` or less exactly over :math:`[-1, 1]` against the weight
    :math:`w(x) = \sqrt{1 - x^2}`. See 22.2.5 in [AS]_ for details.

    Parameters
    ----------
    n : int
        quadrature order
    mu : bool, optional
        If True, return the sum of the weights, optional.

    Returns
    -------
    x : ndarray
        Sample points
    w : ndarray
        Weights
    mu : float
        Sum of the weights

    See Also
    --------
    scipy.integrate.quadrature
    scipy.integrate.fixed_quad

    References
    ----------
    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
        Handbook of Mathematical Functions with Formulas,
        Graphs, and Mathematical Tables. New York: Dover, 1972.
    """
    m = int(n)
    if n < 1 or n != m:
        raise ValueError('n must be a positive integer.')
    # Closed form (AS 22.2.5): nodes cos(j*pi/(m+1)) in increasing
    # order, weights pi/(m+1) * sin(j*pi/(m+1))**2; zeroth moment pi/2.
    angles = np.arange(m, 0, -1) * pi / (m + 1)
    x = np.cos(angles)
    w = pi * np.sin(angles)**2 / (m + 1)
    if mu:
        return x, w, pi / 2
    return x, w
def chebyu(n, monic=False):
    r"""Chebyshev polynomial of the second kind.

    Defined to be the solution of

    .. math::
        (1 - x^2)\frac{d^2}{dx^2}U_n - 3x\frac{d}{dx}U_n
          + n(n + 2)U_n = 0;

    :math:`U_n` is a polynomial of degree :math:`n`.

    Parameters
    ----------
    n : int
        Degree of the polynomial.
    monic : bool, optional
        If `True`, scale the leading coefficient to be 1. Default is
        `False`.

    Returns
    -------
    U : orthopoly1d
        Chebyshev polynomial of the second kind.

    Notes
    -----
    The polynomials :math:`U_n` are orthogonal over :math:`[-1, 1]` with
    weight function :math:`(1 - x^2)^{1/2}`.  :math:`U_n` equals the
    Jacobi polynomial :math:`P^{(1/2, 1/2)}_n` up to a constant factor,
    which is how it is constructed here.

    See Also
    --------
    chebyt : Chebyshev polynomial of the first kind.

    References
    ----------
    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
        Handbook of Mathematical Functions with Formulas,
        Graphs, and Mathematical Tables. New York: Dover, 1972.

    Examples
    --------
    The recurrence :math:`U_{2n-1}(x) = 2 T_n(x) U_{n-1}(x)` relates
    :math:`U_n` to the Chebyshev polynomials of the first kind:

    >>> import numpy as np
    >>> from scipy.special import chebyt, chebyu
    >>> x = np.arange(-1.0, 1.0, 0.01)
    >>> np.allclose(chebyu(3)(x), 2 * chebyt(2)(x) * chebyu(1)(x))
    True
    """
    # U_n is the (1/2, 1/2) Jacobi polynomial; the monic versions coincide.
    poly = jacobi(n, 0.5, 0.5, monic=monic)
    if monic:
        return poly
    # Rescale to the conventional normalization of U_n.
    scale = sqrt(pi) / 2.0 * _gam(n + 2) / _gam(n + 1.5)
    poly._scale(scale)
    return poly
# Chebyshev of the first kind C_n(x)
def roots_chebyc(n, mu=False):
    r"""Gauss-Chebyshev (first kind) quadrature on :math:`[-2, 2]`.

    Compute the nodes and weights for Gauss-Chebyshev quadrature with
    the polynomials :math:`C_n(x)`.  The nodes are the roots of the
    nth-degree Chebyshev polynomial of the first kind on
    :math:`[-2, 2]`; together with the weights they integrate
    polynomials of degree up to :math:`2n - 1` exactly over
    :math:`[-2, 2]` with weight :math:`w(x) = 1 / \sqrt{1 - (x/2)^2}`
    (see 22.2.6 in [AS]_).

    Parameters
    ----------
    n : int
        quadrature order
    mu : bool, optional
        If True, also return the sum of the weights.

    Returns
    -------
    x : ndarray
        Sample points
    w : ndarray
        Weights
    mu : float
        Sum of the weights (only if ``mu`` is True)

    See Also
    --------
    scipy.integrate.quadrature
    scipy.integrate.fixed_quad

    References
    ----------
    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
        Handbook of Mathematical Functions with Formulas,
        Graphs, and Mathematical Tables. New York: Dover, 1972.
    """
    # C_n(x) = 2 T_n(x/2): stretch the T-rule from [-1, 1] to [-2, 2].
    nodes, weights, wsum = roots_chebyt(n, True)
    nodes = nodes * 2
    weights = weights * 2
    wsum = wsum * 2
    if mu:
        return nodes, weights, wsum
    return nodes, weights
def chebyc(n, monic=False):
    r"""Chebyshev polynomial of the first kind on :math:`[-2, 2]`.
    Defined as :math:`C_n(x) = 2T_n(x/2)`, where :math:`T_n` is the
    nth Chebychev polynomial of the first kind.
    Parameters
    ----------
    n : int
        Degree of the polynomial.
    monic : bool, optional
        If `True`, scale the leading coefficient to be 1. Default is
        `False`.
    Returns
    -------
    C : orthopoly1d
        Chebyshev polynomial of the first kind on :math:`[-2, 2]`.
    Notes
    -----
    The polynomials :math:`C_n(x)` are orthogonal over :math:`[-2, 2]`
    with weight function :math:`1/\sqrt{1 - (x/2)^2}`.
    See Also
    --------
    chebyt : Chebyshev polynomial of the first kind.
    References
    ----------
    .. [1] Abramowitz and Stegun, "Handbook of Mathematical Functions"
           Section 22. National Bureau of Standards, 1972.
    """
    if n < 0:
        raise ValueError("n must be nonnegative.")
    # orthopoly1d needs at least one node to build from, so for n == 0
    # compute the degree-1 rule and discard the nodes/weights below.
    if n == 0:
        n1 = n + 1
    else:
        n1 = n
    x, w = roots_chebyc(n1)
    if n == 0:
        x, w = [], []
    # Squared-norm constant h_n: 4*pi*2 = 8*pi when n == 0, else 4*pi
    # (cf. AS 22.2.6); kn = 1 since the basis is built monic first.
    hn = 4 * pi * ((n == 0) + 1)
    kn = 1.0
    p = orthopoly1d(x, w, hn, kn,
                    wfunc=lambda x: 1.0 / sqrt(1 - x * x / 4.0),
                    limits=(-2, 2), monic=monic)
    if not monic:
        # Rescale so the conventional endpoint value C_n(2) = 2 holds.
        p._scale(2.0 / p(2))
        # Replace the generic polynomial evaluator with the C ufunc,
        # bypassing any property machinery by writing to __dict__.
        p.__dict__['_eval_func'] = lambda x: _ufuncs.eval_chebyc(n, x)
    return p
# Chebyshev of the second kind S_n(x)
def roots_chebys(n, mu=False):
    r"""Gauss-Chebyshev (second kind) quadrature on :math:`[-2, 2]`.

    Compute the nodes and weights for Gauss-Chebyshev quadrature with
    the polynomials :math:`S_n(x)`.  The nodes are the roots of the
    nth-degree Chebyshev polynomial of the second kind on
    :math:`[-2, 2]`; together with the weights they integrate
    polynomials of degree up to :math:`2n - 1` exactly over
    :math:`[-2, 2]` with weight :math:`w(x) = \sqrt{1 - (x/2)^2}`
    (see 22.2.7 in [AS]_).

    Parameters
    ----------
    n : int
        quadrature order
    mu : bool, optional
        If True, also return the sum of the weights.

    Returns
    -------
    x : ndarray
        Sample points
    w : ndarray
        Weights
    mu : float
        Sum of the weights (only if ``mu`` is True)

    See Also
    --------
    scipy.integrate.quadrature
    scipy.integrate.fixed_quad

    References
    ----------
    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
        Handbook of Mathematical Functions with Formulas,
        Graphs, and Mathematical Tables. New York: Dover, 1972.
    """
    # S_n(x) = U_n(x/2): stretch the U-rule from [-1, 1] to [-2, 2].
    nodes, weights, wsum = roots_chebyu(n, True)
    nodes = nodes * 2
    weights = weights * 2
    wsum = wsum * 2
    if mu:
        return nodes, weights, wsum
    return nodes, weights
def chebys(n, monic=False):
    r"""Chebyshev polynomial of the second kind on :math:`[-2, 2]`.
    Defined as :math:`S_n(x) = U_n(x/2)` where :math:`U_n` is the
    nth Chebychev polynomial of the second kind.
    Parameters
    ----------
    n : int
        Degree of the polynomial.
    monic : bool, optional
        If `True`, scale the leading coefficient to be 1. Default is
        `False`.
    Returns
    -------
    S : orthopoly1d
        Chebyshev polynomial of the second kind on :math:`[-2, 2]`.
    Notes
    -----
    The polynomials :math:`S_n(x)` are orthogonal over :math:`[-2, 2]`
    with weight function :math:`\sqrt{1 - (x/2)^2}`.
    See Also
    --------
    chebyu : Chebyshev polynomial of the second kind
    References
    ----------
    .. [1] Abramowitz and Stegun, "Handbook of Mathematical Functions"
           Section 22. National Bureau of Standards, 1972.
    """
    if n < 0:
        raise ValueError("n must be nonnegative.")
    # orthopoly1d needs at least one node to build from, so for n == 0
    # compute the degree-1 rule and discard the nodes/weights below.
    if n == 0:
        n1 = n + 1
    else:
        n1 = n
    x, w = roots_chebys(n1)
    if n == 0:
        x, w = [], []
    # Squared-norm constant h_n = pi (cf. AS 22.2.7); kn = 1 since the
    # basis is built monic first.
    hn = pi
    kn = 1.0
    p = orthopoly1d(x, w, hn, kn,
                    wfunc=lambda x: sqrt(1 - x * x / 4.0),
                    limits=(-2, 2), monic=monic)
    if not monic:
        # Rescale so the conventional endpoint value S_n(2) = n + 1 holds.
        factor = (n + 1.0) / p(2)
        p._scale(factor)
        # Replace the generic polynomial evaluator with the C ufunc,
        # bypassing any property machinery by writing to __dict__.
        p.__dict__['_eval_func'] = lambda x: _ufuncs.eval_chebys(n, x)
    return p
# Shifted Chebyshev of the first kind T^*_n(x)
def roots_sh_chebyt(n, mu=False):
    r"""Gauss-Chebyshev (first kind, shifted) quadrature.

    Compute the nodes and weights for shifted Gauss-Chebyshev
    quadrature of the first kind.  The nodes are the roots of the
    nth-degree shifted Chebyshev polynomial :math:`T_n(x)`; together
    with the weights they integrate polynomials of degree up to
    :math:`2n - 1` exactly over :math:`[0, 1]` with weight
    :math:`w(x) = 1/\sqrt{x - x^2}` (see 22.2.8 in [AS]_).

    Parameters
    ----------
    n : int
        quadrature order
    mu : bool, optional
        If True, also return the sum of the weights.

    Returns
    -------
    x : ndarray
        Sample points
    w : ndarray
        Weights
    mu : float
        Sum of the weights (only if ``mu`` is True)

    See Also
    --------
    scipy.integrate.quadrature
    scipy.integrate.fixed_quad

    References
    ----------
    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
        Handbook of Mathematical Functions with Formulas,
        Graphs, and Mathematical Tables. New York: Dover, 1972.
    """
    # Shift the standard Chebyshev-T nodes from [-1, 1] onto [0, 1];
    # the weights (and their sum, if requested) are unchanged.
    res = roots_chebyt(n, mu)
    shifted = (res[0] + 1) / 2
    return (shifted,) + res[1:]
def sh_chebyt(n, monic=False):
    r"""Shifted Chebyshev polynomial of the first kind.

    Defined as :math:`T^*_n(x) = T_n(2x - 1)` for :math:`T_n` the nth
    Chebyshev polynomial of the first kind.

    Parameters
    ----------
    n : int
        Degree of the polynomial.
    monic : bool, optional
        If `True`, scale the leading coefficient to be 1. Default is
        `False`.

    Returns
    -------
    T : orthopoly1d
        Shifted Chebyshev polynomial of the first kind.

    Notes
    -----
    The polynomials :math:`T^*_n` are orthogonal over :math:`[0, 1]`
    with weight function :math:`(x - x^2)^{-1/2}`.  They are built here
    from the shifted Jacobi polynomials :math:`G_n(0, 1/2, x)`.
    """
    poly = sh_jacobi(n, 0.0, 0.5, monic=monic)
    if monic:
        return poly
    # Conventional normalization: leading coefficient 4**n / 2 for
    # n > 0, and 1 for the constant polynomial.
    poly._scale(4**n / 2.0 if n > 0 else 1.0)
    return poly
# Shifted Chebyshev of the second kind U^*_n(x)
def roots_sh_chebyu(n, mu=False):
    r"""Gauss-Chebyshev (second kind, shifted) quadrature.

    Compute the nodes and weights for shifted Gauss-Chebyshev
    quadrature of the second kind.  The nodes are the roots of the
    nth-degree shifted Chebyshev polynomial :math:`U_n(x)`; together
    with the weights they integrate polynomials of degree up to
    :math:`2n - 1` exactly over :math:`[0, 1]` with weight
    :math:`w(x) = \sqrt{x - x^2}` (see 22.2.9 in [AS]_).

    Parameters
    ----------
    n : int
        quadrature order
    mu : bool, optional
        If True, also return the sum of the weights.

    Returns
    -------
    x : ndarray
        Sample points
    w : ndarray
        Weights
    mu : float
        Sum of the weights (only if ``mu`` is True)

    See Also
    --------
    scipy.integrate.quadrature
    scipy.integrate.fixed_quad

    References
    ----------
    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
        Handbook of Mathematical Functions with Formulas,
        Graphs, and Mathematical Tables. New York: Dover, 1972.
    """
    nodes, weights, wsum = roots_chebyu(n, True)
    # Shift nodes from [-1, 1] onto [0, 1].
    nodes = (nodes + 1) / 2
    # Total weight on [0, 1] with w(x) = sqrt(x - x^2) is B(3/2, 3/2);
    # rescale the weights accordingly.
    wsum_shifted = _ufuncs.beta(1.5, 1.5)
    weights = weights * (wsum_shifted / wsum)
    if mu:
        return nodes, weights, wsum_shifted
    return nodes, weights
def sh_chebyu(n, monic=False):
    r"""Shifted Chebyshev polynomial of the second kind.

    Defined as :math:`U^*_n(x) = U_n(2x - 1)` for :math:`U_n` the nth
    Chebyshev polynomial of the second kind.

    Parameters
    ----------
    n : int
        Degree of the polynomial.
    monic : bool, optional
        If `True`, scale the leading coefficient to be 1. Default is
        `False`.

    Returns
    -------
    U : orthopoly1d
        Shifted Chebyshev polynomial of the second kind.

    Notes
    -----
    The polynomials :math:`U^*_n` are orthogonal over :math:`[0, 1]`
    with weight function :math:`(x - x^2)^{1/2}`.  They are built here
    from the shifted Jacobi polynomials :math:`G_n(2, 3/2, x)`.
    """
    poly = sh_jacobi(n, 2.0, 1.5, monic=monic)
    if monic:
        return poly
    # Conventional normalization: leading coefficient 4**n.
    poly._scale(4**n)
    return poly
# Legendre
def roots_legendre(n, mu=False):
    r"""Gauss-Legendre quadrature.

    Compute the sample points and weights for Gauss-Legendre
    quadrature [GL]_.  The sample points are the roots of the nth-degree
    Legendre polynomial :math:`P_n(x)`; together with the weights they
    integrate polynomials of degree up to :math:`2n - 1` exactly over
    :math:`[-1, 1]` with weight function :math:`w(x) = 1`
    (see 2.2.10 in [AS]_).

    Parameters
    ----------
    n : int
        quadrature order
    mu : bool, optional
        If True, also return the sum of the weights.

    Returns
    -------
    x : ndarray
        Sample points
    w : ndarray
        Weights
    mu : float
        Sum of the weights (only if ``mu`` is True)

    See Also
    --------
    scipy.integrate.quadrature
    scipy.integrate.fixed_quad
    numpy.polynomial.legendre.leggauss

    References
    ----------
    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
        Handbook of Mathematical Functions with Formulas,
        Graphs, and Mathematical Tables. New York: Dover, 1972.
    .. [GL] Gauss-Legendre quadrature, Wikipedia,
        https://en.wikipedia.org/wiki/Gauss%E2%80%93Legendre_quadrature

    Examples
    --------
    Approximate ``integral(t + 1/t, t=1..2)`` by mapping the nodes from
    ``[-1, 1]`` onto ``[1, 2]``:

    >>> import numpy as np
    >>> from scipy.special import roots_legendre
    >>> roots, weights = roots_legendre(9)
    >>> a, b = 1, 2
    >>> t = (b - a)/2 * roots + (a + b)/2
    >>> (b - a)/2 * (t + 1/t).dot(weights)
    2.1931471805599276

    The exact value is ``3/2 + log(2) = 2.1931471805599454``.
    """
    order = int(n)
    if order != n or order < 1:
        raise ValueError("n must be a positive integer.")

    mu0 = 2.0  # integral of the unit weight over [-1, 1]

    # Three-term recurrence coefficients of the Legendre polynomials:
    # a_k = 0 (symmetric weight), b_k = k / sqrt(4 k^2 - 1).
    def alpha(k):
        return 0.0 * k

    def beta(k):
        return k * np.sqrt(1.0 / (4 * k * k - 1))

    def dlegendre(deg, x):
        # Derivative of P_deg from the standard recurrence
        # (1 - x^2) P'_deg = -deg x P_deg + deg P_{deg-1}.
        return (-deg * x * _ufuncs.eval_legendre(deg, x)
                + deg * _ufuncs.eval_legendre(deg - 1, x)) / (1 - x ** 2)

    return _gen_roots_and_weights(order, mu0, alpha, beta,
                                  _ufuncs.eval_legendre, dlegendre, True, mu)
def legendre(n, monic=False):
    r"""Legendre polynomial.

    Defined to be the solution of

    .. math::
        \frac{d}{dx}\left[(1 - x^2)\frac{d}{dx}P_n(x)\right]
          + n(n + 1)P_n(x) = 0;

    :math:`P_n(x)` is a polynomial of degree :math:`n`.

    Parameters
    ----------
    n : int
        Degree of the polynomial.
    monic : bool, optional
        If `True`, scale the leading coefficient to be 1. Default is
        `False`.

    Returns
    -------
    P : orthopoly1d
        Legendre polynomial.

    Notes
    -----
    The polynomials :math:`P_n` are orthogonal over :math:`[-1, 1]`
    with weight function 1.

    Examples
    --------
    Generate the 3rd-order Legendre polynomial 1/2*(5x^3 + 0x^2 - 3x + 0):

    >>> from scipy.special import legendre
    >>> legendre(3)
    poly1d([ 2.5,  0. , -1.5,  0. ])
    """
    if n < 0:
        raise ValueError("n must be nonnegative.")
    # orthopoly1d needs at least one node to build from; for n == 0
    # compute the degree-1 rule and discard the nodes/weights below.
    n1 = n if n > 0 else 1
    x, w = roots_legendre(n1)
    if n == 0:
        x, w = [], []
    hn = 2.0 / (2 * n + 1)                           # squared norm of P_n
    kn = _gam(2 * n + 1) / _gam(n + 1)**2 / 2.0**n   # leading coefficient
    return orthopoly1d(x, w, hn, kn, wfunc=lambda x: 1.0, limits=(-1, 1),
                       monic=monic,
                       eval_func=lambda x: _ufuncs.eval_legendre(n, x))
# Shifted Legendre P^*_n(x)
def roots_sh_legendre(n, mu=False):
    r"""Gauss-Legendre (shifted) quadrature.

    Compute the nodes and weights for shifted Gauss-Legendre
    quadrature.  The nodes are the roots of the nth-degree shifted
    Legendre polynomial :math:`P^*_n(x)`; together with the weights
    they integrate polynomials of degree up to :math:`2n - 1` exactly
    over :math:`[0, 1]` with weight :math:`w(x) = 1.0`
    (see 2.2.11 in [AS]_).

    Parameters
    ----------
    n : int
        quadrature order
    mu : bool, optional
        If True, also return the sum of the weights.

    Returns
    -------
    x : ndarray
        Sample points
    w : ndarray
        Weights
    mu : float
        Sum of the weights (only if ``mu`` is True)

    See Also
    --------
    scipy.integrate.quadrature
    scipy.integrate.fixed_quad

    References
    ----------
    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
        Handbook of Mathematical Functions with Formulas,
        Graphs, and Mathematical Tables. New York: Dover, 1972.
    """
    nodes, weights = roots_legendre(n)
    # Affine map [-1, 1] -> [0, 1]; the Jacobian 1/2 halves each weight.
    nodes = (nodes + 1) / 2
    weights = weights / 2
    if mu:
        # Total weight: length of [0, 1] under the unit weight.
        return nodes, weights, 1.0
    return nodes, weights
def sh_legendre(n, monic=False):
    r"""Shifted Legendre polynomial.

    Defined as :math:`P^*_n(x) = P_n(2x - 1)` for :math:`P_n` the nth
    Legendre polynomial.

    Parameters
    ----------
    n : int
        Degree of the polynomial.
    monic : bool, optional
        If `True`, scale the leading coefficient to be 1. Default is
        `False`.

    Returns
    -------
    P : orthopoly1d
        Shifted Legendre polynomial.

    Notes
    -----
    The polynomials :math:`P^*_n` are orthogonal over :math:`[0, 1]`
    with weight function 1.
    """
    if n < 0:
        raise ValueError("n must be nonnegative.")

    def weight(x):
        # Constant unit weight, broadcast to the shape of x.
        return 0.0 * x + 1.0

    if n == 0:
        # Degenerate case: empty node/weight lists, unit norms.
        return orthopoly1d([], [], 1.0, 1.0, weight, (0, 1), monic,
                           lambda x: _ufuncs.eval_sh_legendre(n, x))
    x, w = roots_sh_legendre(n)
    hn = 1.0 / (2 * n + 1.0)                 # squared norm of P*_n
    kn = _gam(2 * n + 1) / _gam(n + 1)**2    # leading coefficient
    return orthopoly1d(x, w, hn, kn, weight, limits=(0, 1), monic=monic,
                       eval_func=lambda x: _ufuncs.eval_sh_legendre(n, x))
# Make the old root function names an alias for the new ones
# (e.g. ``p_roots`` -> ``roots_legendre``) and add them to ``__all__``
# so they remain importable for backward compatibility.
_modattrs = globals()
for newfun, oldfun in _rootfuns_map.items():
    _modattrs[oldfun] = _modattrs[newfun]
    __all__.append(oldfun)
| 74,237
| 27.76327
| 163
|
py
|
scipy
|
scipy-main/scipy/special/add_newdocs.py
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
import warnings
from . import _add_newdocs
__all__ = ['get', 'add_newdoc', 'Dict', 'docdict'] # noqa: F822
def __dir__():
    # Restrict dir() of this deprecated shim module to its public names.
    return __all__
def __getattr__(name):
    """Deprecation shim: forward attribute access to ``_add_newdocs``.

    Raises AttributeError for names outside ``__all__``; otherwise emits
    a DeprecationWarning and returns the private module's attribute.
    """
    if name not in __all__:
        raise AttributeError(
            "scipy.special.add_newdocs is deprecated and has no attribute "
            f"{name}.")
    # Bug fix: the two concatenated string pieces previously rendered as
    # "... namespace is deprecated. and will be removed ..." (stray
    # period mid-sentence).
    warnings.warn("The `scipy.special.add_newdocs` namespace is deprecated "
                  "and will be removed in SciPy v2.0.0.",
                  category=DeprecationWarning, stacklevel=2)
    return getattr(_add_newdocs, name)
| 644
| 25.875
| 76
|
py
|
scipy
|
scipy-main/scipy/special/_ellip_harm.py
|
import numpy as np
from ._ufuncs import _ellip_harm
from ._ellip_harm_2 import _ellipsoid, _ellipsoid_norm
def ellip_harm(h2, k2, n, p, s, signm=1, signn=1):
    r"""
    Ellipsoidal harmonic functions E^p_n(l)

    Also known as Lame functions of the first kind: solutions of the
    Lame equation

    .. math:: (s^2 - h^2)(s^2 - k^2)E''(s) + s(2s^2 - h^2 - k^2)E'(s) + (a - q s^2)E(s) = 0

    where :math:`q = (n+1)n` and :math:`a` is the eigenvalue (not
    returned) corresponding to the solutions.

    Parameters
    ----------
    h2 : float
        ``h**2``
    k2 : float
        ``k**2``; should be larger than ``h**2``
    n : int
        Degree
    s : float
        Coordinate
    p : int
        Order, can range between [1,2n+1]
    signm : {1, -1}, optional
        Sign of prefactor of functions. Can be +/-1. See Notes.
    signn : {1, -1}, optional
        Sign of prefactor of functions. Can be +/-1. See Notes.

    Returns
    -------
    E : float
        the harmonic :math:`E^p_n(s)`

    See Also
    --------
    ellip_harm_2, ellip_normal

    Notes
    -----
    The geometric interpretation of the ellipsoidal functions is
    explained in [2]_, [3]_, [4]_. The `signm` and `signn` arguments
    control the sign of prefactors for functions according to their
    type::

        K : +1
        L : signm
        M : signn
        N : signm*signn

    .. versionadded:: 0.15.0

    References
    ----------
    .. [1] Digital Library of Mathematical Functions 29.12
       https://dlmf.nist.gov/29.12
    .. [2] Bardhan and Knepley, "Computational science and
       re-discovery: open-source implementations of
       ellipsoidal harmonics for problems in potential theory",
       Comput. Sci. Disc. 5, 014006 (2012)
       :doi:`10.1088/1749-4699/5/1/014006`.
    .. [3] David J.and Dechambre P, "Computation of Ellipsoidal
       Gravity Field Harmonics for small solar system bodies"
       pp. 30-36, 2000
    .. [4] George Dassios, "Ellipsoidal Harmonics: Theory and Applications"
       pp. 418, 2012

    Examples
    --------
    >>> from scipy.special import ellip_harm
    >>> ellip_harm(5, 8, 1, 1, 2.5)
    2.5
    """
    # Thin wrapper: all numerical work happens in the compiled ufunc.
    return _ellip_harm(h2, k2, n, p, s, signm, signn)
# Vectorized, double-valued wrapper around the scalar implementation.
_ellip_harm_2_vec = np.vectorize(_ellipsoid, otypes='d')


def ellip_harm_2(h2, k2, n, p, s):
    r"""
    Ellipsoidal harmonic functions F^p_n(l)

    Also known as Lame functions of the second kind: solutions of the
    Lame equation

    .. math:: (s^2 - h^2)(s^2 - k^2)F''(s) + s(2s^2 - h^2 - k^2)F'(s) + (a - q s^2)F(s) = 0

    where :math:`q = (n+1)n` and :math:`a` is the eigenvalue (not
    returned) corresponding to the solutions.

    Parameters
    ----------
    h2 : float
        ``h**2``
    k2 : float
        ``k**2``; should be larger than ``h**2``
    n : int
        Degree.
    p : int
        Order, can range between [1,2n+1].
    s : float
        Coordinate

    Returns
    -------
    F : float
        The harmonic :math:`F^p_n(s)`

    Notes
    -----
    Lame functions of the second kind are related to the functions of
    the first kind:

    .. math::

       F^p_n(s)=(2n + 1)E^p_n(s)\int_{0}^{1/s}
       \frac{du}{(E^p_n(1/u))^2\sqrt{(1-u^2k^2)(1-u^2h^2)}}

    .. versionadded:: 0.15.0

    See Also
    --------
    ellip_harm, ellip_normal

    Examples
    --------
    >>> from scipy.special import ellip_harm_2
    >>> ellip_harm_2(5, 8, 2, 1, 10)
    0.00108056853382
    """
    # Silence floating-point warnings raised while the underlying
    # routine is evaluated.
    with np.errstate(all='ignore'):
        return _ellip_harm_2_vec(h2, k2, n, p, s)
def _ellip_normal_vec(h2, k2, n, p):
    # Scalar wrapper around the compiled normalization-constant routine.
    return _ellipsoid_norm(h2, k2, n, p)
# Rebind the name to its vectorized (double-valued) form; the scalar
# function above survives only inside the np.vectorize object.
_ellip_normal_vec = np.vectorize(_ellip_normal_vec, otypes='d')
def ellip_normal(h2, k2, n, p):
    r"""
    Ellipsoidal harmonic normalization constants gamma^p_n

    The normalization constant is defined as

    .. math::

       \gamma^p_n=8\int_{0}^{h}dx\int_{h}^{k}dy
       \frac{(y^2-x^2)(E^p_n(y)E^p_n(x))^2}
       {\sqrt{(k^2-y^2)(y^2-h^2)(h^2-x^2)(k^2-x^2)}}

    Parameters
    ----------
    h2 : float
        ``h**2``
    k2 : float
        ``k**2``; should be larger than ``h**2``
    n : int
        Degree.
    p : int
        Order, can range between [1,2n+1].

    Returns
    -------
    gamma : float
        The normalization constant :math:`\gamma^p_n`

    See Also
    --------
    ellip_harm, ellip_harm_2

    Notes
    -----
    .. versionadded:: 0.15.0

    Examples
    --------
    >>> from scipy.special import ellip_normal
    >>> ellip_normal(5, 8, 3, 7)
    1723.38796997
    """
    # Silence floating-point warnings raised while the underlying
    # routine is evaluated.
    with np.errstate(all='ignore'):
        return _ellip_normal_vec(h2, k2, n, p)
| 5,272
| 24.229665
| 127
|
py
|
scipy
|
scipy-main/scipy/special/orthogonal.py
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.special` namespace for importing the functions
# included below.
import warnings
from . import _orthogonal
# Names of the polynomial-constructor functions forwarded to
# ``scipy.special._orthogonal``.
_polyfuns = ['legendre', 'chebyt', 'chebyu', 'chebyc', 'chebys',
             'jacobi', 'laguerre', 'genlaguerre', 'hermite',
             'hermitenorm', 'gegenbauer', 'sh_legendre', 'sh_chebyt',
             'sh_chebyu', 'sh_jacobi']
# Correspondence between new and old names of root functions
_rootfuns_map = {'roots_legendre': 'p_roots',
                 'roots_chebyt': 't_roots',
                 'roots_chebyu': 'u_roots',
                 'roots_chebyc': 'c_roots',
                 'roots_chebys': 's_roots',
                 'roots_jacobi': 'j_roots',
                 'roots_laguerre': 'l_roots',
                 'roots_genlaguerre': 'la_roots',
                 'roots_hermite': 'h_roots',
                 'roots_hermitenorm': 'he_roots',
                 'roots_gegenbauer': 'cg_roots',
                 'roots_sh_legendre': 'ps_roots',
                 'roots_sh_chebyt': 'ts_roots',
                 'roots_sh_chebyu': 'us_roots',
                 'roots_sh_jacobi': 'js_roots'}
# Public names of this deprecated shim.  The extra entries ('exp',
# 'linalg', 'airy', ...) are presumably names that historically leaked
# from the old module's namespace -- kept for backward compatibility.
__all__ = _polyfuns + list(_rootfuns_map.keys()) + [ # noqa: F822
    'exp', 'inf', 'floor', 'around', 'hstack', 'arange',
    'linalg', 'airy', 'orthopoly1d', 'newfun',
    'oldfun', 'p_roots', 't_roots', 'u_roots', 'c_roots', 's_roots',
    'j_roots', 'l_roots', 'la_roots', 'h_roots', 'he_roots', 'cg_roots',
    'ps_roots', 'ts_roots', 'us_roots', 'js_roots'
]
def __dir__():
    # Restrict dir() of this deprecated shim module to its public names.
    return __all__
def __getattr__(name):
    """Forward public attribute access to ``scipy.special._orthogonal``.

    Known names trigger a DeprecationWarning and are resolved from the
    private module; anything else raises AttributeError.
    """
    if name in __all__:
        warnings.warn(f"Please use `{name}` from the `scipy.special` namespace, "
                      "the `scipy.special.orthogonal` namespace is deprecated.",
                      category=DeprecationWarning, stacklevel=2)
        return getattr(_orthogonal, name)

    raise AttributeError(
        "scipy.special.orthogonal is deprecated and has no attribute "
        f"{name}. Try looking in scipy.special instead.")
| 2,053
| 35.678571
| 77
|
py
|
scipy
|
scipy-main/scipy/special/_add_newdocs.py
|
# Docstrings for generated ufuncs
#
# The syntax is designed to look like the function add_newdoc is being
# called from numpy.lib, but in this file add_newdoc puts the
# docstrings in a dictionary. This dictionary is used in
# _generate_pyx.py to generate the docstrings for the ufuncs in
# scipy.special at the C level when the ufuncs are created at compile
# time.
# Mapping from ufunc name to its docstring, filled in by the
# add_newdoc(...) calls below and consumed at build time.
docdict: dict[str, str] = {}


def get(name):
    """Return the docstring registered for *name*, or None if absent."""
    try:
        return docdict[name]
    except KeyError:
        return None


def add_newdoc(name, doc):
    """Register *doc* as the docstring for the ufunc *name*."""
    docdict[name] = doc
add_newdoc("_sf_error_test_function",
"""
Private function; do not use.
""")
add_newdoc("_cosine_cdf",
"""
_cosine_cdf(x)
Cumulative distribution function (CDF) of the cosine distribution::
{ 0, x < -pi
cdf(x) = { (pi + x + sin(x))/(2*pi), -pi <= x <= pi
{ 1, x > pi
Parameters
----------
x : array_like
`x` must contain real numbers.
Returns
-------
scalar or ndarray
The cosine distribution CDF evaluated at `x`.
""")
add_newdoc("_cosine_invcdf",
"""
_cosine_invcdf(p)
Inverse of the cumulative distribution function (CDF) of the cosine
distribution.
The CDF of the cosine distribution is::
cdf(x) = (pi + x + sin(x))/(2*pi)
This function computes the inverse of cdf(x).
Parameters
----------
p : array_like
`p` must contain real numbers in the interval ``0 <= p <= 1``.
`nan` is returned for values of `p` outside the interval [0, 1].
Returns
-------
scalar or ndarray
The inverse of the cosine distribution CDF evaluated at `p`.
""")
add_newdoc("sph_harm",
r"""
sph_harm(m, n, theta, phi, out=None)
Compute spherical harmonics.
The spherical harmonics are defined as
.. math::
Y^m_n(\theta,\phi) = \sqrt{\frac{2n+1}{4\pi} \frac{(n-m)!}{(n+m)!}}
e^{i m \theta} P^m_n(\cos(\phi))
where :math:`P_n^m` are the associated Legendre functions; see `lpmv`.
Parameters
----------
m : array_like
Order of the harmonic (int); must have ``|m| <= n``.
n : array_like
Degree of the harmonic (int); must have ``n >= 0``. This is
often denoted by ``l`` (lower case L) in descriptions of
spherical harmonics.
theta : array_like
Azimuthal (longitudinal) coordinate; must be in ``[0, 2*pi]``.
phi : array_like
Polar (colatitudinal) coordinate; must be in ``[0, pi]``.
out : ndarray, optional
Optional output array for the function values
Returns
-------
y_mn : complex scalar or ndarray
The harmonic :math:`Y^m_n` sampled at ``theta`` and ``phi``.
Notes
-----
There are different conventions for the meanings of the input
arguments ``theta`` and ``phi``. In SciPy ``theta`` is the
azimuthal angle and ``phi`` is the polar angle. It is common to
see the opposite convention, that is, ``theta`` as the polar angle
and ``phi`` as the azimuthal angle.
Note that SciPy's spherical harmonics include the Condon-Shortley
phase [2]_ because it is part of `lpmv`.
With SciPy's conventions, the first several spherical harmonics
are
.. math::
Y_0^0(\theta, \phi) &= \frac{1}{2} \sqrt{\frac{1}{\pi}} \\
Y_1^{-1}(\theta, \phi) &= \frac{1}{2} \sqrt{\frac{3}{2\pi}}
e^{-i\theta} \sin(\phi) \\
Y_1^0(\theta, \phi) &= \frac{1}{2} \sqrt{\frac{3}{\pi}}
\cos(\phi) \\
Y_1^1(\theta, \phi) &= -\frac{1}{2} \sqrt{\frac{3}{2\pi}}
e^{i\theta} \sin(\phi).
References
----------
.. [1] Digital Library of Mathematical Functions, 14.30.
https://dlmf.nist.gov/14.30
.. [2] https://en.wikipedia.org/wiki/Spherical_harmonics#Condon.E2.80.93Shortley_phase
""")
add_newdoc("_ellip_harm",
"""
Internal function, use `ellip_harm` instead.
""")
add_newdoc("_ellip_norm",
"""
Internal function, use `ellip_norm` instead.
""")
add_newdoc("_lambertw",
"""
Internal function, use `lambertw` instead.
""")
add_newdoc("voigt_profile",
r"""
voigt_profile(x, sigma, gamma, out=None)
Voigt profile.
The Voigt profile is a convolution of a 1-D Normal distribution with
standard deviation ``sigma`` and a 1-D Cauchy distribution with half-width at
half-maximum ``gamma``.
If ``sigma = 0``, PDF of Cauchy distribution is returned.
Conversely, if ``gamma = 0``, PDF of Normal distribution is returned.
If ``sigma = gamma = 0``, the return value is ``Inf`` for ``x = 0``, and ``0`` for all other ``x``.
Parameters
----------
x : array_like
Real argument
sigma : array_like
The standard deviation of the Normal distribution part
gamma : array_like
The half-width at half-maximum of the Cauchy distribution part
out : ndarray, optional
Optional output array for the function values
Returns
-------
scalar or ndarray
The Voigt profile at the given arguments
Notes
-----
It can be expressed in terms of Faddeeva function
.. math:: V(x; \sigma, \gamma) = \frac{Re[w(z)]}{\sigma\sqrt{2\pi}},
.. math:: z = \frac{x + i\gamma}{\sqrt{2}\sigma}
where :math:`w(z)` is the Faddeeva function.
See Also
--------
wofz : Faddeeva function
References
----------
.. [1] https://en.wikipedia.org/wiki/Voigt_profile
Examples
--------
Calculate the function at point 2 for ``sigma=1`` and ``gamma=1``.
>>> from scipy.special import voigt_profile
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> voigt_profile(2, 1., 1.)
0.09071519942627544
Calculate the function at several points by providing a NumPy array
for `x`.
>>> values = np.array([-2., 0., 5])
>>> voigt_profile(values, 1., 1.)
array([0.0907152 , 0.20870928, 0.01388492])
Plot the function for different parameter sets.
>>> fig, ax = plt.subplots(figsize=(8, 8))
>>> x = np.linspace(-10, 10, 500)
>>> parameters_list = [(1.5, 0., "solid"), (1.3, 0.5, "dashed"),
... (0., 1.8, "dotted"), (1., 1., "dashdot")]
>>> for params in parameters_list:
... sigma, gamma, linestyle = params
... voigt = voigt_profile(x, sigma, gamma)
... ax.plot(x, voigt, label=rf"$\sigma={sigma},\, \gamma={gamma}$",
... ls=linestyle)
>>> ax.legend()
>>> plt.show()
Verify visually that the Voigt profile indeed arises as the convolution
of a normal and a Cauchy distribution.
>>> from scipy.signal import convolve
>>> x, dx = np.linspace(-10, 10, 500, retstep=True)
>>> def gaussian(x, sigma):
... return np.exp(-0.5 * x**2/sigma**2)/(sigma * np.sqrt(2*np.pi))
>>> def cauchy(x, gamma):
... return gamma/(np.pi * (np.square(x)+gamma**2))
>>> sigma = 2
>>> gamma = 1
>>> gauss_profile = gaussian(x, sigma)
>>> cauchy_profile = cauchy(x, gamma)
>>> convolved = dx * convolve(cauchy_profile, gauss_profile, mode="same")
>>> voigt = voigt_profile(x, sigma, gamma)
>>> fig, ax = plt.subplots(figsize=(8, 8))
>>> ax.plot(x, gauss_profile, label="Gauss: $G$", c='b')
>>> ax.plot(x, cauchy_profile, label="Cauchy: $C$", c='y', ls="dashed")
>>> xx = 0.5*(x[1:] + x[:-1]) # midpoints
>>> ax.plot(xx, convolved[1:], label="Convolution: $G * C$", ls='dashdot',
... c='k')
>>> ax.plot(x, voigt, label="Voigt", ls='dotted', c='r')
>>> ax.legend()
>>> plt.show()
""")
add_newdoc("wrightomega",
r"""
wrightomega(z, out=None)
Wright Omega function.
Defined as the solution to
.. math::
\omega + \log(\omega) = z
where :math:`\log` is the principal branch of the complex logarithm.
Parameters
----------
z : array_like
Points at which to evaluate the Wright Omega function
out : ndarray, optional
Optional output array for the function values
Returns
-------
omega : scalar or ndarray
Values of the Wright Omega function
Notes
-----
.. versionadded:: 0.19.0
The function can also be defined as
.. math::
\omega(z) = W_{K(z)}(e^z)
where :math:`K(z) = \lceil (\Im(z) - \pi)/(2\pi) \rceil` is the
unwinding number and :math:`W` is the Lambert W function.
The implementation here is taken from [1]_.
See Also
--------
lambertw : The Lambert W function
References
----------
.. [1] Lawrence, Corless, and Jeffrey, "Algorithm 917: Complex
Double-Precision Evaluation of the Wright :math:`\omega`
Function." ACM Transactions on Mathematical Software,
2012. :doi:`10.1145/2168773.2168779`.
Examples
--------
>>> import numpy as np
>>> from scipy.special import wrightomega, lambertw
>>> wrightomega([-2, -1, 0, 1, 2])
array([0.12002824, 0.27846454, 0.56714329, 1. , 1.5571456 ])
Complex input:
>>> wrightomega(3 + 5j)
(1.5804428632097158+3.8213626783287937j)
Verify that ``wrightomega(z)`` satisfies ``w + log(w) = z``:
>>> w = -5 + 4j
>>> wrightomega(w + np.log(w))
(-5+4j)
Verify the connection to ``lambertw``:
>>> z = 0.5 + 3j
>>> wrightomega(z)
(0.0966015889280649+1.4937828458191993j)
>>> lambertw(np.exp(z))
(0.09660158892806493+1.4937828458191993j)
>>> z = 0.5 + 4j
>>> wrightomega(z)
(-0.3362123489037213+2.282986001579032j)
>>> lambertw(np.exp(z), k=1)
(-0.33621234890372115+2.282986001579032j)
""")
add_newdoc("agm",
"""
agm(a, b, out=None)
Compute the arithmetic-geometric mean of `a` and `b`.
Start with a_0 = a and b_0 = b and iteratively compute::
a_{n+1} = (a_n + b_n)/2
b_{n+1} = sqrt(a_n*b_n)
a_n and b_n converge to the same limit as n increases; their common
limit is agm(a, b).
Parameters
----------
a, b : array_like
Real values only. If the values are both negative, the result
is negative. If one value is negative and the other is positive,
`nan` is returned.
out : ndarray, optional
Optional output array for the function values
Returns
-------
scalar or ndarray
The arithmetic-geometric mean of `a` and `b`.
Examples
--------
>>> import numpy as np
>>> from scipy.special import agm
>>> a, b = 24.0, 6.0
>>> agm(a, b)
13.458171481725614
Compare that result to the iteration:
>>> while a != b:
... a, b = (a + b)/2, np.sqrt(a*b)
... print("a = %19.16f b=%19.16f" % (a, b))
...
a = 15.0000000000000000 b=12.0000000000000000
a = 13.5000000000000000 b=13.4164078649987388
a = 13.4582039324993694 b=13.4581390309909850
a = 13.4581714817451772 b=13.4581714817060547
a = 13.4581714817256159 b=13.4581714817256159
When array-like arguments are given, broadcasting applies:
>>> a = np.array([[1.5], [3], [6]]) # a has shape (3, 1).
>>> b = np.array([6, 12, 24, 48]) # b has shape (4,).
>>> agm(a, b)
array([[ 3.36454287, 5.42363427, 9.05798751, 15.53650756],
[ 4.37037309, 6.72908574, 10.84726853, 18.11597502],
[ 6. , 8.74074619, 13.45817148, 21.69453707]])
""")
add_newdoc("airy",
r"""
airy(z, out=None)
Airy functions and their derivatives.
Parameters
----------
z : array_like
Real or complex argument.
out : tuple of ndarray, optional
Optional output arrays for the function values
Returns
-------
Ai, Aip, Bi, Bip : 4-tuple of scalar or ndarray
Airy functions Ai and Bi, and their derivatives Aip and Bip.
Notes
-----
The Airy functions Ai and Bi are two independent solutions of
.. math:: y''(x) = x y(x).
For real `z` in [-10, 10], the computation is carried out by calling
the Cephes [1]_ `airy` routine, which uses power series summation
for small `z` and rational minimax approximations for large `z`.
Outside this range, the AMOS [2]_ `zairy` and `zbiry` routines are
employed. They are computed using power series for :math:`|z| < 1` and
the following relations to modified Bessel functions for larger `z`
(where :math:`t \equiv 2 z^{3/2}/3`):
.. math::
Ai(z) = \frac{1}{\pi \sqrt{3}} K_{1/3}(t)
Ai'(z) = -\frac{z}{\pi \sqrt{3}} K_{2/3}(t)
Bi(z) = \sqrt{\frac{z}{3}} \left(I_{-1/3}(t) + I_{1/3}(t) \right)
Bi'(z) = \frac{z}{\sqrt{3}} \left(I_{-2/3}(t) + I_{2/3}(t)\right)
See also
--------
airye : exponentially scaled Airy functions.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
.. [2] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
Examples
--------
Compute the Airy functions on the interval [-15, 5].
>>> import numpy as np
>>> from scipy import special
>>> x = np.linspace(-15, 5, 201)
>>> ai, aip, bi, bip = special.airy(x)
Plot Ai(x) and Bi(x).
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, ai, 'r', label='Ai(x)')
>>> plt.plot(x, bi, 'b--', label='Bi(x)')
>>> plt.ylim(-0.5, 1.0)
>>> plt.grid()
>>> plt.legend(loc='upper left')
>>> plt.show()
""")
add_newdoc("airye",
"""
airye(z, out=None)
Exponentially scaled Airy functions and their derivatives.
Scaling::
eAi = Ai * exp(2.0/3.0*z*sqrt(z))
eAip = Aip * exp(2.0/3.0*z*sqrt(z))
eBi = Bi * exp(-abs(2.0/3.0*(z*sqrt(z)).real))
eBip = Bip * exp(-abs(2.0/3.0*(z*sqrt(z)).real))
Parameters
----------
z : array_like
Real or complex argument.
out : tuple of ndarray, optional
Optional output arrays for the function values
Returns
-------
eAi, eAip, eBi, eBip : 4-tuple of scalar or ndarray
Exponentially scaled Airy functions eAi and eBi, and their derivatives
eAip and eBip
Notes
-----
Wrapper for the AMOS [1]_ routines `zairy` and `zbiry`.
See also
--------
airy
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
Examples
--------
We can compute exponentially scaled Airy functions and their derivatives:
>>> import numpy as np
>>> from scipy.special import airye
>>> import matplotlib.pyplot as plt
>>> z = np.linspace(0, 50, 500)
>>> eAi, eAip, eBi, eBip = airye(z)
>>> f, ax = plt.subplots(2, 1, sharex=True)
>>> for ind, data in enumerate([[eAi, eAip, ["eAi", "eAip"]],
... [eBi, eBip, ["eBi", "eBip"]]]):
... ax[ind].plot(z, data[0], "-r", z, data[1], "-b")
... ax[ind].legend(data[2])
... ax[ind].grid(True)
>>> plt.show()
We can compute these using usual non-scaled Airy functions by:
>>> from scipy.special import airy
>>> Ai, Aip, Bi, Bip = airy(z)
>>> np.allclose(eAi, Ai * np.exp(2.0 / 3.0 * z * np.sqrt(z)))
True
>>> np.allclose(eAip, Aip * np.exp(2.0 / 3.0 * z * np.sqrt(z)))
True
>>> np.allclose(eBi, Bi * np.exp(-abs(np.real(2.0 / 3.0 * z * np.sqrt(z)))))
True
>>> np.allclose(eBip, Bip * np.exp(-abs(np.real(2.0 / 3.0 * z * np.sqrt(z)))))
True
Comparing non-scaled and exponentially scaled ones, the usual non-scaled
function quickly underflows for large values, whereas the exponentially
scaled function does not.
>>> airy(200)
(0.0, 0.0, nan, nan)
>>> airye(200)
(0.07501041684381093, -1.0609012305109042, 0.15003188417418148, 2.1215836725571093)
""")
add_newdoc("bdtr",
r"""
bdtr(k, n, p, out=None)
Binomial distribution cumulative distribution function.
Sum of the terms 0 through `floor(k)` of the Binomial probability density.
.. math::
\mathrm{bdtr}(k, n, p) = \sum_{j=0}^{\lfloor k \rfloor} {{n}\choose{j}} p^j (1-p)^{n-j}
Parameters
----------
k : array_like
Number of successes (double), rounded down to the nearest integer.
n : array_like
Number of events (int).
p : array_like
Probability of success in a single event (float).
out : ndarray, optional
Optional output array for the function values
Returns
-------
y : scalar or ndarray
Probability of `floor(k)` or fewer successes in `n` independent events with
success probabilities of `p`.
Notes
-----
The terms are not summed directly; instead the regularized incomplete beta
function is employed, according to the formula,
.. math::
\mathrm{bdtr}(k, n, p) = I_{1 - p}(n - \lfloor k \rfloor, \lfloor k \rfloor + 1).
Wrapper for the Cephes [1]_ routine `bdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("bdtrc",
r"""
bdtrc(k, n, p, out=None)
Binomial distribution survival function.
Sum of the terms `floor(k) + 1` through `n` of the binomial probability
density,
.. math::
\mathrm{bdtrc}(k, n, p) = \sum_{j=\lfloor k \rfloor +1}^n {{n}\choose{j}} p^j (1-p)^{n-j}
Parameters
----------
k : array_like
Number of successes (double), rounded down to nearest integer.
n : array_like
Number of events (int)
p : array_like
Probability of success in a single event.
out : ndarray, optional
Optional output array for the function values
Returns
-------
y : scalar or ndarray
Probability of `floor(k) + 1` or more successes in `n` independent
events with success probabilities of `p`.
See also
--------
bdtr
betainc
Notes
-----
The terms are not summed directly; instead the regularized incomplete beta
function is employed, according to the formula,
.. math::
\mathrm{bdtrc}(k, n, p) = I_{p}(\lfloor k \rfloor + 1, n - \lfloor k \rfloor).
Wrapper for the Cephes [1]_ routine `bdtrc`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("bdtri",
r"""
bdtri(k, n, y, out=None)
Inverse function to `bdtr` with respect to `p`.
Finds the event probability `p` such that the sum of the terms 0 through
`k` of the binomial probability density is equal to the given cumulative
probability `y`.
Parameters
----------
k : array_like
Number of successes (float), rounded down to the nearest integer.
n : array_like
Number of events (float)
y : array_like
Cumulative probability (probability of `k` or fewer successes in `n`
events).
out : ndarray, optional
Optional output array for the function values
Returns
-------
p : scalar or ndarray
The event probability such that `bdtr(\lfloor k \rfloor, n, p) = y`.
See also
--------
bdtr
betaincinv
Notes
-----
The computation is carried out using the inverse beta integral function
and the relation,::
1 - p = betaincinv(n - k, k + 1, y).
Wrapper for the Cephes [1]_ routine `bdtri`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("bdtrik",
"""
bdtrik(y, n, p, out=None)
Inverse function to `bdtr` with respect to `k`.
Finds the number of successes `k` such that the sum of the terms 0 through
`k` of the Binomial probability density for `n` events with probability
`p` is equal to the given cumulative probability `y`.
Parameters
----------
y : array_like
Cumulative probability (probability of `k` or fewer successes in `n`
events).
n : array_like
Number of events (float).
p : array_like
Success probability (float).
out : ndarray, optional
Optional output array for the function values
Returns
-------
k : scalar or ndarray
The number of successes `k` such that `bdtr(k, n, p) = y`.
See also
--------
bdtr
Notes
-----
Formula 26.5.24 of [1]_ is used to reduce the binomial distribution to the
cumulative incomplete beta distribution.
Computation of `k` involves a search for a value that produces the desired
value of `y`. The search relies on the monotonicity of `y` with `k`.
Wrapper for the CDFLIB [2]_ Fortran routine `cdfbin`.
References
----------
.. [1] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
.. [2] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
""")
add_newdoc("bdtrin",
"""
bdtrin(k, y, p, out=None)
Inverse function to `bdtr` with respect to `n`.
Finds the number of events `n` such that the sum of the terms 0 through
`k` of the Binomial probability density for events with probability `p` is
equal to the given cumulative probability `y`.
Parameters
----------
k : array_like
Number of successes (float).
y : array_like
Cumulative probability (probability of `k` or fewer successes in `n`
events).
p : array_like
Success probability (float).
out : ndarray, optional
Optional output array for the function values
Returns
-------
n : scalar or ndarray
The number of events `n` such that `bdtr(k, n, p) = y`.
See also
--------
bdtr
Notes
-----
Formula 26.5.24 of [1]_ is used to reduce the binomial distribution to the
cumulative incomplete beta distribution.
Computation of `n` involves a search for a value that produces the desired
value of `y`. The search relies on the monotonicity of `y` with `n`.
Wrapper for the CDFLIB [2]_ Fortran routine `cdfbin`.
References
----------
.. [1] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
.. [2] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
""")
add_newdoc(
"binom",
r"""
binom(x, y, out=None)
Binomial coefficient considered as a function of two real variables.
For real arguments, the binomial coefficient is defined as
.. math::
\binom{x}{y} = \frac{\Gamma(x + 1)}{\Gamma(y + 1)\Gamma(x - y + 1)} =
\frac{1}{(x + 1)\mathrm{B}(x - y + 1, y + 1)}
Where :math:`\Gamma` is the Gamma function (`gamma`) and :math:`\mathrm{B}`
is the Beta function (`beta`) [1]_.
Parameters
----------
x, y: array_like
Real arguments to :math:`\binom{x}{y}`.
out : ndarray, optional
Optional output array for the function values
Returns
-------
scalar or ndarray
Value of binomial coefficient.
See Also
--------
comb : The number of combinations of N things taken k at a time.
Notes
-----
The Gamma function has poles at non-positive integers and tends to either
positive or negative infinity depending on the direction on the real line
from which a pole is approached. When considered as a function of two real
variables, :math:`\binom{x}{y}` is thus undefined when `x` is a negative
integer. `binom` returns ``nan`` when ``x`` is a negative integer. This
is the case even when ``x`` is a negative integer and ``y`` an integer,
contrary to the usual convention for defining :math:`\binom{n}{k}` when it
is considered as a function of two integer variables.
References
----------
.. [1] https://en.wikipedia.org/wiki/Binomial_coefficient
Examples
--------
The following examples illustrate the ways in which `binom` differs from
the function `comb`.
>>> from scipy.special import binom, comb
When ``exact=False`` and ``x`` and ``y`` are both positive, `comb` calls
`binom` internally.
>>> x, y = 3, 2
>>> (binom(x, y), comb(x, y), comb(x, y, exact=True))
(3.0, 3.0, 3)
For larger values, `comb` with ``exact=True`` no longer agrees
with `binom`.
>>> x, y = 43, 23
>>> (binom(x, y), comb(x, y), comb(x, y, exact=True))
(960566918219.9999, 960566918219.9999, 960566918220)
`binom` returns ``nan`` when ``x`` is a negative integer, but is otherwise
defined for negative arguments. `comb` returns 0 whenever one of ``x`` or
``y`` is negative or ``x`` is less than ``y``.
>>> x, y = -3, 2
>>> (binom(x, y), comb(x, y), comb(x, y, exact=True))
(nan, 0.0, 0)
>>> x, y = -3.1, 2.2
>>> (binom(x, y), comb(x, y), comb(x, y, exact=True))
(18.714147876804432, 0.0, 0)
>>> x, y = 2.2, 3.1
>>> (binom(x, y), comb(x, y), comb(x, y, exact=True))
(0.037399983365134115, 0.0, 0)
"""
)
add_newdoc("btdtria",
r"""
btdtria(p, b, x, out=None)
Inverse of `btdtr` with respect to `a`.
This is the inverse of the beta cumulative distribution function, `btdtr`,
considered as a function of `a`, returning the value of `a` for which
`btdtr(a, b, x) = p`, or
.. math::
p = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
Parameters
----------
p : array_like
Cumulative probability, in [0, 1].
b : array_like
Shape parameter (`b` > 0).
x : array_like
The quantile, in [0, 1].
out : ndarray, optional
Optional output array for the function values
Returns
-------
a : scalar or ndarray
The value of the shape parameter `a` such that `btdtr(a, b, x) = p`.
See Also
--------
btdtr : Cumulative distribution function of the beta distribution.
btdtri : Inverse with respect to `x`.
btdtrib : Inverse with respect to `b`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfbet`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `a` involves a search for a value
that produces the desired value of `p`. The search relies on the
monotonicity of `p` with `a`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Algorithm 708: Significant Digit Computation of the Incomplete Beta
Function Ratios. ACM Trans. Math. Softw. 18 (1993), 360-373.
""")
# Docstring for the `btdtrib` ufunc (inverse of `btdtr` with respect to `b`).
# Fix: the signature line previously said ``btdtria(a, p, x, out=None)`` — a
# copy-paste from the sibling `btdtria` docstring — but this entry documents
# `btdtrib`, as the name passed to `add_newdoc` and the rest of the text show.
add_newdoc("btdtrib",
    r"""
    btdtrib(a, p, x, out=None)
    Inverse of `btdtr` with respect to `b`.
    This is the inverse of the beta cumulative distribution function, `btdtr`,
    considered as a function of `b`, returning the value of `b` for which
    `btdtr(a, b, x) = p`, or
    .. math::
        p = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
    Parameters
    ----------
    a : array_like
        Shape parameter (`a` > 0).
    p : array_like
        Cumulative probability, in [0, 1].
    x : array_like
        The quantile, in [0, 1].
    out : ndarray, optional
        Optional output array for the function values
    Returns
    -------
    b : scalar or ndarray
        The value of the shape parameter `b` such that `btdtr(a, b, x) = p`.
    See Also
    --------
    btdtr : Cumulative distribution function of the beta distribution.
    btdtri : Inverse with respect to `x`.
    btdtria : Inverse with respect to `a`.
    Notes
    -----
    Wrapper for the CDFLIB [1]_ Fortran routine `cdfbet`.
    The cumulative distribution function `p` is computed using a routine by
    DiDinato and Morris [2]_. Computation of `b` involves a search for a value
    that produces the desired value of `p`. The search relies on the
    monotonicity of `p` with `b`.
    References
    ----------
    .. [1] Barry Brown, James Lovato, and Kathy Russell,
           CDFLIB: Library of Fortran Routines for Cumulative Distribution
           Functions, Inverses, and Other Parameters.
    .. [2] DiDinato, A. R. and Morris, A. H.,
           Algorithm 708: Significant Digit Computation of the Incomplete Beta
           Function Ratios. ACM Trans. Math. Softw. 18 (1993), 360-373.
    """)
add_newdoc("bei",
r"""
bei(x, out=None)
Kelvin function bei.
Defined as
.. math::
\mathrm{bei}(x) = \Im[J_0(x e^{3 \pi i / 4})]
where :math:`J_0` is the Bessel function of the first kind of
order zero (see `jv`). See [dlmf]_ for more details.
Parameters
----------
x : array_like
Real argument.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
Values of the Kelvin function.
See Also
--------
ber : the corresponding real part
beip : the derivative of bei
jv : Bessel function of the first kind
References
----------
.. [dlmf] NIST, Digital Library of Mathematical Functions,
https://dlmf.nist.gov/10.61
Examples
--------
It can be expressed using Bessel functions.
>>> import numpy as np
>>> import scipy.special as sc
>>> x = np.array([1.0, 2.0, 3.0, 4.0])
>>> sc.jv(0, x * np.exp(3 * np.pi * 1j / 4)).imag
array([0.24956604, 0.97229163, 1.93758679, 2.29269032])
>>> sc.bei(x)
array([0.24956604, 0.97229163, 1.93758679, 2.29269032])
""")
add_newdoc("beip",
r"""
beip(x, out=None)
Derivative of the Kelvin function bei.
Parameters
----------
x : array_like
Real argument.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
The values of the derivative of bei.
See Also
--------
bei
References
----------
.. [dlmf] NIST, Digital Library of Mathematical Functions,
https://dlmf.nist.gov/10#PT5
""")
# Docstring for the `ber` ufunc (Kelvin function ber, the real part).
# Fix two See Also entries: `bei` is the corresponding *imaginary* part (the
# definition above takes \Re for ber and the `bei` docstring takes \Im), and
# `berp` is the derivative of *ber*, not of bei — mirroring the correct
# pattern used in the `bei` docstring (ber/beip).
add_newdoc("ber",
    r"""
    ber(x, out=None)
    Kelvin function ber.
    Defined as
    .. math::
        \mathrm{ber}(x) = \Re[J_0(x e^{3 \pi i / 4})]
    where :math:`J_0` is the Bessel function of the first kind of
    order zero (see `jv`). See [dlmf]_ for more details.
    Parameters
    ----------
    x : array_like
        Real argument.
    out : ndarray, optional
        Optional output array for the function results.
    Returns
    -------
    scalar or ndarray
        Values of the Kelvin function.
    See Also
    --------
    bei : the corresponding imaginary part
    berp : the derivative of ber
    jv : Bessel function of the first kind
    References
    ----------
    .. [dlmf] NIST, Digital Library of Mathematical Functions,
        https://dlmf.nist.gov/10.61
    Examples
    --------
    It can be expressed using Bessel functions.
    >>> import numpy as np
    >>> import scipy.special as sc
    >>> x = np.array([1.0, 2.0, 3.0, 4.0])
    >>> sc.jv(0, x * np.exp(3 * np.pi * 1j / 4)).real
    array([ 0.98438178,  0.75173418, -0.22138025, -2.56341656])
    >>> sc.ber(x)
    array([ 0.98438178,  0.75173418, -0.22138025, -2.56341656])
    """)
add_newdoc("berp",
r"""
berp(x, out=None)
Derivative of the Kelvin function ber.
Parameters
----------
x : array_like
Real argument.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
The values of the derivative of ber.
See Also
--------
ber
References
----------
.. [dlmf] NIST, Digital Library of Mathematical Functions,
https://dlmf.nist.gov/10#PT5
""")
add_newdoc("besselpoly",
r"""
besselpoly(a, lmb, nu, out=None)
Weighted integral of the Bessel function of the first kind.
Computes
.. math::
\int_0^1 x^\lambda J_\nu(2 a x) \, dx
where :math:`J_\nu` is a Bessel function and :math:`\lambda=lmb`,
:math:`\nu=nu`.
Parameters
----------
a : array_like
Scale factor inside the Bessel function.
lmb : array_like
Power of `x`
nu : array_like
Order of the Bessel function.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
Value of the integral.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
Examples
--------
Evaluate the function for one parameter set.
>>> from scipy.special import besselpoly
>>> besselpoly(1, 1, 1)
0.24449718372863877
Evaluate the function for different scale factors.
>>> import numpy as np
>>> factors = np.array([0., 3., 6.])
>>> besselpoly(factors, 1, 1)
array([ 0. , -0.00549029, 0.00140174])
Plot the function for varying powers, orders and scales.
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> powers = np.linspace(0, 10, 100)
>>> orders = [1, 2, 3]
>>> scales = [1, 2]
>>> all_combinations = [(order, scale) for order in orders
... for scale in scales]
>>> for order, scale in all_combinations:
... ax.plot(powers, besselpoly(scale, powers, order),
... label=rf"$\nu={order}, a={scale}$")
>>> ax.legend()
>>> ax.set_xlabel(r"$\lambda$")
>>> ax.set_ylabel(r"$\int_0^1 x^{\lambda} J_{\nu}(2ax)\,dx$")
>>> plt.show()
""")
add_newdoc("beta",
r"""
beta(a, b, out=None)
Beta function.
This function is defined in [1]_ as
.. math::
B(a, b) = \int_0^1 t^{a-1}(1-t)^{b-1}dt
= \frac{\Gamma(a)\Gamma(b)}{\Gamma(a+b)},
where :math:`\Gamma` is the gamma function.
Parameters
----------
a, b : array_like
Real-valued arguments
out : ndarray, optional
Optional output array for the function result
Returns
-------
scalar or ndarray
Value of the beta function
See Also
--------
gamma : the gamma function
betainc : the regularized incomplete beta function
betaln : the natural logarithm of the absolute
value of the beta function
References
----------
.. [1] NIST Digital Library of Mathematical Functions,
Eq. 5.12.1. https://dlmf.nist.gov/5.12
Examples
--------
>>> import scipy.special as sc
The beta function relates to the gamma function by the
definition given above:
>>> sc.beta(2, 3)
0.08333333333333333
>>> sc.gamma(2)*sc.gamma(3)/sc.gamma(2 + 3)
0.08333333333333333
As this relationship demonstrates, the beta function
is symmetric:
>>> sc.beta(1.7, 2.4)
0.16567527689031739
>>> sc.beta(2.4, 1.7)
0.16567527689031739
This function satisfies :math:`B(1, b) = 1/b`:
>>> sc.beta(1, 4)
0.25
""")
add_newdoc("betainc",
r"""
betainc(a, b, x, out=None)
Regularized incomplete beta function.
Computes the regularized incomplete beta function, defined as [1]_:
.. math::
I_x(a, b) = \frac{\Gamma(a+b)}{\Gamma(a)\Gamma(b)} \int_0^x
t^{a-1}(1-t)^{b-1}dt,
for :math:`0 \leq x \leq 1`.
Parameters
----------
a, b : array_like
Positive, real-valued parameters
x : array_like
Real-valued such that :math:`0 \leq x \leq 1`,
the upper limit of integration
out : ndarray, optional
Optional output array for the function values
Returns
-------
scalar or ndarray
Value of the regularized incomplete beta function
See Also
--------
beta : beta function
betaincinv : inverse of the regularized incomplete beta function
Notes
-----
The term *regularized* in the name of this function refers to the
scaling of the function by the gamma function terms shown in the
formula. When not qualified as *regularized*, the name *incomplete
beta function* often refers to just the integral expression,
without the gamma terms. One can use the function `beta` from
`scipy.special` to get this "nonregularized" incomplete beta
function by multiplying the result of ``betainc(a, b, x)`` by
``beta(a, b)``.
References
----------
.. [1] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/8.17
Examples
--------
Let :math:`B(a, b)` be the `beta` function.
>>> import scipy.special as sc
The coefficient in terms of `gamma` is equal to
:math:`1/B(a, b)`. Also, when :math:`x=1`
the integral is equal to :math:`B(a, b)`.
Therefore, :math:`I_{x=1}(a, b) = 1` for any :math:`a, b`.
>>> sc.betainc(0.2, 3.5, 1.0)
1.0
It satisfies
:math:`I_x(a, b) = x^a F(a, 1-b, a+1, x)/ (aB(a, b))`,
where :math:`F` is the hypergeometric function `hyp2f1`:
>>> a, b, x = 1.4, 3.1, 0.5
>>> x**a * sc.hyp2f1(a, 1 - b, a + 1, x)/(a * sc.beta(a, b))
0.8148904036225295
>>> sc.betainc(a, b, x)
0.8148904036225296
This functions satisfies the relationship
:math:`I_x(a, b) = 1 - I_{1-x}(b, a)`:
>>> sc.betainc(2.2, 3.1, 0.4)
0.49339638807619446
>>> 1 - sc.betainc(3.1, 2.2, 1 - 0.4)
0.49339638807619446
""")
add_newdoc("betaincinv",
r"""
betaincinv(a, b, y, out=None)
Inverse of the regularized incomplete beta function.
Computes :math:`x` such that:
.. math::
y = I_x(a, b) = \frac{\Gamma(a+b)}{\Gamma(a)\Gamma(b)}
\int_0^x t^{a-1}(1-t)^{b-1}dt,
where :math:`I_x` is the normalized incomplete beta
function `betainc` and
:math:`\Gamma` is the `gamma` function [1]_.
Parameters
----------
a, b : array_like
Positive, real-valued parameters
y : array_like
Real-valued input
out : ndarray, optional
Optional output array for function values
Returns
-------
scalar or ndarray
Value of the inverse of the regularized incomplete beta function
See Also
--------
betainc : regularized incomplete beta function
gamma : gamma function
References
----------
.. [1] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/8.17
Examples
--------
>>> import scipy.special as sc
This function is the inverse of `betainc` for fixed
values of :math:`a` and :math:`b`.
>>> a, b = 1.2, 3.1
>>> y = sc.betainc(a, b, 0.2)
>>> sc.betaincinv(a, b, y)
0.2
>>>
>>> a, b = 7.5, 0.4
>>> x = sc.betaincinv(a, b, 0.5)
>>> sc.betainc(a, b, x)
0.5
""")
add_newdoc("betaln",
"""
betaln(a, b, out=None)
Natural logarithm of absolute value of beta function.
Computes ``ln(abs(beta(a, b)))``.
Parameters
----------
a, b : array_like
Positive, real-valued parameters
out : ndarray, optional
Optional output array for function values
Returns
-------
scalar or ndarray
Value of the betaln function
See Also
--------
gamma : the gamma function
betainc : the regularized incomplete beta function
beta : the beta function
Examples
--------
>>> import numpy as np
>>> from scipy.special import betaln, beta
Verify that, for moderate values of ``a`` and ``b``, ``betaln(a, b)``
is the same as ``log(beta(a, b))``:
>>> betaln(3, 4)
-4.0943445622221
>>> np.log(beta(3, 4))
-4.0943445622221
In the following ``beta(a, b)`` underflows to 0, so we can't compute
the logarithm of the actual value.
>>> a = 400
>>> b = 900
>>> beta(a, b)
0.0
We can compute the logarithm of ``beta(a, b)`` by using `betaln`:
>>> betaln(a, b)
-804.3069951764146
""")
add_newdoc("boxcox",
"""
boxcox(x, lmbda, out=None)
Compute the Box-Cox transformation.
The Box-Cox transformation is::
y = (x**lmbda - 1) / lmbda if lmbda != 0
log(x) if lmbda == 0
Returns `nan` if ``x < 0``.
Returns `-inf` if ``x == 0`` and ``lmbda < 0``.
Parameters
----------
x : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
out : ndarray, optional
Optional output array for the function values
Returns
-------
y : scalar or ndarray
Transformed data.
Notes
-----
.. versionadded:: 0.14.0
Examples
--------
>>> from scipy.special import boxcox
>>> boxcox([1, 4, 10], 2.5)
array([ 0. , 12.4 , 126.09110641])
>>> boxcox(2, [0, 1, 2])
array([ 0.69314718, 1. , 1.5 ])
""")
add_newdoc("boxcox1p",
"""
boxcox1p(x, lmbda, out=None)
Compute the Box-Cox transformation of 1 + `x`.
The Box-Cox transformation computed by `boxcox1p` is::
y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0
log(1+x) if lmbda == 0
Returns `nan` if ``x < -1``.
Returns `-inf` if ``x == -1`` and ``lmbda < 0``.
Parameters
----------
x : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
out : ndarray, optional
Optional output array for the function values
Returns
-------
y : scalar or ndarray
Transformed data.
Notes
-----
.. versionadded:: 0.14.0
Examples
--------
>>> from scipy.special import boxcox1p
>>> boxcox1p(1e-4, [0, 0.5, 1])
array([ 9.99950003e-05, 9.99975001e-05, 1.00000000e-04])
>>> boxcox1p([0.01, 0.1], 0.25)
array([ 0.00996272, 0.09645476])
""")
add_newdoc("inv_boxcox",
"""
inv_boxcox(y, lmbda, out=None)
Compute the inverse of the Box-Cox transformation.
Find ``x`` such that::
y = (x**lmbda - 1) / lmbda if lmbda != 0
log(x) if lmbda == 0
Parameters
----------
y : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
out : ndarray, optional
Optional output array for the function values
Returns
-------
x : scalar or ndarray
Transformed data.
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
>>> from scipy.special import boxcox, inv_boxcox
>>> y = boxcox([1, 4, 10], 2.5)
>>> inv_boxcox(y, 2.5)
array([1., 4., 10.])
""")
add_newdoc("inv_boxcox1p",
"""
inv_boxcox1p(y, lmbda, out=None)
Compute the inverse of the Box-Cox transformation.
Find ``x`` such that::
y = ((1+x)**lmbda - 1) / lmbda if lmbda != 0
log(1+x) if lmbda == 0
Parameters
----------
y : array_like
Data to be transformed.
lmbda : array_like
Power parameter of the Box-Cox transform.
out : ndarray, optional
Optional output array for the function values
Returns
-------
x : scalar or ndarray
Transformed data.
Notes
-----
.. versionadded:: 0.16.0
Examples
--------
>>> from scipy.special import boxcox1p, inv_boxcox1p
>>> y = boxcox1p([1, 4, 10], 2.5)
>>> inv_boxcox1p(y, 2.5)
array([1., 4., 10.])
""")
add_newdoc("btdtr",
r"""
btdtr(a, b, x, out=None)
Cumulative distribution function of the beta distribution.
Returns the integral from zero to `x` of the beta probability density
function,
.. math::
I = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
where :math:`\Gamma` is the gamma function.
Parameters
----------
a : array_like
Shape parameter (a > 0).
b : array_like
Shape parameter (b > 0).
x : array_like
Upper limit of integration, in [0, 1].
out : ndarray, optional
Optional output array for the function values
Returns
-------
I : scalar or ndarray
Cumulative distribution function of the beta distribution with
parameters `a` and `b` at `x`.
See Also
--------
betainc
Notes
-----
This function is identical to the incomplete beta integral function
`betainc`.
Wrapper for the Cephes [1]_ routine `btdtr`.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("btdtri",
r"""
btdtri(a, b, p, out=None)
The `p`-th quantile of the beta distribution.
This function is the inverse of the beta cumulative distribution function,
`btdtr`, returning the value of `x` for which `btdtr(a, b, x) = p`, or
.. math::
p = \int_0^x \frac{\Gamma(a + b)}{\Gamma(a)\Gamma(b)} t^{a-1} (1-t)^{b-1}\,dt
Parameters
----------
a : array_like
Shape parameter (`a` > 0).
b : array_like
Shape parameter (`b` > 0).
p : array_like
Cumulative probability, in [0, 1].
out : ndarray, optional
Optional output array for the function values
Returns
-------
x : scalar or ndarray
The quantile corresponding to `p`.
See Also
--------
betaincinv
btdtr
Notes
-----
The value of `x` is found by interval halving or Newton iterations.
Wrapper for the Cephes [1]_ routine `incbi`, which solves the equivalent
problem of finding the inverse of the incomplete beta integral.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("cbrt",
"""
cbrt(x, out=None)
Element-wise cube root of `x`.
Parameters
----------
x : array_like
`x` must contain real numbers.
out : ndarray, optional
Optional output array for the function values
Returns
-------
scalar or ndarray
The cube root of each value in `x`.
Examples
--------
>>> from scipy.special import cbrt
>>> cbrt(8)
2.0
>>> cbrt([-8, -3, 0.125, 1.331])
array([-2. , -1.44224957, 0.5 , 1.1 ])
""")
add_newdoc("chdtr",
r"""
chdtr(v, x, out=None)
Chi square cumulative distribution function.
Returns the area under the left tail (from 0 to `x`) of the Chi
square probability density function with `v` degrees of freedom:
.. math::
\frac{1}{2^{v/2} \Gamma(v/2)} \int_0^x t^{v/2 - 1} e^{-t/2} dt
Here :math:`\Gamma` is the Gamma function; see `gamma`. This
integral can be expressed in terms of the regularized lower
incomplete gamma function `gammainc` as
``gammainc(v / 2, x / 2)``. [1]_
Parameters
----------
v : array_like
Degrees of freedom.
x : array_like
Upper bound of the integral.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
Values of the cumulative distribution function.
See Also
--------
chdtrc, chdtri, chdtriv, gammainc
References
----------
.. [1] Chi-Square distribution,
https://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm
Examples
--------
>>> import numpy as np
>>> import scipy.special as sc
It can be expressed in terms of the regularized lower incomplete
gamma function.
>>> v = 1
>>> x = np.arange(4)
>>> sc.chdtr(v, x)
array([0. , 0.68268949, 0.84270079, 0.91673548])
>>> sc.gammainc(v / 2, x / 2)
array([0. , 0.68268949, 0.84270079, 0.91673548])
""")
add_newdoc("chdtrc",
r"""
chdtrc(v, x, out=None)
Chi square survival function.
Returns the area under the right hand tail (from `x` to infinity)
of the Chi square probability density function with `v` degrees of
freedom:
.. math::
\frac{1}{2^{v/2} \Gamma(v/2)} \int_x^\infty t^{v/2 - 1} e^{-t/2} dt
Here :math:`\Gamma` is the Gamma function; see `gamma`. This
integral can be expressed in terms of the regularized upper
incomplete gamma function `gammaincc` as
``gammaincc(v / 2, x / 2)``. [1]_
Parameters
----------
v : array_like
Degrees of freedom.
x : array_like
Lower bound of the integral.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
Values of the survival function.
See Also
--------
chdtr, chdtri, chdtriv, gammaincc
References
----------
.. [1] Chi-Square distribution,
https://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm
Examples
--------
>>> import numpy as np
>>> import scipy.special as sc
It can be expressed in terms of the regularized upper incomplete
gamma function.
>>> v = 1
>>> x = np.arange(4)
>>> sc.chdtrc(v, x)
array([1. , 0.31731051, 0.15729921, 0.08326452])
>>> sc.gammaincc(v / 2, x / 2)
array([1. , 0.31731051, 0.15729921, 0.08326452])
""")
add_newdoc("chdtri",
"""
chdtri(v, p, out=None)
Inverse to `chdtrc` with respect to `x`.
Returns `x` such that ``chdtrc(v, x) == p``.
Parameters
----------
v : array_like
Degrees of freedom.
p : array_like
Probability.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
x : scalar or ndarray
Value so that the probability a Chi square random variable
with `v` degrees of freedom is greater than `x` equals `p`.
See Also
--------
chdtrc, chdtr, chdtriv
References
----------
.. [1] Chi-Square distribution,
https://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm
Examples
--------
>>> import scipy.special as sc
It inverts `chdtrc`.
>>> v, p = 1, 0.3
>>> sc.chdtrc(v, sc.chdtri(v, p))
0.3
>>> x = 1
>>> sc.chdtri(v, sc.chdtrc(v, x))
1.0
""")
add_newdoc("chdtriv",
"""
chdtriv(p, x, out=None)
Inverse to `chdtr` with respect to `v`.
Returns `v` such that ``chdtr(v, x) == p``.
Parameters
----------
p : array_like
Probability that the Chi square random variable is less than
or equal to `x`.
x : array_like
Nonnegative input.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
Degrees of freedom.
See Also
--------
chdtr, chdtrc, chdtri
References
----------
.. [1] Chi-Square distribution,
https://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm
Examples
--------
>>> import scipy.special as sc
It inverts `chdtr`.
>>> p, x = 0.5, 1
>>> sc.chdtr(sc.chdtriv(p, x), x)
0.5000000000202172
>>> v = 1
    >>> sc.chdtriv(sc.chdtr(v, x), x)
1.0000000000000013
""")
add_newdoc("chndtr",
r"""
chndtr(x, df, nc, out=None)
Non-central chi square cumulative distribution function
The cumulative distribution function is given by:
.. math::
P(\chi^{\prime 2} \vert \nu, \lambda) =\sum_{j=0}^{\infty}
e^{-\lambda /2}
\frac{(\lambda /2)^j}{j!} P(\chi^{\prime 2} \vert \nu + 2j),
where :math:`\nu > 0` is the degrees of freedom (``df``) and
:math:`\lambda \geq 0` is the non-centrality parameter (``nc``).
Parameters
----------
x : array_like
Upper bound of the integral; must satisfy ``x >= 0``
df : array_like
Degrees of freedom; must satisfy ``df > 0``
nc : array_like
Non-centrality parameter; must satisfy ``nc >= 0``
out : ndarray, optional
Optional output array for the function results
Returns
-------
x : scalar or ndarray
Value of the non-central chi square cumulative distribution function.
See Also
--------
chndtrix, chndtridf, chndtrinc
""")
add_newdoc("chndtrix",
"""
chndtrix(p, df, nc, out=None)
Inverse to `chndtr` vs `x`
Calculated using a search to find a value for `x` that produces the
desired value of `p`.
Parameters
----------
p : array_like
Probability; must satisfy ``0 <= p < 1``
df : array_like
Degrees of freedom; must satisfy ``df > 0``
nc : array_like
Non-centrality parameter; must satisfy ``nc >= 0``
out : ndarray, optional
Optional output array for the function results
Returns
-------
x : scalar or ndarray
        Value so that the probability a non-central Chi square random variable
        with `df` degrees of freedom and non-centrality, `nc`, is less than
        or equal to `x` equals `p`.
See Also
--------
chndtr, chndtridf, chndtrinc
""")
add_newdoc("chndtridf",
"""
chndtridf(x, p, nc, out=None)
Inverse to `chndtr` vs `df`
Calculated using a search to find a value for `df` that produces the
desired value of `p`.
Parameters
----------
x : array_like
Upper bound of the integral; must satisfy ``x >= 0``
p : array_like
Probability; must satisfy ``0 <= p < 1``
nc : array_like
Non-centrality parameter; must satisfy ``nc >= 0``
out : ndarray, optional
Optional output array for the function results
Returns
-------
df : scalar or ndarray
Degrees of freedom
See Also
--------
chndtr, chndtrix, chndtrinc
""")
add_newdoc("chndtrinc",
"""
chndtrinc(x, df, p, out=None)
Inverse to `chndtr` vs `nc`
    Calculated using a search to find a value for `nc` that produces the
    desired value of `p`.
Parameters
----------
x : array_like
Upper bound of the integral; must satisfy ``x >= 0``
df : array_like
Degrees of freedom; must satisfy ``df > 0``
p : array_like
Probability; must satisfy ``0 <= p < 1``
out : ndarray, optional
Optional output array for the function results
Returns
-------
nc : scalar or ndarray
Non-centrality
See Also
--------
    chndtr, chndtrix, chndtridf
""")
add_newdoc("cosdg",
"""
cosdg(x, out=None)
Cosine of the angle `x` given in degrees.
Parameters
----------
x : array_like
Angle, given in degrees.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
Cosine of the input.
See Also
--------
sindg, tandg, cotdg
Examples
--------
>>> import numpy as np
>>> import scipy.special as sc
It is more accurate than using cosine directly.
>>> x = 90 + 180 * np.arange(3)
>>> sc.cosdg(x)
array([-0., 0., -0.])
>>> np.cos(x * np.pi / 180)
array([ 6.1232340e-17, -1.8369702e-16, 3.0616170e-16])
""")
add_newdoc("cosm1",
"""
cosm1(x, out=None)
cos(x) - 1 for use when `x` is near zero.
Parameters
----------
x : array_like
Real valued argument.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
Values of ``cos(x) - 1``.
See Also
--------
expm1, log1p
Examples
--------
>>> import numpy as np
>>> import scipy.special as sc
It is more accurate than computing ``cos(x) - 1`` directly for
``x`` around 0.
>>> x = 1e-30
>>> np.cos(x) - 1
0.0
>>> sc.cosm1(x)
-5.0000000000000005e-61
""")
add_newdoc("cotdg",
"""
cotdg(x, out=None)
Cotangent of the angle `x` given in degrees.
Parameters
----------
x : array_like
Angle, given in degrees.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
Cotangent at the input.
See Also
--------
sindg, cosdg, tandg
Examples
--------
>>> import numpy as np
>>> import scipy.special as sc
It is more accurate than using cotangent directly.
>>> x = 90 + 180 * np.arange(3)
>>> sc.cotdg(x)
array([0., 0., 0.])
>>> 1 / np.tan(x * np.pi / 180)
array([6.1232340e-17, 1.8369702e-16, 3.0616170e-16])
""")
add_newdoc("dawsn",
"""
dawsn(x, out=None)
Dawson's integral.
Computes::
exp(-x**2) * integral(exp(t**2), t=0..x).
Parameters
----------
x : array_like
Function parameter.
out : ndarray, optional
Optional output array for the function values
Returns
-------
y : scalar or ndarray
Value of the integral.
See Also
--------
wofz, erf, erfc, erfcx, erfi
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> import numpy as np
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-15, 15, num=1000)
>>> plt.plot(x, special.dawsn(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$dawsn(x)$')
>>> plt.show()
""")
add_newdoc("ellipe",
r"""
ellipe(m, out=None)
Complete elliptic integral of the second kind
This function is defined as
.. math:: E(m) = \int_0^{\pi/2} [1 - m \sin(t)^2]^{1/2} dt
Parameters
----------
m : array_like
Defines the parameter of the elliptic integral.
out : ndarray, optional
Optional output array for the function values
Returns
-------
E : scalar or ndarray
Value of the elliptic integral.
Notes
-----
Wrapper for the Cephes [1]_ routine `ellpe`.
For `m > 0` the computation uses the approximation,
.. math:: E(m) \approx P(1-m) - (1-m) \log(1-m) Q(1-m),
where :math:`P` and :math:`Q` are tenth-order polynomials. For
`m < 0`, the relation
    .. math:: E(m) = E(m/(m - 1)) \sqrt{1-m}
is used.
The parameterization in terms of :math:`m` follows that of section
17.2 in [2]_. Other parameterizations in terms of the
complementary parameter :math:`1 - m`, modular angle
:math:`\sin^2(\alpha) = m`, or modulus :math:`k^2 = m` are also
used, so be careful that you choose the correct parameter.
The Legendre E integral is related to Carlson's symmetric R_D or R_G
functions in multiple ways [3]_. For example,
.. math:: E(m) = 2 R_G(0, 1-k^2, 1) .
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipeinc : Incomplete elliptic integral of the second kind
elliprd : Symmetric elliptic integral of the second kind.
elliprg : Completely-symmetric elliptic integral of the second kind.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
.. [3] NIST Digital Library of Mathematical
Functions. http://dlmf.nist.gov/, Release 1.0.28 of
2020-09-15. See Sec. 19.25(i) https://dlmf.nist.gov/19.25#i
Examples
--------
This function is used in finding the circumference of an
ellipse with semi-major axis `a` and semi-minor axis `b`.
>>> import numpy as np
>>> from scipy import special
>>> a = 3.5
>>> b = 2.1
>>> e_sq = 1.0 - b**2/a**2 # eccentricity squared
Then the circumference is found using the following:
>>> C = 4*a*special.ellipe(e_sq) # circumference formula
>>> C
17.868899204378693
When `a` and `b` are the same (meaning eccentricity is 0),
this reduces to the circumference of a circle.
>>> 4*a*special.ellipe(0.0) # formula for ellipse with a = b
21.991148575128552
>>> 2*np.pi*a # formula for circle of radius a
21.991148575128552
""")
add_newdoc("ellipeinc",
r"""
ellipeinc(phi, m, out=None)
Incomplete elliptic integral of the second kind
This function is defined as
.. math:: E(\phi, m) = \int_0^{\phi} [1 - m \sin(t)^2]^{1/2} dt
Parameters
----------
phi : array_like
amplitude of the elliptic integral.
m : array_like
parameter of the elliptic integral.
out : ndarray, optional
Optional output array for the function values
Returns
-------
E : scalar or ndarray
Value of the elliptic integral.
Notes
-----
Wrapper for the Cephes [1]_ routine `ellie`.
Computation uses arithmetic-geometric means algorithm.
The parameterization in terms of :math:`m` follows that of section
17.2 in [2]_. Other parameterizations in terms of the
complementary parameter :math:`1 - m`, modular angle
:math:`\sin^2(\alpha) = m`, or modulus :math:`k^2 = m` are also
used, so be careful that you choose the correct parameter.
The Legendre E incomplete integral can be related to combinations
of Carlson's symmetric integrals R_D, R_F, and R_G in multiple
ways [3]_. For example, with :math:`c = \csc^2\phi`,
.. math::
E(\phi, m) = R_F(c-1, c-k^2, c)
- \frac{1}{3} k^2 R_D(c-1, c-k^2, c) .
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
elliprd : Symmetric elliptic integral of the second kind.
elliprf : Completely-symmetric elliptic integral of the first kind.
elliprg : Completely-symmetric elliptic integral of the second kind.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
.. [3] NIST Digital Library of Mathematical
Functions. http://dlmf.nist.gov/, Release 1.0.28 of
2020-09-15. See Sec. 19.25(i) https://dlmf.nist.gov/19.25#i
""")
add_newdoc("ellipj",
"""
ellipj(u, m, out=None)
Jacobian elliptic functions
Calculates the Jacobian elliptic functions of parameter `m` between
0 and 1, and real argument `u`.
Parameters
----------
m : array_like
Parameter.
u : array_like
Argument.
out : tuple of ndarray, optional
Optional output arrays for the function values
Returns
-------
sn, cn, dn, ph : 4-tuple of scalar or ndarray
The returned functions::
sn(u|m), cn(u|m), dn(u|m)
The value `ph` is such that if `u = ellipkinc(ph, m)`,
then `sn(u|m) = sin(ph)` and `cn(u|m) = cos(ph)`.
Notes
-----
Wrapper for the Cephes [1]_ routine `ellpj`.
These functions are periodic, with quarter-period on the real axis
equal to the complete elliptic integral `ellipk(m)`.
Relation to incomplete elliptic integral: If `u = ellipkinc(phi,m)`, then
`sn(u|m) = sin(phi)`, and `cn(u|m) = cos(phi)`. The `phi` is called
the amplitude of `u`.
Computation is by means of the arithmetic-geometric mean algorithm,
except when `m` is within 1e-9 of 0 or 1. In the latter case with `m`
close to 1, the approximation applies only for `phi < pi/2`.
    See Also
--------
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("ellipkm1",
"""
ellipkm1(p, out=None)
Complete elliptic integral of the first kind around `m` = 1
This function is defined as
.. math:: K(p) = \\int_0^{\\pi/2} [1 - m \\sin(t)^2]^{-1/2} dt
where `m = 1 - p`.
Parameters
----------
p : array_like
Defines the parameter of the elliptic integral as `m = 1 - p`.
out : ndarray, optional
Optional output array for the function values
Returns
-------
K : scalar or ndarray
Value of the elliptic integral.
Notes
-----
Wrapper for the Cephes [1]_ routine `ellpk`.
For `p <= 1`, computation uses the approximation,
.. math:: K(p) \\approx P(p) - \\log(p) Q(p),
where :math:`P` and :math:`Q` are tenth-order polynomials. The
argument `p` is used internally rather than `m` so that the logarithmic
singularity at `m = 1` will be shifted to the origin; this preserves
maximum accuracy. For `p > 1`, the identity
    .. math:: K(p) = K(1/p)/\\sqrt{p}
is used.
See Also
--------
ellipk : Complete elliptic integral of the first kind
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
elliprf : Completely-symmetric elliptic integral of the first kind.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
""")
add_newdoc("ellipk",
r"""
ellipk(m, out=None)
Complete elliptic integral of the first kind.
This function is defined as
.. math:: K(m) = \int_0^{\pi/2} [1 - m \sin(t)^2]^{-1/2} dt
Parameters
----------
m : array_like
The parameter of the elliptic integral.
out : ndarray, optional
Optional output array for the function values
Returns
-------
K : scalar or ndarray
Value of the elliptic integral.
Notes
-----
For more precision around point m = 1, use `ellipkm1`, which this
function calls.
The parameterization in terms of :math:`m` follows that of section
17.2 in [1]_. Other parameterizations in terms of the
complementary parameter :math:`1 - m`, modular angle
:math:`\sin^2(\alpha) = m`, or modulus :math:`k^2 = m` are also
used, so be careful that you choose the correct parameter.
The Legendre K integral is related to Carlson's symmetric R_F
function by [2]_:
.. math:: K(m) = R_F(0, 1-k^2, 1) .
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind around m = 1
ellipkinc : Incomplete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
elliprf : Completely-symmetric elliptic integral of the first kind.
References
----------
.. [1] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
.. [2] NIST Digital Library of Mathematical
Functions. http://dlmf.nist.gov/, Release 1.0.28 of
2020-09-15. See Sec. 19.25(i) https://dlmf.nist.gov/19.25#i
""")
add_newdoc("ellipkinc",
r"""
ellipkinc(phi, m, out=None)
Incomplete elliptic integral of the first kind
This function is defined as
.. math:: K(\phi, m) = \int_0^{\phi} [1 - m \sin(t)^2]^{-1/2} dt
This function is also called :math:`F(\phi, m)`.
Parameters
----------
phi : array_like
amplitude of the elliptic integral
m : array_like
parameter of the elliptic integral
out : ndarray, optional
Optional output array for the function values
Returns
-------
K : scalar or ndarray
Value of the elliptic integral
Notes
-----
Wrapper for the Cephes [1]_ routine `ellik`. The computation is
carried out using the arithmetic-geometric mean algorithm.
The parameterization in terms of :math:`m` follows that of section
17.2 in [2]_. Other parameterizations in terms of the
complementary parameter :math:`1 - m`, modular angle
:math:`\sin^2(\alpha) = m`, or modulus :math:`k^2 = m` are also
used, so be careful that you choose the correct parameter.
The Legendre K incomplete integral (or F integral) is related to
Carlson's symmetric R_F function [3]_.
Setting :math:`c = \csc^2\phi`,
.. math:: F(\phi, m) = R_F(c-1, c-k^2, c) .
See Also
--------
ellipkm1 : Complete elliptic integral of the first kind, near `m` = 1
ellipk : Complete elliptic integral of the first kind
ellipe : Complete elliptic integral of the second kind
ellipeinc : Incomplete elliptic integral of the second kind
elliprf : Completely-symmetric elliptic integral of the first kind.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
.. [3] NIST Digital Library of Mathematical
Functions. http://dlmf.nist.gov/, Release 1.0.28 of
2020-09-15. See Sec. 19.25(i) https://dlmf.nist.gov/19.25#i
""")
add_newdoc(
"elliprc",
r"""
elliprc(x, y, out=None)
Degenerate symmetric elliptic integral.
The function RC is defined as [1]_
.. math::
R_{\mathrm{C}}(x, y) =
\frac{1}{2} \int_0^{+\infty} (t + x)^{-1/2} (t + y)^{-1} dt
= R_{\mathrm{F}}(x, y, y)
Parameters
----------
x, y : array_like
Real or complex input parameters. `x` can be any number in the
complex plane cut along the negative real axis. `y` must be non-zero.
out : ndarray, optional
Optional output array for the function values
Returns
-------
R : scalar or ndarray
Value of the integral. If `y` is real and negative, the Cauchy
principal value is returned. If both of `x` and `y` are real, the
return value is real. Otherwise, the return value is complex.
Notes
-----
RC is a degenerate case of the symmetric integral RF: ``elliprc(x, y) ==
elliprf(x, y, y)``. It is an elementary function rather than an elliptic
integral.
The code implements Carlson's algorithm based on the duplication theorems
and series expansion up to the 7th order. [2]_
.. versionadded:: 1.8.0
See Also
--------
elliprf : Completely-symmetric elliptic integral of the first kind.
elliprd : Symmetric elliptic integral of the second kind.
elliprg : Completely-symmetric elliptic integral of the second kind.
elliprj : Symmetric elliptic integral of the third kind.
References
----------
.. [1] B. C. Carlson, ed., Chapter 19 in "Digital Library of Mathematical
Functions," NIST, US Dept. of Commerce.
https://dlmf.nist.gov/19.16.E6
.. [2] B. C. Carlson, "Numerical computation of real or complex elliptic
integrals," Numer. Algorithm, vol. 10, no. 1, pp. 13-26, 1995.
https://arxiv.org/abs/math/9409227
https://doi.org/10.1007/BF02198293
Examples
--------
Basic homogeneity property:
>>> import numpy as np
>>> from scipy.special import elliprc
>>> x = 1.2 + 3.4j
>>> y = 5.
>>> scale = 0.3 + 0.4j
>>> elliprc(scale*x, scale*y)
(0.5484493976710874-0.4169557678995833j)
>>> elliprc(x, y)/np.sqrt(scale)
(0.5484493976710874-0.41695576789958333j)
When the two arguments coincide, the integral is particularly
simple:
>>> x = 1.2 + 3.4j
>>> elliprc(x, x)
(0.4299173120614631-0.3041729818745595j)
>>> 1/np.sqrt(x)
(0.4299173120614631-0.30417298187455954j)
Another simple case: the first argument vanishes:
>>> y = 1.2 + 3.4j
>>> elliprc(0, y)
(0.6753125346116815-0.47779380263880866j)
>>> np.pi/2/np.sqrt(y)
(0.6753125346116815-0.4777938026388088j)
When `x` and `y` are both positive, we can express
:math:`R_C(x,y)` in terms of more elementary functions. For the
case :math:`0 \le x < y`,
>>> x = 3.2
>>> y = 6.
>>> elliprc(x, y)
0.44942991498453444
>>> np.arctan(np.sqrt((y-x)/x))/np.sqrt(y-x)
0.44942991498453433
And for the case :math:`0 \le y < x`,
>>> x = 6.
>>> y = 3.2
>>> elliprc(x,y)
0.4989837501576147
>>> np.log((np.sqrt(x)+np.sqrt(x-y))/np.sqrt(y))/np.sqrt(x-y)
0.49898375015761476
""")
add_newdoc(
"elliprd",
r"""
elliprd(x, y, z, out=None)
Symmetric elliptic integral of the second kind.
The function RD is defined as [1]_
.. math::
R_{\mathrm{D}}(x, y, z) =
\frac{3}{2} \int_0^{+\infty} [(t + x) (t + y)]^{-1/2} (t + z)^{-3/2}
dt
Parameters
----------
x, y, z : array_like
Real or complex input parameters. `x` or `y` can be any number in the
complex plane cut along the negative real axis, but at most one of them
can be zero, while `z` must be non-zero.
out : ndarray, optional
Optional output array for the function values
Returns
-------
R : scalar or ndarray
Value of the integral. If all of `x`, `y`, and `z` are real, the
return value is real. Otherwise, the return value is complex.
Notes
-----
RD is a degenerate case of the elliptic integral RJ: ``elliprd(x, y, z) ==
elliprj(x, y, z, z)``.
The code implements Carlson's algorithm based on the duplication theorems
and series expansion up to the 7th order. [2]_
.. versionadded:: 1.8.0
See Also
--------
elliprc : Degenerate symmetric elliptic integral.
elliprf : Completely-symmetric elliptic integral of the first kind.
elliprg : Completely-symmetric elliptic integral of the second kind.
elliprj : Symmetric elliptic integral of the third kind.
References
----------
.. [1] B. C. Carlson, ed., Chapter 19 in "Digital Library of Mathematical
Functions," NIST, US Dept. of Commerce.
https://dlmf.nist.gov/19.16.E5
.. [2] B. C. Carlson, "Numerical computation of real or complex elliptic
integrals," Numer. Algorithm, vol. 10, no. 1, pp. 13-26, 1995.
https://arxiv.org/abs/math/9409227
https://doi.org/10.1007/BF02198293
Examples
--------
Basic homogeneity property:
>>> import numpy as np
>>> from scipy.special import elliprd
>>> x = 1.2 + 3.4j
>>> y = 5.
>>> z = 6.
>>> scale = 0.3 + 0.4j
>>> elliprd(scale*x, scale*y, scale*z)
(-0.03703043835680379-0.24500934665683802j)
>>> elliprd(x, y, z)*np.power(scale, -1.5)
(-0.0370304383568038-0.24500934665683805j)
All three arguments coincide:
>>> x = 1.2 + 3.4j
>>> elliprd(x, x, x)
(-0.03986825876151896-0.14051741840449586j)
>>> np.power(x, -1.5)
(-0.03986825876151894-0.14051741840449583j)
The so-called "second lemniscate constant":
>>> elliprd(0, 2, 1)/3
0.5990701173677961
>>> from scipy.special import gamma
>>> gamma(0.75)**2/np.sqrt(2*np.pi)
0.5990701173677959
""")
add_newdoc(
"elliprf",
r"""
elliprf(x, y, z, out=None)
Completely-symmetric elliptic integral of the first kind.
The function RF is defined as [1]_
.. math::
R_{\mathrm{F}}(x, y, z) =
\frac{1}{2} \int_0^{+\infty} [(t + x) (t + y) (t + z)]^{-1/2} dt
Parameters
----------
x, y, z : array_like
Real or complex input parameters. `x`, `y`, or `z` can be any number in
the complex plane cut along the negative real axis, but at most one of
them can be zero.
out : ndarray, optional
Optional output array for the function values
Returns
-------
R : scalar or ndarray
Value of the integral. If all of `x`, `y`, and `z` are real, the return
value is real. Otherwise, the return value is complex.
Notes
-----
The code implements Carlson's algorithm based on the duplication theorems
and series expansion up to the 7th order (cf.:
https://dlmf.nist.gov/19.36.i) and the AGM algorithm for the complete
integral. [2]_
.. versionadded:: 1.8.0
See Also
--------
elliprc : Degenerate symmetric integral.
elliprd : Symmetric elliptic integral of the second kind.
elliprg : Completely-symmetric elliptic integral of the second kind.
elliprj : Symmetric elliptic integral of the third kind.
References
----------
.. [1] B. C. Carlson, ed., Chapter 19 in "Digital Library of Mathematical
Functions," NIST, US Dept. of Commerce.
https://dlmf.nist.gov/19.16.E1
.. [2] B. C. Carlson, "Numerical computation of real or complex elliptic
integrals," Numer. Algorithm, vol. 10, no. 1, pp. 13-26, 1995.
https://arxiv.org/abs/math/9409227
https://doi.org/10.1007/BF02198293
Examples
--------
Basic homogeneity property:
>>> import numpy as np
>>> from scipy.special import elliprf
>>> x = 1.2 + 3.4j
>>> y = 5.
>>> z = 6.
>>> scale = 0.3 + 0.4j
>>> elliprf(scale*x, scale*y, scale*z)
(0.5328051227278146-0.4008623567957094j)
>>> elliprf(x, y, z)/np.sqrt(scale)
(0.5328051227278147-0.4008623567957095j)
All three arguments coincide:
>>> x = 1.2 + 3.4j
>>> elliprf(x, x, x)
(0.42991731206146316-0.30417298187455954j)
>>> 1/np.sqrt(x)
(0.4299173120614631-0.30417298187455954j)
The so-called "first lemniscate constant":
>>> elliprf(0, 1, 2)
1.3110287771460598
>>> from scipy.special import gamma
>>> gamma(0.25)**2/(4*np.sqrt(2*np.pi))
1.3110287771460598
""")
add_newdoc(
"elliprg",
r"""
elliprg(x, y, z, out=None)
Completely-symmetric elliptic integral of the second kind.
The function RG is defined as [1]_
.. math::
R_{\mathrm{G}}(x, y, z) =
\frac{1}{4} \int_0^{+\infty} [(t + x) (t + y) (t + z)]^{-1/2}
\left(\frac{x}{t + x} + \frac{y}{t + y} + \frac{z}{t + z}\right) t
dt
Parameters
----------
x, y, z : array_like
Real or complex input parameters. `x`, `y`, or `z` can be any number in
the complex plane cut along the negative real axis.
out : ndarray, optional
Optional output array for the function values
Returns
-------
R : scalar or ndarray
Value of the integral. If all of `x`, `y`, and `z` are real, the return
value is real. Otherwise, the return value is complex.
Notes
-----
The implementation uses the relation [1]_
.. math::
2 R_{\mathrm{G}}(x, y, z) =
z R_{\mathrm{F}}(x, y, z) -
\frac{1}{3} (x - z) (y - z) R_{\mathrm{D}}(x, y, z) +
\sqrt{\frac{x y}{z}}
and the symmetry of `x`, `y`, `z` when at least one non-zero parameter can
be chosen as the pivot. When one of the arguments is close to zero, the AGM
method is applied instead. Other special cases are computed following Ref.
[2]_
.. versionadded:: 1.8.0
See Also
--------
elliprc : Degenerate symmetric integral.
elliprd : Symmetric elliptic integral of the second kind.
elliprf : Completely-symmetric elliptic integral of the first kind.
elliprj : Symmetric elliptic integral of the third kind.
References
----------
.. [1] B. C. Carlson, "Numerical computation of real or complex elliptic
integrals," Numer. Algorithm, vol. 10, no. 1, pp. 13-26, 1995.
https://arxiv.org/abs/math/9409227
https://doi.org/10.1007/BF02198293
.. [2] B. C. Carlson, ed., Chapter 19 in "Digital Library of Mathematical
Functions," NIST, US Dept. of Commerce.
https://dlmf.nist.gov/19.16.E1
https://dlmf.nist.gov/19.20.ii
Examples
--------
Basic homogeneity property:
>>> import numpy as np
>>> from scipy.special import elliprg
>>> x = 1.2 + 3.4j
>>> y = 5.
>>> z = 6.
>>> scale = 0.3 + 0.4j
>>> elliprg(scale*x, scale*y, scale*z)
(1.195936862005246+0.8470988320464167j)
>>> elliprg(x, y, z)*np.sqrt(scale)
(1.195936862005246+0.8470988320464165j)
Simplifications:
>>> elliprg(0, y, y)
1.756203682760182
>>> 0.25*np.pi*np.sqrt(y)
1.7562036827601817
>>> elliprg(0, 0, z)
1.224744871391589
>>> 0.5*np.sqrt(z)
1.224744871391589
The surface area of a triaxial ellipsoid with semiaxes ``a``, ``b``, and
``c`` is given by
.. math::
S = 4 \pi a b c R_{\mathrm{G}}(1 / a^2, 1 / b^2, 1 / c^2).
>>> def ellipsoid_area(a, b, c):
... r = 4.0 * np.pi * a * b * c
... return r * elliprg(1.0 / (a * a), 1.0 / (b * b), 1.0 / (c * c))
>>> print(ellipsoid_area(1, 3, 5))
108.62688289491807
""")
add_newdoc(
"elliprj",
r"""
elliprj(x, y, z, p, out=None)
Symmetric elliptic integral of the third kind.
The function RJ is defined as [1]_
.. math::
R_{\mathrm{J}}(x, y, z, p) =
\frac{3}{2} \int_0^{+\infty} [(t + x) (t + y) (t + z)]^{-1/2}
(t + p)^{-1} dt
.. warning::
This function should be considered experimental when the inputs are
unbalanced. Check correctness with another independent implementation.
Parameters
----------
x, y, z, p : array_like
Real or complex input parameters. `x`, `y`, or `z` are numbers in
the complex plane cut along the negative real axis (subject to further
constraints, see Notes), and at most one of them can be zero. `p` must
be non-zero.
out : ndarray, optional
Optional output array for the function values
Returns
-------
R : scalar or ndarray
Value of the integral. If all of `x`, `y`, `z`, and `p` are real, the
return value is real. Otherwise, the return value is complex.
If `p` is real and negative, while `x`, `y`, and `z` are real,
non-negative, and at most one of them is zero, the Cauchy principal
value is returned. [1]_ [2]_
Notes
-----
The code implements Carlson's algorithm based on the duplication theorems
and series expansion up to the 7th order. [3]_ The algorithm is slightly
different from its earlier incarnation as it appears in [1]_, in that the
call to `elliprc` (or ``atan``/``atanh``, see [4]_) is no longer needed in
the inner loop. Asymptotic approximations are used where arguments differ
widely in the order of magnitude. [5]_
The input values are subject to certain sufficient but not necessary
    constraints when input arguments are complex. Notably, ``x``, ``y``, and
``z`` must have non-negative real parts, unless two of them are
non-negative and complex-conjugates to each other while the other is a real
non-negative number. [1]_ If the inputs do not satisfy the sufficient
condition described in Ref. [1]_ they are rejected outright with the output
set to NaN.
In the case where one of ``x``, ``y``, and ``z`` is equal to ``p``, the
function ``elliprd`` should be preferred because of its less restrictive
domain.
.. versionadded:: 1.8.0
See Also
--------
elliprc : Degenerate symmetric integral.
elliprd : Symmetric elliptic integral of the second kind.
elliprf : Completely-symmetric elliptic integral of the first kind.
elliprg : Completely-symmetric elliptic integral of the second kind.
References
----------
.. [1] B. C. Carlson, "Numerical computation of real or complex elliptic
integrals," Numer. Algorithm, vol. 10, no. 1, pp. 13-26, 1995.
https://arxiv.org/abs/math/9409227
https://doi.org/10.1007/BF02198293
.. [2] B. C. Carlson, ed., Chapter 19 in "Digital Library of Mathematical
Functions," NIST, US Dept. of Commerce.
https://dlmf.nist.gov/19.20.iii
.. [3] B. C. Carlson, J. FitzSimmons, "Reduction Theorems for Elliptic
Integrands with the Square Root of Two Quadratic Factors," J.
Comput. Appl. Math., vol. 118, nos. 1-2, pp. 71-85, 2000.
https://doi.org/10.1016/S0377-0427(00)00282-X
.. [4] F. Johansson, "Numerical Evaluation of Elliptic Functions, Elliptic
Integrals and Modular Forms," in J. Blumlein, C. Schneider, P.
Paule, eds., "Elliptic Integrals, Elliptic Functions and Modular
Forms in Quantum Field Theory," pp. 269-293, 2019 (Cham,
Switzerland: Springer Nature Switzerland)
https://arxiv.org/abs/1806.06725
https://doi.org/10.1007/978-3-030-04480-0
.. [5] B. C. Carlson, J. L. Gustafson, "Asymptotic Approximations for
Symmetric Elliptic Integrals," SIAM J. Math. Anls., vol. 25, no. 2,
pp. 288-303, 1994.
https://arxiv.org/abs/math/9310223
https://doi.org/10.1137/S0036141092228477
Examples
--------
Basic homogeneity property:
>>> import numpy as np
>>> from scipy.special import elliprj
>>> x = 1.2 + 3.4j
>>> y = 5.
>>> z = 6.
>>> p = 7.
>>> scale = 0.3 - 0.4j
>>> elliprj(scale*x, scale*y, scale*z, scale*p)
(0.10834905565679157+0.19694950747103812j)
>>> elliprj(x, y, z, p)*np.power(scale, -1.5)
(0.10834905565679556+0.19694950747103854j)
Reduction to simpler elliptic integral:
>>> elliprj(x, y, z, z)
(0.08288462362195129-0.028376809745123258j)
>>> from scipy.special import elliprd
>>> elliprd(x, y, z)
(0.08288462362195136-0.028376809745123296j)
All arguments coincide:
>>> elliprj(x, x, x, x)
(-0.03986825876151896-0.14051741840449586j)
>>> np.power(x, -1.5)
(-0.03986825876151894-0.14051741840449583j)
""")
add_newdoc("entr",
r"""
entr(x, out=None)
Elementwise function for computing entropy.
.. math:: \text{entr}(x) = \begin{cases} - x \log(x) & x > 0 \\ 0 & x = 0 \\ -\infty & \text{otherwise} \end{cases}
Parameters
----------
x : ndarray
Input array.
out : ndarray, optional
Optional output array for the function values
Returns
-------
res : scalar or ndarray
The value of the elementwise entropy function at the given points `x`.
See Also
--------
kl_div, rel_entr, scipy.stats.entropy
Notes
-----
.. versionadded:: 0.15.0
This function is concave.
The origin of this function is in convex programming; see [1]_.
Given a probability distribution :math:`p_1, \ldots, p_n`,
the definition of entropy in the context of *information theory* is
.. math::
\sum_{i = 1}^n \mathrm{entr}(p_i).
To compute the latter quantity, use `scipy.stats.entropy`.
References
----------
.. [1] Boyd, Stephen and Lieven Vandenberghe. *Convex optimization*.
Cambridge University Press, 2004.
           :doi:`10.1017/CBO9780511804441`
""")
add_newdoc("erf",
"""
erf(z, out=None)
Returns the error function of complex argument.
It is defined as ``2/sqrt(pi)*integral(exp(-t**2), t=0..z)``.
    Parameters
    ----------
    z : ndarray
        Input array.
    out : ndarray, optional
        Optional output array for the function values
    Returns
    -------
    res : scalar or ndarray
        The values of the error function at the given points `z`.
See Also
--------
erfc, erfinv, erfcinv, wofz, erfcx, erfi
Notes
-----
The cumulative of the unit normal distribution is given by
``Phi(z) = 1/2[1 + erf(z/sqrt(2))]``.
References
----------
.. [1] https://en.wikipedia.org/wiki/Error_function
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover,
1972. http://www.math.sfu.ca/~cbm/aands/page_297.htm
.. [3] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> import numpy as np
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.erf(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$erf(x)$')
>>> plt.show()
""")
add_newdoc("erfc",
"""
erfc(x, out=None)
Complementary error function, ``1 - erf(x)``.
Parameters
----------
x : array_like
Real or complex valued argument
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
Values of the complementary error function
See Also
--------
erf, erfi, erfcx, dawsn, wofz
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> import numpy as np
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.erfc(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$erfc(x)$')
>>> plt.show()
""")
add_newdoc("erfi",
"""
erfi(z, out=None)
Imaginary error function, ``-i erf(i z)``.
Parameters
----------
z : array_like
Real or complex valued argument
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
Values of the imaginary error function
See Also
--------
erf, erfc, erfcx, dawsn, wofz
Notes
-----
.. versionadded:: 0.12.0
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> import numpy as np
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.erfi(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$erfi(x)$')
>>> plt.show()
""")
add_newdoc("erfcx",
"""
erfcx(x, out=None)
Scaled complementary error function, ``exp(x**2) * erfc(x)``.
Parameters
----------
x : array_like
Real or complex valued argument
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
Values of the scaled complementary error function
See Also
--------
erf, erfc, erfi, dawsn, wofz
Notes
-----
.. versionadded:: 0.12.0
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> import numpy as np
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> plt.plot(x, special.erfcx(x))
>>> plt.xlabel('$x$')
>>> plt.ylabel('$erfcx(x)$')
>>> plt.show()
""")
add_newdoc(
"erfinv",
"""
erfinv(y, out=None)
Inverse of the error function.
Computes the inverse of the error function.
In the complex domain, there is no unique complex number w satisfying
erf(w)=z. This indicates a true inverse function would be multivalued.
When the domain restricts to the real, -1 < x < 1, there is a unique real
number satisfying erf(erfinv(x)) = x.
Parameters
----------
y : ndarray
Argument at which to evaluate. Domain: [-1, 1]
out : ndarray, optional
Optional output array for the function values
Returns
-------
erfinv : scalar or ndarray
The inverse of erf of y, element-wise
See Also
--------
erf : Error function of a complex argument
erfc : Complementary error function, ``1 - erf(x)``
erfcinv : Inverse of the complementary error function
Examples
--------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from scipy.special import erfinv, erf
>>> erfinv(0.5)
0.4769362762044699
>>> y = np.linspace(-1.0, 1.0, num=9)
>>> x = erfinv(y)
>>> x
array([ -inf, -0.81341985, -0.47693628, -0.22531206, 0. ,
0.22531206, 0.47693628, 0.81341985, inf])
Verify that ``erf(erfinv(y))`` is ``y``.
>>> erf(x)
array([-1. , -0.75, -0.5 , -0.25, 0. , 0.25, 0.5 , 0.75, 1. ])
Plot the function:
>>> y = np.linspace(-1, 1, 200)
>>> fig, ax = plt.subplots()
>>> ax.plot(y, erfinv(y))
>>> ax.grid(True)
>>> ax.set_xlabel('y')
>>> ax.set_title('erfinv(y)')
>>> plt.show()
""")
add_newdoc(
"erfcinv",
"""
erfcinv(y, out=None)
Inverse of the complementary error function.
Computes the inverse of the complementary error function.
In the complex domain, there is no unique complex number w satisfying
erfc(w)=z. This indicates a true inverse function would be multivalued.
When the domain restricts to the real, 0 < x < 2, there is a unique real
    number satisfying erfc(erfcinv(x)) = x.
It is related to inverse of the error function by erfcinv(1-x) = erfinv(x)
Parameters
----------
y : ndarray
Argument at which to evaluate. Domain: [0, 2]
out : ndarray, optional
Optional output array for the function values
Returns
-------
erfcinv : scalar or ndarray
The inverse of erfc of y, element-wise
See Also
--------
erf : Error function of a complex argument
erfc : Complementary error function, ``1 - erf(x)``
erfinv : Inverse of the error function
Examples
--------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from scipy.special import erfcinv
>>> erfcinv(0.5)
0.4769362762044699
>>> y = np.linspace(0.0, 2.0, num=11)
>>> erfcinv(y)
array([ inf, 0.9061938 , 0.59511608, 0.37080716, 0.17914345,
-0. , -0.17914345, -0.37080716, -0.59511608, -0.9061938 ,
-inf])
Plot the function:
>>> y = np.linspace(0, 2, 200)
>>> fig, ax = plt.subplots()
>>> ax.plot(y, erfcinv(y))
>>> ax.grid(True)
>>> ax.set_xlabel('y')
>>> ax.set_title('erfcinv(y)')
>>> plt.show()
""")
add_newdoc("eval_jacobi",
r"""
eval_jacobi(n, alpha, beta, x, out=None)
Evaluate Jacobi polynomial at a point.
The Jacobi polynomials can be defined via the Gauss hypergeometric
function :math:`{}_2F_1` as
.. math::
P_n^{(\alpha, \beta)}(x) = \frac{(\alpha + 1)_n}{\Gamma(n + 1)}
          {}_2F_1(-n, 1 + \alpha + \beta + n; \alpha + 1; (1 - x)/2)
where :math:`(\cdot)_n` is the Pochhammer symbol; see `poch`. When
:math:`n` is an integer the result is a polynomial of degree
:math:`n`. See 22.5.42 in [AS]_ for details.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer the result is
determined via the relation to the Gauss hypergeometric
function.
alpha : array_like
Parameter
beta : array_like
Parameter
x : array_like
Points at which to evaluate the polynomial
out : ndarray, optional
Optional output array for the function values
Returns
-------
P : scalar or ndarray
Values of the Jacobi polynomial
See Also
--------
roots_jacobi : roots and quadrature weights of Jacobi polynomials
jacobi : Jacobi polynomial object
hyp2f1 : Gauss hypergeometric function
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
""")
add_newdoc("eval_sh_jacobi",
r"""
eval_sh_jacobi(n, p, q, x, out=None)
Evaluate shifted Jacobi polynomial at a point.
Defined by
.. math::
G_n^{(p, q)}(x)
= \binom{2n + p - 1}{n}^{-1} P_n^{(p - q, q - 1)}(2x - 1),
where :math:`P_n^{(\cdot, \cdot)}` is the n-th Jacobi
polynomial. See 22.5.2 in [AS]_ for details.
Parameters
----------
n : int
Degree of the polynomial. If not an integer, the result is
determined via the relation to `binom` and `eval_jacobi`.
p : float
Parameter
    q : float
        Parameter
    x : float
        Points at which to evaluate the polynomial
out : ndarray, optional
Optional output array for the function values
Returns
-------
G : scalar or ndarray
Values of the shifted Jacobi polynomial.
See Also
--------
roots_sh_jacobi : roots and quadrature weights of shifted Jacobi
polynomials
sh_jacobi : shifted Jacobi polynomial object
eval_jacobi : evaluate Jacobi polynomials
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
""")
add_newdoc("eval_gegenbauer",
r"""
eval_gegenbauer(n, alpha, x, out=None)
Evaluate Gegenbauer polynomial at a point.
The Gegenbauer polynomials can be defined via the Gauss
hypergeometric function :math:`{}_2F_1` as
.. math::
        C_n^{(\alpha)}(x) = \frac{(2\alpha)_n}{\Gamma(n + 1)}
            {}_2F_1(-n, 2\alpha + n; \alpha + 1/2; (1 - x)/2).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`. See 22.5.46 in [AS]_ for details.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to the Gauss hypergeometric
function.
alpha : array_like
Parameter
x : array_like
Points at which to evaluate the Gegenbauer polynomial
out : ndarray, optional
Optional output array for the function values
Returns
-------
C : scalar or ndarray
Values of the Gegenbauer polynomial
See Also
--------
roots_gegenbauer : roots and quadrature weights of Gegenbauer
polynomials
gegenbauer : Gegenbauer polynomial object
hyp2f1 : Gauss hypergeometric function
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
""")
add_newdoc("eval_chebyt",
r"""
eval_chebyt(n, x, out=None)
Evaluate Chebyshev polynomial of the first kind at a point.
The Chebyshev polynomials of the first kind can be defined via the
Gauss hypergeometric function :math:`{}_2F_1` as
.. math::
T_n(x) = {}_2F_1(n, -n; 1/2; (1 - x)/2).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`. See 22.5.47 in [AS]_ for details.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to the Gauss hypergeometric
function.
x : array_like
Points at which to evaluate the Chebyshev polynomial
out : ndarray, optional
Optional output array for the function values
Returns
-------
T : scalar or ndarray
Values of the Chebyshev polynomial
See Also
--------
roots_chebyt : roots and quadrature weights of Chebyshev
polynomials of the first kind
    chebyt : Chebyshev polynomial object
eval_chebyu : evaluate Chebyshev polynomials of the second kind
hyp2f1 : Gauss hypergeometric function
numpy.polynomial.chebyshev.Chebyshev : Chebyshev series
Notes
-----
This routine is numerically stable for `x` in ``[-1, 1]`` at least
up to order ``10000``.
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
""")
add_newdoc("eval_chebyu",
r"""
eval_chebyu(n, x, out=None)
Evaluate Chebyshev polynomial of the second kind at a point.
The Chebyshev polynomials of the second kind can be defined via
the Gauss hypergeometric function :math:`{}_2F_1` as
.. math::
U_n(x) = (n + 1) {}_2F_1(-n, n + 2; 3/2; (1 - x)/2).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`. See 22.5.48 in [AS]_ for details.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to the Gauss hypergeometric
function.
x : array_like
Points at which to evaluate the Chebyshev polynomial
out : ndarray, optional
Optional output array for the function values
Returns
-------
U : scalar or ndarray
Values of the Chebyshev polynomial
See Also
--------
roots_chebyu : roots and quadrature weights of Chebyshev
polynomials of the second kind
chebyu : Chebyshev polynomial object
eval_chebyt : evaluate Chebyshev polynomials of the first kind
hyp2f1 : Gauss hypergeometric function
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
""")
add_newdoc("eval_chebys",
r"""
eval_chebys(n, x, out=None)
Evaluate Chebyshev polynomial of the second kind on [-2, 2] at a
point.
These polynomials are defined as
.. math::
S_n(x) = U_n(x/2)
where :math:`U_n` is a Chebyshev polynomial of the second
kind. See 22.5.13 in [AS]_ for details.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to `eval_chebyu`.
x : array_like
Points at which to evaluate the Chebyshev polynomial
out : ndarray, optional
Optional output array for the function values
Returns
-------
S : scalar or ndarray
Values of the Chebyshev polynomial
See Also
--------
roots_chebys : roots and quadrature weights of Chebyshev
polynomials of the second kind on [-2, 2]
chebys : Chebyshev polynomial object
eval_chebyu : evaluate Chebyshev polynomials of the second kind
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
Examples
--------
>>> import numpy as np
>>> import scipy.special as sc
They are a scaled version of the Chebyshev polynomials of the
second kind.
>>> x = np.linspace(-2, 2, 6)
>>> sc.eval_chebys(3, x)
array([-4. , 0.672, 0.736, -0.736, -0.672, 4. ])
>>> sc.eval_chebyu(3, x / 2)
array([-4. , 0.672, 0.736, -0.736, -0.672, 4. ])
""")
add_newdoc("eval_chebyc",
r"""
eval_chebyc(n, x, out=None)
Evaluate Chebyshev polynomial of the first kind on [-2, 2] at a
point.
These polynomials are defined as
.. math::
C_n(x) = 2 T_n(x/2)
where :math:`T_n` is a Chebyshev polynomial of the first kind. See
22.5.11 in [AS]_ for details.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to `eval_chebyt`.
x : array_like
Points at which to evaluate the Chebyshev polynomial
out : ndarray, optional
Optional output array for the function values
Returns
-------
C : scalar or ndarray
Values of the Chebyshev polynomial
See Also
--------
roots_chebyc : roots and quadrature weights of Chebyshev
polynomials of the first kind on [-2, 2]
chebyc : Chebyshev polynomial object
numpy.polynomial.chebyshev.Chebyshev : Chebyshev series
    eval_chebyt : evaluate Chebyshev polynomials of the first kind
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
Examples
--------
>>> import numpy as np
>>> import scipy.special as sc
They are a scaled version of the Chebyshev polynomials of the
first kind.
>>> x = np.linspace(-2, 2, 6)
>>> sc.eval_chebyc(3, x)
array([-2. , 1.872, 1.136, -1.136, -1.872, 2. ])
>>> 2 * sc.eval_chebyt(3, x / 2)
array([-2. , 1.872, 1.136, -1.136, -1.872, 2. ])
""")
add_newdoc("eval_sh_chebyt",
r"""
eval_sh_chebyt(n, x, out=None)
Evaluate shifted Chebyshev polynomial of the first kind at a
point.
These polynomials are defined as
.. math::
T_n^*(x) = T_n(2x - 1)
where :math:`T_n` is a Chebyshev polynomial of the first kind. See
22.5.14 in [AS]_ for details.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to `eval_chebyt`.
x : array_like
Points at which to evaluate the shifted Chebyshev polynomial
out : ndarray, optional
Optional output array for the function values
Returns
-------
T : scalar or ndarray
Values of the shifted Chebyshev polynomial
See Also
--------
roots_sh_chebyt : roots and quadrature weights of shifted
Chebyshev polynomials of the first kind
sh_chebyt : shifted Chebyshev polynomial object
eval_chebyt : evaluate Chebyshev polynomials of the first kind
numpy.polynomial.chebyshev.Chebyshev : Chebyshev series
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
""")
add_newdoc("eval_sh_chebyu",
r"""
eval_sh_chebyu(n, x, out=None)
Evaluate shifted Chebyshev polynomial of the second kind at a
point.
These polynomials are defined as
.. math::
U_n^*(x) = U_n(2x - 1)
    where :math:`U_n` is a Chebyshev polynomial of the second kind. See
22.5.15 in [AS]_ for details.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to `eval_chebyu`.
x : array_like
Points at which to evaluate the shifted Chebyshev polynomial
out : ndarray, optional
Optional output array for the function values
Returns
-------
U : scalar or ndarray
Values of the shifted Chebyshev polynomial
See Also
--------
roots_sh_chebyu : roots and quadrature weights of shifted
                      Chebyshev polynomials of the second kind
sh_chebyu : shifted Chebyshev polynomial object
eval_chebyu : evaluate Chebyshev polynomials of the second kind
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
""")
add_newdoc("eval_legendre",
r"""
eval_legendre(n, x, out=None)
Evaluate Legendre polynomial at a point.
The Legendre polynomials can be defined via the Gauss
hypergeometric function :math:`{}_2F_1` as
.. math::
P_n(x) = {}_2F_1(-n, n + 1; 1; (1 - x)/2).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`. See 22.5.49 in [AS]_ for details.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to the Gauss hypergeometric
function.
x : array_like
Points at which to evaluate the Legendre polynomial
out : ndarray, optional
Optional output array for the function values
Returns
-------
P : scalar or ndarray
Values of the Legendre polynomial
See Also
--------
roots_legendre : roots and quadrature weights of Legendre
polynomials
legendre : Legendre polynomial object
hyp2f1 : Gauss hypergeometric function
numpy.polynomial.legendre.Legendre : Legendre series
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
Examples
--------
>>> import numpy as np
>>> from scipy.special import eval_legendre
Evaluate the zero-order Legendre polynomial at x = 0
>>> eval_legendre(0, 0)
1.0
Evaluate the first-order Legendre polynomial between -1 and 1
>>> X = np.linspace(-1, 1, 5) # Domain of Legendre polynomials
>>> eval_legendre(1, X)
array([-1. , -0.5, 0. , 0.5, 1. ])
Evaluate Legendre polynomials of order 0 through 4 at x = 0
>>> N = range(0, 5)
>>> eval_legendre(N, 0)
array([ 1. , 0. , -0.5 , 0. , 0.375])
Plot Legendre polynomials of order 0 through 4
>>> X = np.linspace(-1, 1)
>>> import matplotlib.pyplot as plt
>>> for n in range(0, 5):
... y = eval_legendre(n, X)
... plt.plot(X, y, label=r'$P_{}(x)$'.format(n))
>>> plt.title("Legendre Polynomials")
>>> plt.xlabel("x")
>>> plt.ylabel(r'$P_n(x)$')
>>> plt.legend(loc='lower right')
>>> plt.show()
""")
add_newdoc("eval_sh_legendre",
r"""
eval_sh_legendre(n, x, out=None)
Evaluate shifted Legendre polynomial at a point.
These polynomials are defined as
.. math::
P_n^*(x) = P_n(2x - 1)
where :math:`P_n` is a Legendre polynomial. See 2.2.11 in [AS]_
for details.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the value is
determined via the relation to `eval_legendre`.
x : array_like
Points at which to evaluate the shifted Legendre polynomial
out : ndarray, optional
Optional output array for the function values
Returns
-------
P : scalar or ndarray
Values of the shifted Legendre polynomial
See Also
--------
roots_sh_legendre : roots and quadrature weights of shifted
Legendre polynomials
sh_legendre : shifted Legendre polynomial object
eval_legendre : evaluate Legendre polynomials
numpy.polynomial.legendre.Legendre : Legendre series
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
""")
add_newdoc("eval_genlaguerre",
r"""
eval_genlaguerre(n, alpha, x, out=None)
Evaluate generalized Laguerre polynomial at a point.
The generalized Laguerre polynomials can be defined via the
confluent hypergeometric function :math:`{}_1F_1` as
.. math::
L_n^{(\alpha)}(x) = \binom{n + \alpha}{n}
{}_1F_1(-n, \alpha + 1, x).
When :math:`n` is an integer the result is a polynomial of degree
:math:`n`. See 22.5.54 in [AS]_ for details. The Laguerre
polynomials are the special case where :math:`\alpha = 0`.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer, the result is
determined via the relation to the confluent hypergeometric
function.
alpha : array_like
Parameter; must have ``alpha > -1``
x : array_like
Points at which to evaluate the generalized Laguerre
polynomial
out : ndarray, optional
Optional output array for the function values
Returns
-------
L : scalar or ndarray
Values of the generalized Laguerre polynomial
See Also
--------
roots_genlaguerre : roots and quadrature weights of generalized
Laguerre polynomials
genlaguerre : generalized Laguerre polynomial object
hyp1f1 : confluent hypergeometric function
eval_laguerre : evaluate Laguerre polynomials
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
""")
add_newdoc("eval_laguerre",
r"""
eval_laguerre(n, x, out=None)
Evaluate Laguerre polynomial at a point.
The Laguerre polynomials can be defined via the confluent
hypergeometric function :math:`{}_1F_1` as
.. math::
L_n(x) = {}_1F_1(-n, 1, x).
See 22.5.16 and 22.5.54 in [AS]_ for details. When :math:`n` is an
integer the result is a polynomial of degree :math:`n`.
Parameters
----------
n : array_like
Degree of the polynomial. If not an integer the result is
determined via the relation to the confluent hypergeometric
function.
x : array_like
Points at which to evaluate the Laguerre polynomial
out : ndarray, optional
Optional output array for the function values
Returns
-------
L : scalar or ndarray
Values of the Laguerre polynomial
See Also
--------
roots_laguerre : roots and quadrature weights of Laguerre
polynomials
laguerre : Laguerre polynomial object
numpy.polynomial.laguerre.Laguerre : Laguerre series
eval_genlaguerre : evaluate generalized Laguerre polynomials
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
""")
add_newdoc("eval_hermite",
r"""
eval_hermite(n, x, out=None)
Evaluate physicist's Hermite polynomial at a point.
Defined by
.. math::
H_n(x) = (-1)^n e^{x^2} \frac{d^n}{dx^n} e^{-x^2};
:math:`H_n` is a polynomial of degree :math:`n`. See 22.11.7 in
[AS]_ for details.
Parameters
----------
n : array_like
Degree of the polynomial
x : array_like
Points at which to evaluate the Hermite polynomial
out : ndarray, optional
Optional output array for the function values
Returns
-------
H : scalar or ndarray
Values of the Hermite polynomial
See Also
--------
roots_hermite : roots and quadrature weights of physicist's
Hermite polynomials
hermite : physicist's Hermite polynomial object
numpy.polynomial.hermite.Hermite : Physicist's Hermite series
eval_hermitenorm : evaluate Probabilist's Hermite polynomials
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
""")
add_newdoc("eval_hermitenorm",
r"""
eval_hermitenorm(n, x, out=None)
Evaluate probabilist's (normalized) Hermite polynomial at a
point.
Defined by
.. math::
He_n(x) = (-1)^n e^{x^2/2} \frac{d^n}{dx^n} e^{-x^2/2};
:math:`He_n` is a polynomial of degree :math:`n`. See 22.11.8 in
[AS]_ for details.
Parameters
----------
n : array_like
Degree of the polynomial
x : array_like
Points at which to evaluate the Hermite polynomial
out : ndarray, optional
Optional output array for the function values
Returns
-------
He : scalar or ndarray
Values of the Hermite polynomial
See Also
--------
roots_hermitenorm : roots and quadrature weights of probabilist's
Hermite polynomials
hermitenorm : probabilist's Hermite polynomial object
numpy.polynomial.hermite_e.HermiteE : Probabilist's Hermite series
eval_hermite : evaluate physicist's Hermite polynomials
References
----------
.. [AS] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
""")
add_newdoc("exp1",
r"""
exp1(z, out=None)
Exponential integral E1.
For complex :math:`z \ne 0` the exponential integral can be defined as
[1]_
.. math::
E_1(z) = \int_z^\infty \frac{e^{-t}}{t} dt,
where the path of the integral does not cross the negative real
axis or pass through the origin.
Parameters
----------
    z : array_like
Real or complex argument.
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
Values of the exponential integral E1
See Also
--------
expi : exponential integral :math:`Ei`
expn : generalization of :math:`E_1`
Notes
-----
For :math:`x > 0` it is related to the exponential integral
:math:`Ei` (see `expi`) via the relation
.. math::
E_1(x) = -Ei(-x).
References
----------
.. [1] Digital Library of Mathematical Functions, 6.2.1
https://dlmf.nist.gov/6.2#E1
Examples
--------
>>> import numpy as np
>>> import scipy.special as sc
It has a pole at 0.
>>> sc.exp1(0)
inf
It has a branch cut on the negative real axis.
>>> sc.exp1(-1)
nan
>>> sc.exp1(complex(-1, 0))
(-1.8951178163559368-3.141592653589793j)
>>> sc.exp1(complex(-1, -0.0))
(-1.8951178163559368+3.141592653589793j)
It approaches 0 along the positive real axis.
>>> sc.exp1([1, 10, 100, 1000])
array([2.19383934e-01, 4.15696893e-06, 3.68359776e-46, 0.00000000e+00])
It is related to `expi`.
>>> x = np.array([1, 2, 3, 4])
>>> sc.exp1(x)
array([0.21938393, 0.04890051, 0.01304838, 0.00377935])
>>> -sc.expi(-x)
array([0.21938393, 0.04890051, 0.01304838, 0.00377935])
""")
add_newdoc(
"_scaled_exp1",
"""
    _scaled_exp1(x, out=None)
Compute the scaled exponential integral.
This is a private function, subject to change or removal with no
deprecation.
This function computes F(x), where F is the factor remaining in E_1(x)
when exp(-x)/x is factored out. That is,::
E_1(x) = exp(-x)/x * F(x)
or
F(x) = x * exp(x) * E_1(x)
The function is defined for real x >= 0. For x < 0, nan is returned.
F has the properties:
* F(0) = 0
* F(x) is increasing on [0, inf).
* The limit as x goes to infinity of F(x) is 1.
Parameters
----------
    x : array_like
The input values. Must be real. The implementation is limited to
double precision floating point, so other types will be cast to
to double precision.
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
Values of the scaled exponential integral.
See Also
--------
exp1 : exponential integral E_1
Examples
--------
>>> from scipy.special import _scaled_exp1
>>> _scaled_exp1([0, 0.1, 1, 10, 100])
"""
)
add_newdoc("exp10",
"""
exp10(x, out=None)
Compute ``10**x`` element-wise.
Parameters
----------
x : array_like
`x` must contain real numbers.
out : ndarray, optional
Optional output array for the function values
Returns
-------
scalar or ndarray
``10**x``, computed element-wise.
Examples
--------
>>> import numpy as np
>>> from scipy.special import exp10
>>> exp10(3)
1000.0
>>> x = np.array([[-1, -0.5, 0], [0.5, 1, 1.5]])
>>> exp10(x)
array([[ 0.1 , 0.31622777, 1. ],
[ 3.16227766, 10. , 31.6227766 ]])
""")
add_newdoc("exp2",
"""
exp2(x, out=None)
Compute ``2**x`` element-wise.
Parameters
----------
x : array_like
`x` must contain real numbers.
out : ndarray, optional
Optional output array for the function values
Returns
-------
scalar or ndarray
``2**x``, computed element-wise.
Examples
--------
>>> import numpy as np
>>> from scipy.special import exp2
>>> exp2(3)
8.0
>>> x = np.array([[-1, -0.5, 0], [0.5, 1, 1.5]])
>>> exp2(x)
array([[ 0.5 , 0.70710678, 1. ],
[ 1.41421356, 2. , 2.82842712]])
""")
add_newdoc("expi",
r"""
expi(x, out=None)
Exponential integral Ei.
For real :math:`x`, the exponential integral is defined as [1]_
.. math::
Ei(x) = \int_{-\infty}^x \frac{e^t}{t} dt.
For :math:`x > 0` the integral is understood as a Cauchy principal
value.
It is extended to the complex plane by analytic continuation of
the function on the interval :math:`(0, \infty)`. The complex
variant has a branch cut on the negative real axis.
Parameters
----------
x : array_like
Real or complex valued argument
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
Values of the exponential integral
Notes
-----
The exponential integrals :math:`E_1` and :math:`Ei` satisfy the
relation
.. math::
E_1(x) = -Ei(-x)
for :math:`x > 0`.
See Also
--------
exp1 : Exponential integral :math:`E_1`
expn : Generalized exponential integral :math:`E_n`
References
----------
.. [1] Digital Library of Mathematical Functions, 6.2.5
https://dlmf.nist.gov/6.2#E5
Examples
--------
>>> import numpy as np
>>> import scipy.special as sc
It is related to `exp1`.
>>> x = np.array([1, 2, 3, 4])
>>> -sc.expi(-x)
array([0.21938393, 0.04890051, 0.01304838, 0.00377935])
>>> sc.exp1(x)
array([0.21938393, 0.04890051, 0.01304838, 0.00377935])
The complex variant has a branch cut on the negative real axis.
>>> sc.expi(-1 + 1e-12j)
(-0.21938393439552062+3.1415926535894254j)
>>> sc.expi(-1 - 1e-12j)
(-0.21938393439552062-3.1415926535894254j)
As the complex variant approaches the branch cut, the real parts
approach the value of the real variant.
>>> sc.expi(-1)
-0.21938393439552062
The SciPy implementation returns the real variant for complex
values on the branch cut.
>>> sc.expi(complex(-1, 0.0))
(-0.21938393439552062-0j)
>>> sc.expi(complex(-1, -0.0))
(-0.21938393439552062-0j)
""")
add_newdoc('expit',
"""
expit(x, out=None)
Expit (a.k.a. logistic sigmoid) ufunc for ndarrays.
The expit function, also known as the logistic sigmoid function, is
defined as ``expit(x) = 1/(1+exp(-x))``. It is the inverse of the
logit function.
Parameters
----------
x : ndarray
The ndarray to apply expit to element-wise.
out : ndarray, optional
Optional output array for the function values
Returns
-------
scalar or ndarray
An ndarray of the same shape as x. Its entries
are `expit` of the corresponding entry of x.
See Also
--------
logit
Notes
-----
As a ufunc expit takes a number of optional
keyword arguments. For more information
see `ufuncs <https://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_
.. versionadded:: 0.10.0
Examples
--------
>>> import numpy as np
>>> from scipy.special import expit, logit
>>> expit([-np.inf, -1.5, 0, 1.5, np.inf])
array([ 0. , 0.18242552, 0.5 , 0.81757448, 1. ])
`logit` is the inverse of `expit`:
>>> logit(expit([-2.5, 0, 3.1, 5.0]))
array([-2.5, 0. , 3.1, 5. ])
Plot expit(x) for x in [-6, 6]:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-6, 6, 121)
>>> y = expit(x)
>>> plt.plot(x, y)
>>> plt.grid()
>>> plt.xlim(-6, 6)
>>> plt.xlabel('x')
>>> plt.title('expit(x)')
>>> plt.show()
""")
add_newdoc("expm1",
"""
expm1(x, out=None)
Compute ``exp(x) - 1``.
When `x` is near zero, ``exp(x)`` is near 1, so the numerical calculation
of ``exp(x) - 1`` can suffer from catastrophic loss of precision.
``expm1(x)`` is implemented to avoid the loss of precision that occurs when
`x` is near zero.
Parameters
----------
x : array_like
`x` must contain real numbers.
out : ndarray, optional
Optional output array for the function values
Returns
-------
scalar or ndarray
``exp(x) - 1`` computed element-wise.
Examples
--------
>>> import numpy as np
>>> from scipy.special import expm1
>>> expm1(1.0)
1.7182818284590451
>>> expm1([-0.2, -0.1, 0, 0.1, 0.2])
array([-0.18126925, -0.09516258, 0. , 0.10517092, 0.22140276])
The exact value of ``exp(7.5e-13) - 1`` is::
7.5000000000028125000000007031250000001318...*10**-13.
Here is what ``expm1(7.5e-13)`` gives:
>>> expm1(7.5e-13)
7.5000000000028135e-13
Compare that to ``exp(7.5e-13) - 1``, where the subtraction results in
a "catastrophic" loss of precision:
>>> np.exp(7.5e-13) - 1
7.5006667543675576e-13
""")
add_newdoc("expn",
r"""
expn(n, x, out=None)
Generalized exponential integral En.
For integer :math:`n \geq 0` and real :math:`x \geq 0` the
generalized exponential integral is defined as [dlmf]_
.. math::
E_n(x) = x^{n - 1} \int_x^\infty \frac{e^{-t}}{t^n} dt.
Parameters
----------
n : array_like
Non-negative integers
x : array_like
Real argument
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
Values of the generalized exponential integral
See Also
--------
exp1 : special case of :math:`E_n` for :math:`n = 1`
expi : related to :math:`E_n` when :math:`n = 1`
References
----------
.. [dlmf] Digital Library of Mathematical Functions, 8.19.2
https://dlmf.nist.gov/8.19#E2
Examples
--------
>>> import numpy as np
>>> import scipy.special as sc
Its domain is nonnegative n and x.
>>> sc.expn(-1, 1.0), sc.expn(1, -1.0)
(nan, nan)
It has a pole at ``x = 0`` for ``n = 1, 2``; for larger ``n`` it
is equal to ``1 / (n - 1)``.
>>> sc.expn([0, 1, 2, 3, 4], 0)
array([ inf, inf, 1. , 0.5 , 0.33333333])
For n equal to 0 it reduces to ``exp(-x) / x``.
>>> x = np.array([1, 2, 3, 4])
>>> sc.expn(0, x)
array([0.36787944, 0.06766764, 0.01659569, 0.00457891])
>>> np.exp(-x) / x
array([0.36787944, 0.06766764, 0.01659569, 0.00457891])
For n equal to 1 it reduces to `exp1`.
>>> sc.expn(1, x)
array([0.21938393, 0.04890051, 0.01304838, 0.00377935])
>>> sc.exp1(x)
array([0.21938393, 0.04890051, 0.01304838, 0.00377935])
""")
add_newdoc("exprel",
r"""
exprel(x, out=None)
Relative error exponential, ``(exp(x) - 1)/x``.
When `x` is near zero, ``exp(x)`` is near 1, so the numerical calculation
of ``exp(x) - 1`` can suffer from catastrophic loss of precision.
``exprel(x)`` is implemented to avoid the loss of precision that occurs when
`x` is near zero.
Parameters
----------
x : ndarray
Input array. `x` must contain real numbers.
out : ndarray, optional
Optional output array for the function values
Returns
-------
scalar or ndarray
``(exp(x) - 1)/x``, computed element-wise.
See Also
--------
expm1
Notes
-----
.. versionadded:: 0.17.0
Examples
--------
>>> import numpy as np
>>> from scipy.special import exprel
>>> exprel(0.01)
1.0050167084168056
>>> exprel([-0.25, -0.1, 0, 0.1, 0.25])
array([ 0.88479687, 0.95162582, 1. , 1.05170918, 1.13610167])
Compare ``exprel(5e-9)`` to the naive calculation. The exact value
is ``1.00000000250000000416...``.
>>> exprel(5e-9)
1.0000000025
>>> (np.exp(5e-9) - 1)/5e-9
0.99999999392252903
""")
add_newdoc("fdtr",
r"""
fdtr(dfn, dfd, x, out=None)
F cumulative distribution function.
Returns the value of the cumulative distribution function of the
F-distribution, also known as Snedecor's F-distribution or the
Fisher-Snedecor distribution.
The F-distribution with parameters :math:`d_n` and :math:`d_d` is the
distribution of the random variable,
.. math::
X = \frac{U_n/d_n}{U_d/d_d},
where :math:`U_n` and :math:`U_d` are random variables distributed
:math:`\chi^2`, with :math:`d_n` and :math:`d_d` degrees of freedom,
respectively.
Parameters
----------
dfn : array_like
First parameter (positive float).
dfd : array_like
Second parameter (positive float).
x : array_like
Argument (nonnegative float).
out : ndarray, optional
Optional output array for the function values
Returns
-------
y : scalar or ndarray
The CDF of the F-distribution with parameters `dfn` and `dfd` at `x`.
See Also
--------
fdtrc : F distribution survival function
fdtri : F distribution inverse cumulative distribution
scipy.stats.f : F distribution
Notes
-----
The regularized incomplete beta function is used, according to the
formula,
.. math::
F(d_n, d_d; x) = I_{xd_n/(d_d + xd_n)}(d_n/2, d_d/2).
Wrapper for the Cephes [1]_ routine `fdtr`. The F distribution is also
available as `scipy.stats.f`. Calling `fdtr` directly can improve
performance compared to the ``cdf`` method of `scipy.stats.f` (see last
example below).
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
Examples
--------
Calculate the function for ``dfn=1`` and ``dfd=2`` at ``x=1``.
>>> import numpy as np
>>> from scipy.special import fdtr
>>> fdtr(1, 2, 1)
0.5773502691896258
Calculate the function at several points by providing a NumPy array for
`x`.
>>> x = np.array([0.5, 2., 3.])
>>> fdtr(1, 2, x)
array([0.4472136 , 0.70710678, 0.77459667])
Plot the function for several parameter sets.
>>> import matplotlib.pyplot as plt
>>> dfn_parameters = [1, 5, 10, 50]
>>> dfd_parameters = [1, 1, 2, 3]
>>> linestyles = ['solid', 'dashed', 'dotted', 'dashdot']
>>> parameters_list = list(zip(dfn_parameters, dfd_parameters,
... linestyles))
>>> x = np.linspace(0, 30, 1000)
>>> fig, ax = plt.subplots()
>>> for parameter_set in parameters_list:
... dfn, dfd, style = parameter_set
... fdtr_vals = fdtr(dfn, dfd, x)
... ax.plot(x, fdtr_vals, label=rf"$d_n={dfn},\, d_d={dfd}$",
... ls=style)
>>> ax.legend()
>>> ax.set_xlabel("$x$")
>>> ax.set_title("F distribution cumulative distribution function")
>>> plt.show()
The F distribution is also available as `scipy.stats.f`. Using `fdtr`
directly can be much faster than calling the ``cdf`` method of
`scipy.stats.f`, especially for small arrays or individual values.
To get the same results one must use the following parametrization:
``stats.f(dfn, dfd).cdf(x)=fdtr(dfn, dfd, x)``.
>>> from scipy.stats import f
>>> dfn, dfd = 1, 2
>>> x = 1
>>> fdtr_res = fdtr(dfn, dfd, x) # this will often be faster than below
>>> f_dist_res = f(dfn, dfd).cdf(x)
>>> fdtr_res == f_dist_res # test that results are equal
True
""")
add_newdoc("fdtrc",
r"""
fdtrc(dfn, dfd, x, out=None)
F survival function.
Returns the complemented F-distribution function (the integral of the
density from `x` to infinity).
Parameters
----------
dfn : array_like
First parameter (positive float).
dfd : array_like
Second parameter (positive float).
x : array_like
Argument (nonnegative float).
out : ndarray, optional
Optional output array for the function values
Returns
-------
y : scalar or ndarray
The complemented F-distribution function with parameters `dfn` and
`dfd` at `x`.
See Also
--------
fdtr : F distribution cumulative distribution function
fdtri : F distribution inverse cumulative distribution function
scipy.stats.f : F distribution
Notes
-----
The regularized incomplete beta function is used, according to the
formula,
.. math::
F(d_n, d_d; x) = I_{d_d/(d_d + xd_n)}(d_d/2, d_n/2).
Wrapper for the Cephes [1]_ routine `fdtrc`. The F distribution is also
available as `scipy.stats.f`. Calling `fdtrc` directly can improve
performance compared to the ``sf`` method of `scipy.stats.f` (see last
example below).
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
Examples
--------
Calculate the function for ``dfn=1`` and ``dfd=2`` at ``x=1``.
>>> import numpy as np
>>> from scipy.special import fdtrc
>>> fdtrc(1, 2, 1)
0.42264973081037427
Calculate the function at several points by providing a NumPy array for
`x`.
>>> x = np.array([0.5, 2., 3.])
>>> fdtrc(1, 2, x)
array([0.5527864 , 0.29289322, 0.22540333])
Plot the function for several parameter sets.
>>> import matplotlib.pyplot as plt
>>> dfn_parameters = [1, 5, 10, 50]
>>> dfd_parameters = [1, 1, 2, 3]
>>> linestyles = ['solid', 'dashed', 'dotted', 'dashdot']
>>> parameters_list = list(zip(dfn_parameters, dfd_parameters,
... linestyles))
>>> x = np.linspace(0, 30, 1000)
>>> fig, ax = plt.subplots()
>>> for parameter_set in parameters_list:
... dfn, dfd, style = parameter_set
... fdtrc_vals = fdtrc(dfn, dfd, x)
... ax.plot(x, fdtrc_vals, label=rf"$d_n={dfn},\, d_d={dfd}$",
... ls=style)
>>> ax.legend()
>>> ax.set_xlabel("$x$")
>>> ax.set_title("F distribution survival function")
>>> plt.show()
The F distribution is also available as `scipy.stats.f`. Using `fdtrc`
directly can be much faster than calling the ``sf`` method of
`scipy.stats.f`, especially for small arrays or individual values.
To get the same results one must use the following parametrization:
``stats.f(dfn, dfd).sf(x)=fdtrc(dfn, dfd, x)``.
>>> from scipy.stats import f
>>> dfn, dfd = 1, 2
>>> x = 1
>>> fdtrc_res = fdtrc(dfn, dfd, x) # this will often be faster than below
>>> f_dist_res = f(dfn, dfd).sf(x)
>>> f_dist_res == fdtrc_res # test that results are equal
True
""")
add_newdoc("fdtri",
r"""
fdtri(dfn, dfd, p, out=None)
The `p`-th quantile of the F-distribution.
This function is the inverse of the F-distribution CDF, `fdtr`, returning
the `x` such that `fdtr(dfn, dfd, x) = p`.
Parameters
----------
dfn : array_like
First parameter (positive float).
dfd : array_like
Second parameter (positive float).
p : array_like
Cumulative probability, in [0, 1].
out : ndarray, optional
Optional output array for the function values
Returns
-------
x : scalar or ndarray
The quantile corresponding to `p`.
See Also
--------
fdtr : F distribution cumulative distribution function
fdtrc : F distribution survival function
scipy.stats.f : F distribution
Notes
-----
The computation is carried out using the relation to the inverse
regularized beta function, :math:`I^{-1}_x(a, b)`. Let
:math:`z = I^{-1}_p(d_d/2, d_n/2).` Then,
.. math::
x = \frac{d_d (1 - z)}{d_n z}.
If `p` is such that :math:`x < 0.5`, the following relation is used
instead for improved stability: let
:math:`z' = I^{-1}_{1 - p}(d_n/2, d_d/2).` Then,
.. math::
x = \frac{d_d z'}{d_n (1 - z')}.
Wrapper for the Cephes [1]_ routine `fdtri`.
The F distribution is also available as `scipy.stats.f`. Calling
`fdtri` directly can improve performance compared to the ``ppf``
method of `scipy.stats.f` (see last example below).
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
Examples
--------
`fdtri` represents the inverse of the F distribution CDF which is
available as `fdtr`. Here, we calculate the CDF for ``df1=1``, ``df2=2``
at ``x=3``. `fdtri` then returns ``3`` given the same values for `df1`,
`df2` and the computed CDF value.
>>> import numpy as np
>>> from scipy.special import fdtri, fdtr
>>> df1, df2 = 1, 2
>>> x = 3
>>> cdf_value = fdtr(df1, df2, x)
>>> fdtri(df1, df2, cdf_value)
3.000000000000006
Calculate the function at several points by providing a NumPy array for
`x`.
>>> x = np.array([0.1, 0.4, 0.7])
>>> fdtri(1, 2, x)
array([0.02020202, 0.38095238, 1.92156863])
Plot the function for several parameter sets.
>>> import matplotlib.pyplot as plt
>>> dfn_parameters = [50, 10, 1, 50]
>>> dfd_parameters = [0.5, 1, 1, 5]
>>> linestyles = ['solid', 'dashed', 'dotted', 'dashdot']
>>> parameters_list = list(zip(dfn_parameters, dfd_parameters,
... linestyles))
>>> x = np.linspace(0, 1, 1000)
>>> fig, ax = plt.subplots()
>>> for parameter_set in parameters_list:
... dfn, dfd, style = parameter_set
... fdtri_vals = fdtri(dfn, dfd, x)
... ax.plot(x, fdtri_vals, label=rf"$d_n={dfn},\, d_d={dfd}$",
... ls=style)
>>> ax.legend()
>>> ax.set_xlabel("$x$")
>>> title = "F distribution inverse cumulative distribution function"
>>> ax.set_title(title)
>>> ax.set_ylim(0, 30)
>>> plt.show()
The F distribution is also available as `scipy.stats.f`. Using `fdtri`
directly can be much faster than calling the ``ppf`` method of
`scipy.stats.f`, especially for small arrays or individual values.
To get the same results one must use the following parametrization:
``stats.f(dfn, dfd).ppf(x)=fdtri(dfn, dfd, x)``.
>>> from scipy.stats import f
>>> dfn, dfd = 1, 2
>>> x = 0.7
>>> fdtri_res = fdtri(dfn, dfd, x) # this will often be faster than below
>>> f_dist_res = f(dfn, dfd).ppf(x)
>>> f_dist_res == fdtri_res # test that results are equal
True
""")
add_newdoc("fdtridfd",
"""
fdtridfd(dfn, p, x, out=None)
Inverse to `fdtr` vs dfd
Finds the F density argument dfd such that ``fdtr(dfn, dfd, x) == p``.
Parameters
----------
dfn : array_like
First parameter (positive float).
p : array_like
Cumulative probability, in [0, 1].
x : array_like
Argument (nonnegative float).
out : ndarray, optional
Optional output array for the function values
Returns
-------
dfd : scalar or ndarray
`dfd` such that ``fdtr(dfn, dfd, x) == p``.
See Also
--------
fdtr : F distribution cumulative distribution function
fdtrc : F distribution survival function
fdtri : F distribution quantile function
scipy.stats.f : F distribution
Examples
--------
Compute the F distribution cumulative distribution function for one
parameter set.
>>> from scipy.special import fdtridfd, fdtr
>>> dfn, dfd, x = 10, 5, 2
>>> cdf_value = fdtr(dfn, dfd, x)
>>> cdf_value
0.7700248806501017
Verify that `fdtridfd` recovers the original value for `dfd`:
>>> fdtridfd(dfn, cdf_value, x)
5.0
""")
'''
commented out as fdtridfn seems to have bugs and is not in functions.json
see: https://github.com/scipy/scipy/pull/15622#discussion_r811440983
add_newdoc(
"fdtridfn",
"""
fdtridfn(p, dfd, x, out=None)
Inverse to `fdtr` vs dfn
finds the F density argument dfn such that ``fdtr(dfn, dfd, x) == p``.
Parameters
----------
p : array_like
Cumulative probability, in [0, 1].
dfd : array_like
Second parameter (positive float).
x : array_like
Argument (nonnegative float).
out : ndarray, optional
Optional output array for the function values
Returns
-------
dfn : scalar or ndarray
`dfn` such that ``fdtr(dfn, dfd, x) == p``.
See Also
--------
fdtr, fdtrc, fdtri, fdtridfd
""")
'''
add_newdoc("fresnel",
r"""
fresnel(z, out=None)
Fresnel integrals.
The Fresnel integrals are defined as
.. math::
S(z) &= \int_0^z \sin(\pi t^2 /2) dt \\
C(z) &= \int_0^z \cos(\pi t^2 /2) dt.
See [dlmf]_ for details.
Parameters
----------
z : array_like
Real or complex valued argument
out : 2-tuple of ndarrays, optional
Optional output arrays for the function results
Returns
-------
S, C : 2-tuple of scalar or ndarray
Values of the Fresnel integrals
See Also
--------
fresnel_zeros : zeros of the Fresnel integrals
References
----------
.. [dlmf] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/7.2#iii
Examples
--------
>>> import numpy as np
>>> import scipy.special as sc
As z goes to infinity along the real axis, S and C converge to 0.5.
>>> S, C = sc.fresnel([0.1, 1, 10, 100, np.inf])
>>> S
array([0.00052359, 0.43825915, 0.46816998, 0.4968169 , 0.5 ])
>>> C
array([0.09999753, 0.7798934 , 0.49989869, 0.4999999 , 0.5 ])
They are related to the error function `erf`.
>>> z = np.array([1, 2, 3, 4])
>>> zeta = 0.5 * np.sqrt(np.pi) * (1 - 1j) * z
>>> S, C = sc.fresnel(z)
>>> C + 1j*S
array([0.7798934 +0.43825915j, 0.48825341+0.34341568j,
0.60572079+0.496313j , 0.49842603+0.42051575j])
>>> 0.5 * (1 + 1j) * sc.erf(zeta)
array([0.7798934 +0.43825915j, 0.48825341+0.34341568j,
0.60572079+0.496313j , 0.49842603+0.42051575j])
""")
add_newdoc("gamma",
r"""
gamma(z, out=None)
gamma function.
The gamma function is defined as
.. math::
\Gamma(z) = \int_0^\infty t^{z-1} e^{-t} dt
for :math:`\Re(z) > 0` and is extended to the rest of the complex
plane by analytic continuation. See [dlmf]_ for more details.
Parameters
----------
z : array_like
Real or complex valued argument
out : ndarray, optional
Optional output array for the function values
Returns
-------
scalar or ndarray
Values of the gamma function
Notes
-----
The gamma function is often referred to as the generalized
factorial since :math:`\Gamma(n + 1) = n!` for natural numbers
:math:`n`. More generally it satisfies the recurrence relation
:math:`\Gamma(z + 1) = z \cdot \Gamma(z)` for complex :math:`z`,
which, combined with the fact that :math:`\Gamma(1) = 1`, implies
the above identity for :math:`z = n`.
References
----------
.. [dlmf] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/5.2#E1
Examples
--------
>>> import numpy as np
>>> from scipy.special import gamma, factorial
>>> gamma([0, 0.5, 1, 5])
array([ inf, 1.77245385, 1. , 24. ])
>>> z = 2.5 + 1j
>>> gamma(z)
(0.77476210455108352+0.70763120437959293j)
>>> gamma(z+1), z*gamma(z) # Recurrence property
((1.2292740569981171+2.5438401155000685j),
(1.2292740569981158+2.5438401155000658j))
>>> gamma(0.5)**2 # gamma(0.5) = sqrt(pi)
3.1415926535897927
Plot gamma(x) for real x
>>> x = np.linspace(-3.5, 5.5, 2251)
>>> y = gamma(x)
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'b', alpha=0.6, label='gamma(x)')
>>> k = np.arange(1, 7)
>>> plt.plot(k, factorial(k-1), 'k*', alpha=0.6,
... label='(x-1)!, x = 1, 2, ...')
>>> plt.xlim(-3.5, 5.5)
>>> plt.ylim(-10, 25)
>>> plt.grid()
>>> plt.xlabel('x')
>>> plt.legend(loc='lower right')
>>> plt.show()
""")
add_newdoc("gammainc",
r"""
gammainc(a, x, out=None)
Regularized lower incomplete gamma function.
It is defined as
.. math::
P(a, x) = \frac{1}{\Gamma(a)} \int_0^x t^{a - 1}e^{-t} dt
for :math:`a > 0` and :math:`x \geq 0`. See [dlmf]_ for details.
Parameters
----------
a : array_like
Positive parameter
x : array_like
Nonnegative argument
out : ndarray, optional
Optional output array for the function values
Returns
-------
scalar or ndarray
Values of the lower incomplete gamma function
Notes
-----
The function satisfies the relation ``gammainc(a, x) +
gammaincc(a, x) = 1`` where `gammaincc` is the regularized upper
incomplete gamma function.
The implementation largely follows that of [boost]_.
See also
--------
gammaincc : regularized upper incomplete gamma function
gammaincinv : inverse of the regularized lower incomplete gamma function
gammainccinv : inverse of the regularized upper incomplete gamma function
References
----------
.. [dlmf] NIST Digital Library of Mathematical functions
https://dlmf.nist.gov/8.2#E4
.. [boost] Maddock et. al., "Incomplete Gamma Functions",
https://www.boost.org/doc/libs/1_61_0/libs/math/doc/html/math_toolkit/sf_gamma/igamma.html
Examples
--------
>>> import scipy.special as sc
It is the CDF of the gamma distribution, so it starts at 0 and
monotonically increases to 1.
>>> sc.gammainc(0.5, [0, 1, 10, 100])
array([0. , 0.84270079, 0.99999226, 1. ])
It is equal to one minus the upper incomplete gamma function.
>>> a, x = 0.5, 0.4
>>> sc.gammainc(a, x)
0.6289066304773024
>>> 1 - sc.gammaincc(a, x)
0.6289066304773024
""")
add_newdoc("gammaincc",
r"""
gammaincc(a, x, out=None)
Regularized upper incomplete gamma function.
It is defined as
.. math::
Q(a, x) = \frac{1}{\Gamma(a)} \int_x^\infty t^{a - 1}e^{-t} dt
for :math:`a > 0` and :math:`x \geq 0`. See [dlmf]_ for details.
Parameters
----------
a : array_like
Positive parameter
x : array_like
Nonnegative argument
out : ndarray, optional
Optional output array for the function values
Returns
-------
scalar or ndarray
Values of the upper incomplete gamma function
Notes
-----
The function satisfies the relation ``gammainc(a, x) +
gammaincc(a, x) = 1`` where `gammainc` is the regularized lower
incomplete gamma function.
The implementation largely follows that of [boost]_.
See also
--------
gammainc : regularized lower incomplete gamma function
gammaincinv : inverse of the regularized lower incomplete gamma function
gammainccinv : inverse of the regularized upper incomplete gamma function
References
----------
.. [dlmf] NIST Digital Library of Mathematical functions
https://dlmf.nist.gov/8.2#E4
.. [boost] Maddock et. al., "Incomplete Gamma Functions",
https://www.boost.org/doc/libs/1_61_0/libs/math/doc/html/math_toolkit/sf_gamma/igamma.html
Examples
--------
>>> import scipy.special as sc
It is the survival function of the gamma distribution, so it
starts at 1 and monotonically decreases to 0.
>>> sc.gammaincc(0.5, [0, 1, 10, 100, 1000])
array([1.00000000e+00, 1.57299207e-01, 7.74421643e-06, 2.08848758e-45,
0.00000000e+00])
It is equal to one minus the lower incomplete gamma function.
>>> a, x = 0.5, 0.4
>>> sc.gammaincc(a, x)
0.37109336952269756
>>> 1 - sc.gammainc(a, x)
0.37109336952269756
""")
add_newdoc("gammainccinv",
"""
gammainccinv(a, y, out=None)
Inverse of the regularized upper incomplete gamma function.
Given an input :math:`y` between 0 and 1, returns :math:`x` such
that :math:`y = Q(a, x)`. Here :math:`Q` is the regularized upper
incomplete gamma function; see `gammaincc`. This is well-defined
because the upper incomplete gamma function is monotonic as can
be seen from its definition in [dlmf]_.
Parameters
----------
a : array_like
Positive parameter
y : array_like
Argument between 0 and 1, inclusive
out : ndarray, optional
Optional output array for the function values
Returns
-------
scalar or ndarray
Values of the inverse of the upper incomplete gamma function
See Also
--------
gammaincc : regularized upper incomplete gamma function
gammainc : regularized lower incomplete gamma function
gammaincinv : inverse of the regularized lower incomplete gamma function
References
----------
.. [dlmf] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/8.2#E4
Examples
--------
>>> import scipy.special as sc
It starts at infinity and monotonically decreases to 0.
>>> sc.gammainccinv(0.5, [0, 0.1, 0.5, 1])
array([ inf, 1.35277173, 0.22746821, 0. ])
It inverts the upper incomplete gamma function.
>>> a, x = 0.5, [0, 0.1, 0.5, 1]
>>> sc.gammaincc(a, sc.gammainccinv(a, x))
array([0. , 0.1, 0.5, 1. ])
>>> a, x = 0.5, [0, 10, 50]
>>> sc.gammainccinv(a, sc.gammaincc(a, x))
array([ 0., 10., 50.])
""")
add_newdoc("gammaincinv",
"""
gammaincinv(a, y, out=None)
Inverse to the regularized lower incomplete gamma function.
Given an input :math:`y` between 0 and 1, returns :math:`x` such
that :math:`y = P(a, x)`. Here :math:`P` is the regularized lower
incomplete gamma function; see `gammainc`. This is well-defined
because the lower incomplete gamma function is monotonic as can be
seen from its definition in [dlmf]_.
Parameters
----------
a : array_like
Positive parameter
y : array_like
Parameter between 0 and 1, inclusive
out : ndarray, optional
Optional output array for the function values
Returns
-------
scalar or ndarray
Values of the inverse of the lower incomplete gamma function
See Also
--------
gammainc : regularized lower incomplete gamma function
gammaincc : regularized upper incomplete gamma function
gammainccinv : inverse of the regularized upper incomplete gamma function
References
----------
.. [dlmf] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/8.2#E4
Examples
--------
>>> import scipy.special as sc
It starts at 0 and monotonically increases to infinity.
>>> sc.gammaincinv(0.5, [0, 0.1 ,0.5, 1])
array([0. , 0.00789539, 0.22746821, inf])
It inverts the lower incomplete gamma function.
>>> a, x = 0.5, [0, 0.1, 0.5, 1]
>>> sc.gammainc(a, sc.gammaincinv(a, x))
array([0. , 0.1, 0.5, 1. ])
>>> a, x = 0.5, [0, 10, 25]
>>> sc.gammaincinv(a, sc.gammainc(a, x))
array([ 0. , 10. , 25.00001465])
""")
add_newdoc("gammaln",
r"""
gammaln(x, out=None)
Logarithm of the absolute value of the gamma function.
Defined as
.. math::
\ln(\lvert\Gamma(x)\rvert)
where :math:`\Gamma` is the gamma function. For more details on
the gamma function, see [dlmf]_.
Parameters
----------
x : array_like
Real argument
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
Values of the log of the absolute value of gamma
See Also
--------
gammasgn : sign of the gamma function
loggamma : principal branch of the logarithm of the gamma function
Notes
-----
It is the same function as the Python standard library function
:func:`math.lgamma`.
When used in conjunction with `gammasgn`, this function is useful
for working in logspace on the real axis without having to deal
with complex numbers via the relation ``exp(gammaln(x)) =
gammasgn(x) * gamma(x)``.
For complex-valued log-gamma, use `loggamma` instead of `gammaln`.
References
----------
.. [dlmf] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/5
Examples
--------
>>> import numpy as np
>>> import scipy.special as sc
It has two positive zeros.
>>> sc.gammaln([1, 2])
array([0., 0.])
It has poles at nonpositive integers.
>>> sc.gammaln([0, -1, -2, -3, -4])
array([inf, inf, inf, inf, inf])
It asymptotically approaches ``x * log(x)`` (Stirling's formula).
>>> x = np.array([1e10, 1e20, 1e40, 1e80])
>>> sc.gammaln(x)
array([2.20258509e+11, 4.50517019e+21, 9.11034037e+41, 1.83206807e+82])
>>> x * np.log(x)
array([2.30258509e+11, 4.60517019e+21, 9.21034037e+41, 1.84206807e+82])
""")
add_newdoc("gammasgn",
r"""
gammasgn(x, out=None)
Sign of the gamma function.
It is defined as
.. math::
\text{gammasgn}(x) =
\begin{cases}
+1 & \Gamma(x) > 0 \\
-1 & \Gamma(x) < 0
\end{cases}
where :math:`\Gamma` is the gamma function; see `gamma`. This
definition is complete since the gamma function is never zero;
see the discussion after [dlmf]_.
Parameters
----------
x : array_like
Real argument
out : ndarray, optional
Optional output array for the function values
Returns
-------
scalar or ndarray
Sign of the gamma function
Notes
-----
The gamma function can be computed as ``gammasgn(x) *
np.exp(gammaln(x))``.
See Also
--------
gamma : the gamma function
gammaln : log of the absolute value of the gamma function
loggamma : analytic continuation of the log of the gamma function
References
----------
.. [dlmf] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/5.2#E1
Examples
--------
>>> import numpy as np
>>> import scipy.special as sc
It is 1 for `x > 0`.
>>> sc.gammasgn([1, 2, 3, 4])
array([1., 1., 1., 1.])
It alternates between -1 and 1 for negative integers.
>>> sc.gammasgn([-0.5, -1.5, -2.5, -3.5])
array([-1., 1., -1., 1.])
It can be used to compute the gamma function.
>>> x = [1.5, 0.5, -0.5, -1.5]
>>> sc.gammasgn(x) * np.exp(sc.gammaln(x))
array([ 0.88622693, 1.77245385, -3.5449077 , 2.3632718 ])
>>> sc.gamma(x)
array([ 0.88622693, 1.77245385, -3.5449077 , 2.3632718 ])
""")
# Docstring for the gamma distribution CDF (Cephes `gdtr` wrapper).
# Fix: the plotting example used a non-raw f-string containing `\,`, an
# invalid escape sequence (warns when users copy-run the example); the
# sibling fdtr/fdtrc/fdtri examples correctly use an rf"" string, so the
# prefix is corrected here for consistency and correctness.
add_newdoc("gdtr",
    r"""
    gdtr(a, b, x, out=None)
    Gamma distribution cumulative distribution function.
    Returns the integral from zero to `x` of the gamma probability density
    function,
    .. math::
        F = \int_0^x \frac{a^b}{\Gamma(b)} t^{b-1} e^{-at}\,dt,
    where :math:`\Gamma` is the gamma function.
    Parameters
    ----------
    a : array_like
        The rate parameter of the gamma distribution, sometimes denoted
        :math:`\beta` (float). It is also the reciprocal of the scale
        parameter :math:`\theta`.
    b : array_like
        The shape parameter of the gamma distribution, sometimes denoted
        :math:`\alpha` (float).
    x : array_like
        The quantile (upper limit of integration; float).
    out : ndarray, optional
        Optional output array for the function values
    See also
    --------
    gdtrc : 1 - CDF of the gamma distribution.
    scipy.stats.gamma: Gamma distribution
    Returns
    -------
    F : scalar or ndarray
        The CDF of the gamma distribution with parameters `a` and `b`
        evaluated at `x`.
    Notes
    -----
    The evaluation is carried out using the relation to the incomplete gamma
    integral (regularized gamma function).
    Wrapper for the Cephes [1]_ routine `gdtr`. Calling `gdtr` directly can
    improve performance compared to the ``cdf`` method of `scipy.stats.gamma`
    (see last example below).
    References
    ----------
    .. [1] Cephes Mathematical Functions Library,
           http://www.netlib.org/cephes/
    Examples
    --------
    Compute the function for ``a=1``, ``b=2`` at ``x=5``.
    >>> import numpy as np
    >>> from scipy.special import gdtr
    >>> import matplotlib.pyplot as plt
    >>> gdtr(1., 2., 5.)
    0.9595723180054873
    Compute the function for ``a=1`` and ``b=2`` at several points by
    providing a NumPy array for `x`.
    >>> xvalues = np.array([1., 2., 3., 4])
    >>> gdtr(1., 1., xvalues)
    array([0.63212056, 0.86466472, 0.95021293, 0.98168436])
    `gdtr` can evaluate different parameter sets by providing arrays with
    broadcasting compatible shapes for `a`, `b` and `x`. Here we compute the
    function for three different `a` at four positions `x` and ``b=3``,
    resulting in a 3x4 array.
    >>> a = np.array([[0.5], [1.5], [2.5]])
    >>> x = np.array([1., 2., 3., 4])
    >>> a.shape, x.shape
    ((3, 1), (4,))
    >>> gdtr(a, 3., x)
    array([[0.01438768, 0.0803014 , 0.19115317, 0.32332358],
           [0.19115317, 0.57680992, 0.82642193, 0.9380312 ],
           [0.45618688, 0.87534798, 0.97974328, 0.9972306 ]])
    Plot the function for four different parameter sets.
    >>> a_parameters = [0.3, 1, 2, 6]
    >>> b_parameters = [2, 10, 15, 20]
    >>> linestyles = ['solid', 'dashed', 'dotted', 'dashdot']
    >>> parameters_list = list(zip(a_parameters, b_parameters, linestyles))
    >>> x = np.linspace(0, 30, 1000)
    >>> fig, ax = plt.subplots()
    >>> for parameter_set in parameters_list:
    ...     a, b, style = parameter_set
    ...     gdtr_vals = gdtr(a, b, x)
    ...     ax.plot(x, gdtr_vals, label=rf"$a= {a},\, b={b}$", ls=style)
    >>> ax.legend()
    >>> ax.set_xlabel("$x$")
    >>> ax.set_title("Gamma distribution cumulative distribution function")
    >>> plt.show()
    The gamma distribution is also available as `scipy.stats.gamma`. Using
    `gdtr` directly can be much faster than calling the ``cdf`` method of
    `scipy.stats.gamma`, especially for small arrays or individual values.
    To get the same results one must use the following parametrization:
    ``stats.gamma(b, scale=1/a).cdf(x)=gdtr(a, b, x)``.
    >>> from scipy.stats import gamma
    >>> a = 2.
    >>> b = 3
    >>> x = 1.
    >>> gdtr_result = gdtr(a, b, x)  # this will often be faster than below
    >>> gamma_dist_result = gamma(b, scale=1/a).cdf(x)
    >>> gdtr_result == gamma_dist_result  # test that results are equal
    True
    """)
add_newdoc("gdtrc",
r"""
gdtrc(a, b, x, out=None)
Gamma distribution survival function.
Integral from `x` to infinity of the gamma probability density function,
.. math::
F = \int_x^\infty \frac{a^b}{\Gamma(b)} t^{b-1} e^{-at}\,dt,
where :math:`\Gamma` is the gamma function.
Parameters
----------
a : array_like
The rate parameter of the gamma distribution, sometimes denoted
:math:`\beta` (float). It is also the reciprocal of the scale
parameter :math:`\theta`.
b : array_like
The shape parameter of the gamma distribution, sometimes denoted
:math:`\alpha` (float).
x : array_like
The quantile (lower limit of integration; float).
out : ndarray, optional
Optional output array for the function values
Returns
-------
F : scalar or ndarray
The survival function of the gamma distribution with parameters `a`
and `b` evaluated at `x`.
See Also
--------
gdtr: Gamma distribution cumulative distribution function
scipy.stats.gamma: Gamma distribution
gdtrix
Notes
-----
The evaluation is carried out using the relation to the incomplete gamma
integral (regularized gamma function).
Wrapper for the Cephes [1]_ routine `gdtrc`. Calling `gdtrc` directly can
improve performance compared to the ``sf`` method of `scipy.stats.gamma`
(see last example below).
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
Examples
--------
Compute the function for ``a=1`` and ``b=2`` at ``x=5``.
>>> import numpy as np
>>> from scipy.special import gdtrc
>>> import matplotlib.pyplot as plt
>>> gdtrc(1., 2., 5.)
0.04042768199451279
Compute the function for ``a=1``, ``b=2`` at several points by providing
a NumPy array for `x`.
>>> xvalues = np.array([1., 2., 3., 4])
>>> gdtrc(1., 1., xvalues)
array([0.36787944, 0.13533528, 0.04978707, 0.01831564])
`gdtrc` can evaluate different parameter sets by providing arrays with
broadcasting compatible shapes for `a`, `b` and `x`. Here we compute the
function for three different `a` at four positions `x` and ``b=3``,
resulting in a 3x4 array.
>>> a = np.array([[0.5], [1.5], [2.5]])
>>> x = np.array([1., 2., 3., 4])
>>> a.shape, x.shape
((3, 1), (4,))
>>> gdtrc(a, 3., x)
array([[0.98561232, 0.9196986 , 0.80884683, 0.67667642],
[0.80884683, 0.42319008, 0.17357807, 0.0619688 ],
[0.54381312, 0.12465202, 0.02025672, 0.0027694 ]])
Plot the function for four different parameter sets.
>>> a_parameters = [0.3, 1, 2, 6]
>>> b_parameters = [2, 10, 15, 20]
>>> linestyles = ['solid', 'dashed', 'dotted', 'dashdot']
>>> parameters_list = list(zip(a_parameters, b_parameters, linestyles))
>>> x = np.linspace(0, 30, 1000)
>>> fig, ax = plt.subplots()
>>> for parameter_set in parameters_list:
... a, b, style = parameter_set
... gdtrc_vals = gdtrc(a, b, x)
... ax.plot(x, gdtrc_vals, label=f"$a= {a},\, b={b}$", ls=style)
>>> ax.legend()
>>> ax.set_xlabel("$x$")
>>> ax.set_title("Gamma distribution survival function")
>>> plt.show()
The gamma distribution is also available as `scipy.stats.gamma`.
Using `gdtrc` directly can be much faster than calling the ``sf`` method
of `scipy.stats.gamma`, especially for small arrays or individual
values. To get the same results one must use the following parametrization:
``stats.gamma(b, scale=1/a).sf(x)=gdtrc(a, b, x)``.
>>> from scipy.stats import gamma
>>> a = 2
>>> b = 3
>>> x = 1.
>>> gdtrc_result = gdtrc(a, b, x) # this will often be faster than below
>>> gamma_dist_result = gamma(b, scale=1/a).sf(x)
>>> gdtrc_result == gamma_dist_result # test that results are equal
True
""")
add_newdoc("gdtria",
"""
gdtria(p, b, x, out=None)
Inverse of `gdtr` vs a.
Returns the inverse with respect to the parameter `a` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution.
Parameters
----------
p : array_like
Probability values.
b : array_like
`b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter
of the gamma distribution.
x : array_like
Nonnegative real values, from the domain of the gamma distribution.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `b` and `x`. `out` is then the
array returned by the function.
Returns
-------
a : scalar or ndarray
Values of the `a` parameter such that `p = gdtr(a, b, x)`. `1/a`
is the "scale" parameter of the gamma distribution.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.
gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `a` involves a search for a value
that produces the desired value of `p`. The search relies on the
monotonicity of `p` with `a`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Computation of the incomplete gamma function ratios and their
inverse. ACM Trans. Math. Softw. 12 (1986), 377-393.
Examples
--------
First evaluate `gdtr`.
>>> from scipy.special import gdtr, gdtria
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtria(p, 3.4, 5.6)
1.2
""")
add_newdoc("gdtrib",
"""
gdtrib(a, p, x, out=None)
Inverse of `gdtr` vs b.
Returns the inverse with respect to the parameter `b` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution.
Parameters
----------
a : array_like
`a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale"
parameter of the gamma distribution.
p : array_like
Probability values.
x : array_like
Nonnegative real values, from the domain of the gamma distribution.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `b` and `x`. `out` is then the
array returned by the function.
Returns
-------
b : scalar or ndarray
Values of the `b` parameter such that `p = gdtr(a, b, x)`. `b` is
the "shape" parameter of the gamma distribution.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.
gdtrix : Inverse with respect to `x` of `gdtr(a, b, x)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `b` involves a search for a value
that produces the desired value of `p`. The search relies on the
monotonicity of `p` with `b`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Computation of the incomplete gamma function ratios and their
inverse. ACM Trans. Math. Softw. 12 (1986), 377-393.
Examples
--------
First evaluate `gdtr`.
>>> from scipy.special import gdtr, gdtrib
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtrib(1.2, p, 5.6)
3.3999999999723882
""")
add_newdoc("gdtrix",
"""
gdtrix(a, b, p, out=None)
Inverse of `gdtr` vs x.
Returns the inverse with respect to the parameter `x` of ``p =
gdtr(a, b, x)``, the cumulative distribution function of the gamma
distribution. This is also known as the pth quantile of the
distribution.
Parameters
----------
a : array_like
`a` parameter values of `gdtr(a, b, x)`. `1/a` is the "scale"
parameter of the gamma distribution.
b : array_like
`b` parameter values of `gdtr(a, b, x)`. `b` is the "shape" parameter
of the gamma distribution.
p : array_like
Probability values.
out : ndarray, optional
If a fourth argument is given, it must be a numpy.ndarray whose size
matches the broadcast result of `a`, `b` and `x`. `out` is then the
array returned by the function.
Returns
-------
x : scalar or ndarray
Values of the `x` parameter such that `p = gdtr(a, b, x)`.
See Also
--------
gdtr : CDF of the gamma distribution.
gdtria : Inverse with respect to `a` of `gdtr(a, b, x)`.
gdtrib : Inverse with respect to `b` of `gdtr(a, b, x)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfgam`.
The cumulative distribution function `p` is computed using a routine by
DiDinato and Morris [2]_. Computation of `x` involves a search for a value
that produces the desired value of `p`. The search relies on the
monotonicity of `p` with `x`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] DiDinato, A. R. and Morris, A. H.,
Computation of the incomplete gamma function ratios and their
inverse. ACM Trans. Math. Softw. 12 (1986), 377-393.
Examples
--------
First evaluate `gdtr`.
>>> from scipy.special import gdtr, gdtrix
>>> p = gdtr(1.2, 3.4, 5.6)
>>> print(p)
0.94378087442
Verify the inverse.
>>> gdtrix(1.2, 3.4, p)
5.5999999999999996
""")
# Register the docstring for scipy.special.hankel1 (Hankel function of the
# first kind). Fix: the numpydoc "See also" entry previously read
# "hankel1e : ndarray", a spurious type where a description belongs; it now
# follows the same one-line form as the hankel2 entry in this file.
add_newdoc("hankel1",
    r"""
    hankel1(v, z, out=None)
    Hankel function of the first kind
    Parameters
    ----------
    v : array_like
        Order (float).
    z : array_like
        Argument (float or complex).
    out : ndarray, optional
        Optional output array for the function values
    Returns
    -------
    scalar or ndarray
        Values of the Hankel function of the first kind.
    Notes
    -----
    A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
    computation using the relation,
    .. math:: H^{(1)}_v(z) = \frac{2}{\imath\pi} \exp(-\imath \pi v/2) K_v(z \exp(-\imath\pi/2))
    where :math:`K_v` is the modified Bessel function of the second kind.
    For negative orders, the relation
    .. math:: H^{(1)}_{-v}(z) = H^{(1)}_v(z) \exp(\imath\pi v)
    is used.
    See also
    --------
    hankel1e : this function with leading exponential behavior stripped off.
    References
    ----------
    .. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
           of a Complex Argument and Nonnegative Order",
           http://netlib.org/amos/
    """)
add_newdoc("hankel1e",
r"""
hankel1e(v, z, out=None)
Exponentially scaled Hankel function of the first kind
Defined as::
hankel1e(v, z) = hankel1(v, z) * exp(-1j * z)
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
out : ndarray, optional
Optional output array for the function values
Returns
-------
scalar or ndarray
Values of the exponentially scaled Hankel function.
Notes
-----
A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
computation using the relation,
.. math:: H^{(1)}_v(z) = \frac{2}{\imath\pi} \exp(-\imath \pi v/2) K_v(z \exp(-\imath\pi/2))
where :math:`K_v` is the modified Bessel function of the second kind.
For negative orders, the relation
.. math:: H^{(1)}_{-v}(z) = H^{(1)}_v(z) \exp(\imath\pi v)
is used.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("hankel2",
r"""
hankel2(v, z, out=None)
Hankel function of the second kind
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
out : ndarray, optional
Optional output array for the function values
Returns
-------
scalar or ndarray
Values of the Hankel function of the second kind.
Notes
-----
A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
computation using the relation,
.. math:: H^{(2)}_v(z) = -\frac{2}{\imath\pi} \exp(\imath \pi v/2) K_v(z \exp(\imath\pi/2))
where :math:`K_v` is the modified Bessel function of the second kind.
For negative orders, the relation
.. math:: H^{(2)}_{-v}(z) = H^{(2)}_v(z) \exp(-\imath\pi v)
is used.
See also
--------
hankel2e : this function with leading exponential behavior stripped off.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("hankel2e",
r"""
hankel2e(v, z, out=None)
Exponentially scaled Hankel function of the second kind
Defined as::
hankel2e(v, z) = hankel2(v, z) * exp(1j * z)
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
out : ndarray, optional
Optional output array for the function values
Returns
-------
scalar or ndarray
Values of the exponentially scaled Hankel function of the second kind.
Notes
-----
A wrapper for the AMOS [1]_ routine `zbesh`, which carries out the
computation using the relation,
.. math:: H^{(2)}_v(z) = -\frac{2}{\imath\pi} \exp(\frac{\imath \pi v}{2}) K_v(z exp(\frac{\imath\pi}{2}))
where :math:`K_v` is the modified Bessel function of the second kind.
For negative orders, the relation
.. math:: H^{(2)}_{-v}(z) = H^{(2)}_v(z) \exp(-\imath\pi v)
is used.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
""")
add_newdoc("huber",
r"""
huber(delta, r, out=None)
Huber loss function.
.. math:: \text{huber}(\delta, r) = \begin{cases} \infty & \delta < 0 \\ \frac{1}{2}r^2 & 0 \le \delta, | r | \le \delta \\ \delta ( |r| - \frac{1}{2}\delta ) & \text{otherwise} \end{cases}
Parameters
----------
delta : ndarray
Input array, indicating the quadratic vs. linear loss changepoint.
r : ndarray
Input array, possibly representing residuals.
out : ndarray, optional
Optional output array for the function values
Returns
-------
scalar or ndarray
The computed Huber loss function values.
See also
--------
pseudo_huber : smooth approximation of this function
Notes
-----
`huber` is useful as a loss function in robust statistics or machine
learning to reduce the influence of outliers as compared to the common
squared error loss, residuals with a magnitude higher than `delta` are
not squared [1]_.
Typically, `r` represents residuals, the difference
between a model prediction and data. Then, for :math:`|r|\leq\delta`,
`huber` resembles the squared error and for :math:`|r|>\delta` the
absolute error. This way, the Huber loss often achieves
a fast convergence in model fitting for small residuals like the squared
error loss function and still reduces the influence of outliers
(:math:`|r|>\delta`) like the absolute error loss. As :math:`\delta` is
the cutoff between squared and absolute error regimes, it has
to be tuned carefully for each problem. `huber` is also
convex, making it suitable for gradient based optimization.
.. versionadded:: 0.15.0
References
----------
.. [1] Peter Huber. "Robust Estimation of a Location Parameter",
1964. Annals of Statistics. 53 (1): 73 - 101.
Examples
--------
Import all necessary modules.
>>> import numpy as np
>>> from scipy.special import huber
>>> import matplotlib.pyplot as plt
Compute the function for ``delta=1`` at ``r=2``
>>> huber(1., 2.)
1.5
Compute the function for different `delta` by providing a NumPy array or
list for `delta`.
>>> huber([1., 3., 5.], 4.)
array([3.5, 7.5, 8. ])
Compute the function at different points by providing a NumPy array or
list for `r`.
>>> huber(2., np.array([1., 1.5, 3.]))
array([0.5 , 1.125, 4. ])
The function can be calculated for different `delta` and `r` by
providing arrays for both with compatible shapes for broadcasting.
>>> r = np.array([1., 2.5, 8., 10.])
>>> deltas = np.array([[1.], [5.], [9.]])
>>> print(r.shape, deltas.shape)
(4,) (3, 1)
>>> huber(deltas, r)
array([[ 0.5 , 2. , 7.5 , 9.5 ],
[ 0.5 , 3.125, 27.5 , 37.5 ],
[ 0.5 , 3.125, 32. , 49.5 ]])
Plot the function for different `delta`.
>>> x = np.linspace(-4, 4, 500)
>>> deltas = [1, 2, 3]
>>> linestyles = ["dashed", "dotted", "dashdot"]
>>> fig, ax = plt.subplots()
>>> combined_plot_parameters = list(zip(deltas, linestyles))
>>> for delta, style in combined_plot_parameters:
... ax.plot(x, huber(delta, x), label=f"$\delta={delta}$", ls=style)
>>> ax.legend(loc="upper center")
>>> ax.set_xlabel("$x$")
>>> ax.set_title("Huber loss function $h_{\delta}(x)$")
>>> ax.set_xlim(-4, 4)
>>> ax.set_ylim(0, 8)
>>> plt.show()
""")
add_newdoc("hyp0f1",
r"""
hyp0f1(v, z, out=None)
Confluent hypergeometric limit function 0F1.
Parameters
----------
v : array_like
Real-valued parameter
z : array_like
Real- or complex-valued argument
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
The confluent hypergeometric limit function
Notes
-----
This function is defined as:
.. math:: _0F_1(v, z) = \sum_{k=0}^{\infty}\frac{z^k}{(v)_k k!}.
It's also the limit as :math:`q \to \infty` of :math:`_1F_1(q; v; z/q)`,
and satisfies the differential equation :math:`f''(z) + vf'(z) =
f(z)`. See [1]_ for more information.
References
----------
.. [1] Wolfram MathWorld, "Confluent Hypergeometric Limit Function",
http://mathworld.wolfram.com/ConfluentHypergeometricLimitFunction.html
Examples
--------
>>> import numpy as np
>>> import scipy.special as sc
It is one when `z` is zero.
>>> sc.hyp0f1(1, 0)
1.0
It is the limit of the confluent hypergeometric function as `q`
goes to infinity.
>>> q = np.array([1, 10, 100, 1000])
>>> v = 1
>>> z = 1
>>> sc.hyp1f1(q, v, z / q)
array([2.71828183, 2.31481985, 2.28303778, 2.27992985])
>>> sc.hyp0f1(v, z)
2.2795853023360673
It is related to Bessel functions.
>>> n = 1
>>> x = np.linspace(0, 1, 5)
>>> sc.jv(n, x)
array([0. , 0.12402598, 0.24226846, 0.3492436 , 0.44005059])
>>> (0.5 * x)**n / sc.factorial(n) * sc.hyp0f1(n + 1, -0.25 * x**2)
array([0. , 0.12402598, 0.24226846, 0.3492436 , 0.44005059])
""")
add_newdoc("hyp1f1",
r"""
hyp1f1(a, b, x, out=None)
Confluent hypergeometric function 1F1.
The confluent hypergeometric function is defined by the series
.. math::
{}_1F_1(a; b; x) = \sum_{k = 0}^\infty \frac{(a)_k}{(b)_k k!} x^k.
See [dlmf]_ for more details. Here :math:`(\cdot)_k` is the
Pochhammer symbol; see `poch`.
Parameters
----------
a, b : array_like
Real parameters
x : array_like
Real or complex argument
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
Values of the confluent hypergeometric function
See also
--------
hyperu : another confluent hypergeometric function
hyp0f1 : confluent hypergeometric limit function
hyp2f1 : Gaussian hypergeometric function
References
----------
.. [dlmf] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/13.2#E2
Examples
--------
>>> import numpy as np
>>> import scipy.special as sc
It is one when `x` is zero:
>>> sc.hyp1f1(0.5, 0.5, 0)
1.0
It is singular when `b` is a nonpositive integer.
>>> sc.hyp1f1(0.5, -1, 0)
inf
It is a polynomial when `a` is a nonpositive integer.
>>> a, b, x = -1, 0.5, np.array([1.0, 2.0, 3.0, 4.0])
>>> sc.hyp1f1(a, b, x)
array([-1., -3., -5., -7.])
>>> 1 + (a / b) * x
array([-1., -3., -5., -7.])
It reduces to the exponential function when `a = b`.
>>> sc.hyp1f1(2, 2, [1, 2, 3, 4])
array([ 2.71828183, 7.3890561 , 20.08553692, 54.59815003])
>>> np.exp([1, 2, 3, 4])
array([ 2.71828183, 7.3890561 , 20.08553692, 54.59815003])
""")
add_newdoc("hyp2f1",
r"""
hyp2f1(a, b, c, z, out=None)
Gauss hypergeometric function 2F1(a, b; c; z)
Parameters
----------
a, b, c : array_like
Arguments, should be real-valued.
z : array_like
Argument, real or complex.
out : ndarray, optional
Optional output array for the function values
Returns
-------
hyp2f1 : scalar or ndarray
The values of the gaussian hypergeometric function.
See also
--------
hyp0f1 : confluent hypergeometric limit function.
hyp1f1 : Kummer's (confluent hypergeometric) function.
Notes
-----
This function is defined for :math:`|z| < 1` as
.. math::
\mathrm{hyp2f1}(a, b, c, z) = \sum_{n=0}^\infty
\frac{(a)_n (b)_n}{(c)_n}\frac{z^n}{n!},
and defined on the rest of the complex z-plane by analytic
continuation [1]_.
Here :math:`(\cdot)_n` is the Pochhammer symbol; see `poch`. When
:math:`n` is an integer the result is a polynomial of degree :math:`n`.
The implementation for complex values of ``z`` is described in [2]_,
except for ``z`` in the region defined by
.. math::
0.9 <= \left|z\right| < 1.1,
\left|1 - z\right| >= 0.9,
\mathrm{real}(z) >= 0
in which the implementation follows [4]_.
References
----------
.. [1] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/15.2
.. [2] S. Zhang and J.M. Jin, "Computation of Special Functions", Wiley 1996
.. [3] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
.. [4] J.L. Lopez and N.M. Temme, "New series expansions of the Gauss
hypergeometric function", Adv Comput Math 39, 349-365 (2013).
https://doi.org/10.1007/s10444-012-9283-y
Examples
--------
>>> import numpy as np
>>> import scipy.special as sc
It has poles when `c` is a negative integer.
>>> sc.hyp2f1(1, 1, -2, 1)
inf
It is a polynomial when `a` or `b` is a negative integer.
>>> a, b, c = -1, 1, 1.5
>>> z = np.linspace(0, 1, 5)
>>> sc.hyp2f1(a, b, c, z)
array([1. , 0.83333333, 0.66666667, 0.5 , 0.33333333])
>>> 1 + a * b * z / c
array([1. , 0.83333333, 0.66666667, 0.5 , 0.33333333])
It is symmetric in `a` and `b`.
>>> a = np.linspace(0, 1, 5)
>>> b = np.linspace(0, 1, 5)
>>> sc.hyp2f1(a, b, 1, 0.5)
array([1. , 1.03997334, 1.1803406 , 1.47074441, 2. ])
>>> sc.hyp2f1(b, a, 1, 0.5)
array([1. , 1.03997334, 1.1803406 , 1.47074441, 2. ])
It contains many other functions as special cases.
>>> z = 0.5
>>> sc.hyp2f1(1, 1, 2, z)
1.3862943611198901
>>> -np.log(1 - z) / z
1.3862943611198906
>>> sc.hyp2f1(0.5, 1, 1.5, z**2)
1.098612288668109
>>> np.log((1 + z) / (1 - z)) / (2 * z)
1.0986122886681098
>>> sc.hyp2f1(0.5, 1, 1.5, -z**2)
0.9272952180016117
>>> np.arctan(z) / z
0.9272952180016122
""")
# Register the docstring for scipy.special.hyperu (confluent hypergeometric
# function U). Fix: the reference previously read "NIST Digital Library of
# Mathematics Functions"; the correct DLMF name, used by every other
# reference in this file, is "Mathematical Functions".
add_newdoc("hyperu",
    r"""
    hyperu(a, b, x, out=None)
    Confluent hypergeometric function U
    It is defined as the solution to the equation
    .. math::
       x \frac{d^2w}{dx^2} + (b - x) \frac{dw}{dx} - aw = 0
    which satisfies the property
    .. math::
       U(a, b, x) \sim x^{-a}
    as :math:`x \to \infty`. See [dlmf]_ for more details.
    Parameters
    ----------
    a, b : array_like
        Real-valued parameters
    x : array_like
        Real-valued argument
    out : ndarray, optional
        Optional output array for the function values
    Returns
    -------
    scalar or ndarray
        Values of `U`
    References
    ----------
    .. [dlmf] NIST Digital Library of Mathematical Functions
              https://dlmf.nist.gov/13.2#E6
    Examples
    --------
    >>> import numpy as np
    >>> import scipy.special as sc
    It has a branch cut along the negative `x` axis.
    >>> x = np.linspace(-0.1, -10, 5)
    >>> sc.hyperu(1, 1, x)
    array([nan, nan, nan, nan, nan])
    It approaches zero as `x` goes to infinity.
    >>> x = np.array([1, 10, 100])
    >>> sc.hyperu(1, 1, x)
    array([0.59634736, 0.09156333, 0.00990194])
    It satisfies Kummer's transformation.
    >>> a, b, x = 2, 1, 1
    >>> sc.hyperu(a, b, x)
    0.1926947246463881
    >>> x**(1 - b) * sc.hyperu(a - b + 1, 2 - b, x)
    0.1926947246463881
    """)
add_newdoc("i0",
r"""
i0(x, out=None)
Modified Bessel function of order 0.
Defined as,
.. math::
I_0(x) = \sum_{k=0}^\infty \frac{(x^2/4)^k}{(k!)^2} = J_0(\imath x),
where :math:`J_0` is the Bessel function of the first kind of order 0.
Parameters
----------
x : array_like
Argument (float)
out : ndarray, optional
Optional output array for the function values
Returns
-------
I : scalar or ndarray
Value of the modified Bessel function of order 0 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 8] and (8, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `i0`.
See also
--------
iv: Modified Bessel function of any order
i0e: Exponentially scaled modified Bessel function of order 0
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
Examples
--------
Calculate the function at one point:
>>> from scipy.special import i0
>>> i0(1.)
1.2660658777520082
Calculate at several points:
>>> import numpy as np
>>> i0(np.array([-2., 0., 3.5]))
array([2.2795853 , 1. , 7.37820343])
Plot the function from -10 to 10.
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> x = np.linspace(-10., 10., 1000)
>>> y = i0(x)
>>> ax.plot(x, y)
>>> plt.show()
""")
add_newdoc("i0e",
"""
i0e(x, out=None)
Exponentially scaled modified Bessel function of order 0.
Defined as::
i0e(x) = exp(-abs(x)) * i0(x).
Parameters
----------
x : array_like
Argument (float)
out : ndarray, optional
Optional output array for the function values
Returns
-------
I : scalar or ndarray
Value of the exponentially scaled modified Bessel function of order 0
at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 8] and (8, infinity).
Chebyshev polynomial expansions are employed in each interval. The
polynomial expansions used are the same as those in `i0`, but
they are not multiplied by the dominant exponential factor.
This function is a wrapper for the Cephes [1]_ routine `i0e`. `i0e`
is useful for large arguments `x`: for these, `i0` quickly overflows.
See also
--------
iv: Modified Bessel function of the first kind
i0: Modified Bessel function of order 0
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
Examples
--------
In the following example `i0` returns infinity whereas `i0e` still returns
a finite number.
>>> from scipy.special import i0, i0e
>>> i0(1000.), i0e(1000.)
(inf, 0.012617240455891257)
Calculate the function at several points by providing a NumPy array or
list for `x`:
>>> import numpy as np
>>> i0e(np.array([-2., 0., 3.]))
array([0.30850832, 1. , 0.24300035])
Plot the function from -10 to 10.
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> x = np.linspace(-10., 10., 1000)
>>> y = i0e(x)
>>> ax.plot(x, y)
>>> plt.show()
""")
add_newdoc("i1",
r"""
i1(x, out=None)
Modified Bessel function of order 1.
Defined as,
.. math::
I_1(x) = \frac{1}{2}x \sum_{k=0}^\infty \frac{(x^2/4)^k}{k! (k + 1)!}
= -\imath J_1(\imath x),
where :math:`J_1` is the Bessel function of the first kind of order 1.
Parameters
----------
x : array_like
Argument (float)
out : ndarray, optional
Optional output array for the function values
Returns
-------
I : scalar or ndarray
Value of the modified Bessel function of order 1 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 8] and (8, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `i1`.
See also
--------
iv: Modified Bessel function of the first kind
i1e: Exponentially scaled modified Bessel function of order 1
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
Examples
--------
Calculate the function at one point:
>>> from scipy.special import i1
>>> i1(1.)
0.5651591039924851
Calculate the function at several points:
>>> import numpy as np
>>> i1(np.array([-2., 0., 6.]))
array([-1.59063685, 0. , 61.34193678])
Plot the function between -10 and 10.
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> x = np.linspace(-10., 10., 1000)
>>> y = i1(x)
>>> ax.plot(x, y)
>>> plt.show()
""")
add_newdoc("i1e",
"""
i1e(x, out=None)
Exponentially scaled modified Bessel function of order 1.
Defined as::
i1e(x) = exp(-abs(x)) * i1(x)
Parameters
----------
x : array_like
Argument (float)
out : ndarray, optional
Optional output array for the function values
Returns
-------
I : scalar or ndarray
Value of the exponentially scaled modified Bessel function of order 1
at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 8] and (8, infinity).
Chebyshev polynomial expansions are employed in each interval. The
polynomial expansions used are the same as those in `i1`, but
they are not multiplied by the dominant exponential factor.
This function is a wrapper for the Cephes [1]_ routine `i1e`. `i1e`
is useful for large arguments `x`: for these, `i1` quickly overflows.
See also
--------
iv: Modified Bessel function of the first kind
i1: Modified Bessel function of order 1
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
Examples
--------
In the following example `i1` returns infinity whereas `i1e` still returns
a finite number.
>>> from scipy.special import i1, i1e
>>> i1(1000.), i1e(1000.)
(inf, 0.01261093025692863)
Calculate the function at several points by providing a NumPy array or
list for `x`:
>>> import numpy as np
>>> i1e(np.array([-2., 0., 6.]))
array([-0.21526929, 0. , 0.15205146])
Plot the function between -10 and 10.
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> x = np.linspace(-10., 10., 1000)
>>> y = i1e(x)
>>> ax.plot(x, y)
>>> plt.show()
""")
add_newdoc("_igam_fac",
"""
Internal function, do not use.
""")
add_newdoc("it2i0k0",
r"""
it2i0k0(x, out=None)
Integrals related to modified Bessel functions of order 0.
Computes the integrals
.. math::
\int_0^x \frac{I_0(t) - 1}{t} dt \\
\int_x^\infty \frac{K_0(t)}{t} dt.
Parameters
----------
x : array_like
Values at which to evaluate the integrals.
out : tuple of ndarrays, optional
Optional output arrays for the function results.
Returns
-------
ii0 : scalar or ndarray
The integral for `i0`
ik0 : scalar or ndarray
The integral for `k0`
References
----------
.. [1] S. Zhang and J.M. Jin, "Computation of Special Functions",
Wiley 1996
Examples
--------
Evaluate the functions at one point.
>>> from scipy.special import it2i0k0
>>> int_i, int_k = it2i0k0(1.)
>>> int_i, int_k
(0.12897944249456852, 0.2085182909001295)
Evaluate the functions at several points.
>>> import numpy as np
>>> points = np.array([0.5, 1.5, 3.])
>>> int_i, int_k = it2i0k0(points)
>>> int_i, int_k
(array([0.03149527, 0.30187149, 1.50012461]),
array([0.66575102, 0.0823715 , 0.00823631]))
Plot the functions from 0 to 5.
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> x = np.linspace(0., 5., 1000)
>>> int_i, int_k = it2i0k0(x)
>>> ax.plot(x, int_i, label=r"$\int_0^x \frac{I_0(t)-1}{t}\,dt$")
>>> ax.plot(x, int_k, label=r"$\int_x^{\infty} \frac{K_0(t)}{t}\,dt$")
>>> ax.legend()
>>> ax.set_ylim(0, 10)
>>> plt.show()
""")
add_newdoc("it2j0y0",
r"""
it2j0y0(x, out=None)
Integrals related to Bessel functions of the first kind of order 0.
Computes the integrals
.. math::
\int_0^x \frac{1 - J_0(t)}{t} dt \\
\int_x^\infty \frac{Y_0(t)}{t} dt.
For more on :math:`J_0` and :math:`Y_0` see `j0` and `y0`.
Parameters
----------
x : array_like
Values at which to evaluate the integrals.
out : tuple of ndarrays, optional
Optional output arrays for the function results.
Returns
-------
ij0 : scalar or ndarray
The integral for `j0`
iy0 : scalar or ndarray
The integral for `y0`
References
----------
.. [1] S. Zhang and J.M. Jin, "Computation of Special Functions",
Wiley 1996
Examples
--------
Evaluate the functions at one point.
>>> from scipy.special import it2j0y0
>>> int_j, int_y = it2j0y0(1.)
>>> int_j, int_y
(0.12116524699506871, 0.39527290169929336)
Evaluate the functions at several points.
>>> import numpy as np
>>> points = np.array([0.5, 1.5, 3.])
>>> int_j, int_y = it2j0y0(points)
>>> int_j, int_y
(array([0.03100699, 0.26227724, 0.85614669]),
array([ 0.26968854, 0.29769696, -0.02987272]))
Plot the functions from 0 to 10.
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> x = np.linspace(0., 10., 1000)
>>> int_j, int_y = it2j0y0(x)
>>> ax.plot(x, int_j, label=r"$\int_0^x \frac{1-J_0(t)}{t}\,dt$")
>>> ax.plot(x, int_y, label=r"$\int_x^{\infty} \frac{Y_0(t)}{t}\,dt$")
>>> ax.legend()
>>> ax.set_ylim(-2.5, 2.5)
>>> plt.show()
""")
add_newdoc("it2struve0",
r"""
it2struve0(x, out=None)
Integral related to the Struve function of order 0.
Returns the integral,
.. math::
\int_x^\infty \frac{H_0(t)}{t}\,dt
where :math:`H_0` is the Struve function of order 0.
Parameters
----------
x : array_like
Lower limit of integration.
out : ndarray, optional
Optional output array for the function values
Returns
-------
I : scalar or ndarray
The value of the integral.
See also
--------
struve
Notes
-----
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
Examples
--------
Evaluate the function at one point.
>>> import numpy as np
>>> from scipy.special import it2struve0
>>> it2struve0(1.)
0.9571973506383524
Evaluate the function at several points by supplying
an array for `x`.
>>> points = np.array([1., 2., 3.5])
>>> it2struve0(points)
array([0.95719735, 0.46909296, 0.10366042])
Plot the function from -10 to 10.
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-10., 10., 1000)
>>> it2struve0_values = it2struve0(x)
>>> fig, ax = plt.subplots()
>>> ax.plot(x, it2struve0_values)
>>> ax.set_xlabel(r'$x$')
>>> ax.set_ylabel(r'$\int_x^{\infty}\frac{H_0(t)}{t}\,dt$')
>>> plt.show()
""")
add_newdoc(
"itairy",
r"""
itairy(x, out=None)
Integrals of Airy functions
Calculates the integrals of Airy functions from 0 to `x`.
Parameters
----------
x : array_like
Upper limit of integration (float).
out : tuple of ndarray, optional
Optional output arrays for the function values
Returns
-------
Apt : scalar or ndarray
Integral of Ai(t) from 0 to x.
Bpt : scalar or ndarray
Integral of Bi(t) from 0 to x.
Ant : scalar or ndarray
Integral of Ai(-t) from 0 to x.
Bnt : scalar or ndarray
Integral of Bi(-t) from 0 to x.
Notes
-----
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
Examples
--------
Compute the functions at ``x=1.``.
>>> import numpy as np
>>> from scipy.special import itairy
>>> import matplotlib.pyplot as plt
>>> apt, bpt, ant, bnt = itairy(1.)
>>> apt, bpt, ant, bnt
(0.23631734191710949,
0.8727691167380077,
0.46567398346706845,
0.3730050096342943)
Compute the functions at several points by providing a NumPy array for `x`.
>>> x = np.array([1., 1.5, 2.5, 5])
>>> apt, bpt, ant, bnt = itairy(x)
>>> apt, bpt, ant, bnt
(array([0.23631734, 0.28678675, 0.324638 , 0.33328759]),
array([ 0.87276912, 1.62470809, 5.20906691, 321.47831857]),
array([0.46567398, 0.72232876, 0.93187776, 0.7178822 ]),
array([ 0.37300501, 0.35038814, -0.02812939, 0.15873094]))
Plot the functions from -10 to 10.
>>> x = np.linspace(-10, 10, 500)
>>> apt, bpt, ant, bnt = itairy(x)
>>> fig, ax = plt.subplots(figsize=(6, 5))
>>> ax.plot(x, apt, label=r"$\int_0^x\, Ai(t)\, dt$")
>>> ax.plot(x, bpt, ls="dashed", label=r"$\int_0^x\, Bi(t)\, dt$")
>>> ax.plot(x, ant, ls="dashdot", label=r"$\int_0^x\, Ai(-t)\, dt$")
>>> ax.plot(x, bnt, ls="dotted", label=r"$\int_0^x\, Bi(-t)\, dt$")
>>> ax.set_ylim(-2, 1.5)
>>> ax.legend(loc="lower right")
>>> plt.show()
""")
add_newdoc("iti0k0",
r"""
iti0k0(x, out=None)
Integrals of modified Bessel functions of order 0.
Computes the integrals
.. math::
\int_0^x I_0(t) dt \\
\int_0^x K_0(t) dt.
For more on :math:`I_0` and :math:`K_0` see `i0` and `k0`.
Parameters
----------
x : array_like
Values at which to evaluate the integrals.
out : tuple of ndarrays, optional
Optional output arrays for the function results.
Returns
-------
ii0 : scalar or ndarray
The integral for `i0`
ik0 : scalar or ndarray
The integral for `k0`
References
----------
.. [1] S. Zhang and J.M. Jin, "Computation of Special Functions",
Wiley 1996
Examples
--------
Evaluate the functions at one point.
>>> from scipy.special import iti0k0
>>> int_i, int_k = iti0k0(1.)
>>> int_i, int_k
(1.0865210970235892, 1.2425098486237771)
Evaluate the functions at several points.
>>> import numpy as np
>>> points = np.array([0., 1.5, 3.])
>>> int_i, int_k = iti0k0(points)
>>> int_i, int_k
(array([0. , 1.80606937, 6.16096149]),
array([0. , 1.39458246, 1.53994809]))
Plot the functions from 0 to 5.
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> x = np.linspace(0., 5., 1000)
>>> int_i, int_k = iti0k0(x)
>>> ax.plot(x, int_i, label=r"$\int_0^x I_0(t)\,dt$")
>>> ax.plot(x, int_k, label=r"$\int_0^x K_0(t)\,dt$")
>>> ax.legend()
>>> plt.show()
""")
add_newdoc("itj0y0",
r"""
itj0y0(x, out=None)
Integrals of Bessel functions of the first kind of order 0.
Computes the integrals
.. math::
\int_0^x J_0(t) dt \\
\int_0^x Y_0(t) dt.
For more on :math:`J_0` and :math:`Y_0` see `j0` and `y0`.
Parameters
----------
x : array_like
Values at which to evaluate the integrals.
out : tuple of ndarrays, optional
Optional output arrays for the function results.
Returns
-------
ij0 : scalar or ndarray
The integral of `j0`
iy0 : scalar or ndarray
The integral of `y0`
References
----------
.. [1] S. Zhang and J.M. Jin, "Computation of Special Functions",
Wiley 1996
Examples
--------
Evaluate the functions at one point.
>>> from scipy.special import itj0y0
>>> int_j, int_y = itj0y0(1.)
>>> int_j, int_y
(0.9197304100897596, -0.637069376607422)
Evaluate the functions at several points.
>>> import numpy as np
>>> points = np.array([0., 1.5, 3.])
>>> int_j, int_y = itj0y0(points)
>>> int_j, int_y
(array([0. , 1.24144951, 1.38756725]),
array([ 0. , -0.51175903, 0.19765826]))
Plot the functions from 0 to 10.
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> x = np.linspace(0., 10., 1000)
>>> int_j, int_y = itj0y0(x)
>>> ax.plot(x, int_j, label=r"$\int_0^x J_0(t)\,dt$")
>>> ax.plot(x, int_y, label=r"$\int_0^x Y_0(t)\,dt$")
>>> ax.legend()
>>> plt.show()
""")
add_newdoc("itmodstruve0",
r"""
itmodstruve0(x, out=None)
Integral of the modified Struve function of order 0.
.. math::
I = \int_0^x L_0(t)\,dt
Parameters
----------
x : array_like
Upper limit of integration (float).
out : ndarray, optional
Optional output array for the function values
Returns
-------
I : scalar or ndarray
The integral of :math:`L_0` from 0 to `x`.
Notes
-----
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
See Also
--------
modstruve: Modified Struve function which is integrated by this function
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
Examples
--------
Evaluate the function at one point.
>>> import numpy as np
>>> from scipy.special import itmodstruve0
>>> itmodstruve0(1.)
0.3364726286440384
Evaluate the function at several points by supplying
an array for `x`.
>>> points = np.array([1., 2., 3.5])
>>> itmodstruve0(points)
array([0.33647263, 1.588285 , 7.60382578])
Plot the function from -10 to 10.
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-10., 10., 1000)
>>> itmodstruve0_values = itmodstruve0(x)
>>> fig, ax = plt.subplots()
>>> ax.plot(x, itmodstruve0_values)
>>> ax.set_xlabel(r'$x$')
>>> ax.set_ylabel(r'$\int_0^xL_0(t)\,dt$')
>>> plt.show()
""")
add_newdoc("itstruve0",
r"""
itstruve0(x, out=None)
Integral of the Struve function of order 0.
.. math::
I = \int_0^x H_0(t)\,dt
Parameters
----------
x : array_like
Upper limit of integration (float).
out : ndarray, optional
Optional output array for the function values
Returns
-------
I : scalar or ndarray
The integral of :math:`H_0` from 0 to `x`.
See also
--------
struve: Function which is integrated by this function
Notes
-----
Wrapper for a Fortran routine created by Shanjie Zhang and Jianming
Jin [1]_.
References
----------
.. [1] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
Examples
--------
Evaluate the function at one point.
>>> import numpy as np
>>> from scipy.special import itstruve0
>>> itstruve0(1.)
0.30109042670805547
Evaluate the function at several points by supplying
an array for `x`.
>>> points = np.array([1., 2., 3.5])
>>> itstruve0(points)
array([0.30109043, 1.01870116, 1.96804581])
Plot the function from -20 to 20.
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-20., 20., 1000)
>>> istruve0_values = itstruve0(x)
>>> fig, ax = plt.subplots()
>>> ax.plot(x, istruve0_values)
>>> ax.set_xlabel(r'$x$')
>>> ax.set_ylabel(r'$\int_0^{x}H_0(t)\,dt$')
>>> plt.show()
""")
add_newdoc("iv",
r"""
iv(v, z, out=None)
Modified Bessel function of the first kind of real order.
Parameters
----------
v : array_like
Order. If `z` is of real type and negative, `v` must be integer
valued.
z : array_like of float or complex
Argument.
out : ndarray, optional
Optional output array for the function values
Returns
-------
scalar or ndarray
Values of the modified Bessel function.
Notes
-----
For real `z` and :math:`v \in [-50, 50]`, the evaluation is carried out
using Temme's method [1]_. For larger orders, uniform asymptotic
expansions are applied.
For complex `z` and positive `v`, the AMOS [2]_ `zbesi` routine is
called. It uses a power series for small `z`, the asymptotic expansion
for large `abs(z)`, the Miller algorithm normalized by the Wronskian
and a Neumann series for intermediate magnitudes, and the uniform
asymptotic expansions for :math:`I_v(z)` and :math:`J_v(z)` for large
orders. Backward recurrence is used to generate sequences or reduce
orders when necessary.
The calculations above are done in the right half plane and continued
into the left half plane by the formula,
.. math:: I_v(z \exp(\pm\imath\pi)) = \exp(\pm\pi v) I_v(z)
(valid when the real part of `z` is positive). For negative `v`, the
formula
.. math:: I_{-v}(z) = I_v(z) + \frac{2}{\pi} \sin(\pi v) K_v(z)
is used, where :math:`K_v(z)` is the modified Bessel function of the
second kind, evaluated using the AMOS routine `zbesk`.
See also
--------
ive : This function with leading exponential behavior stripped off.
i0 : Faster version of this function for order 0.
i1 : Faster version of this function for order 1.
References
----------
.. [1] Temme, Journal of Computational Physics, vol 21, 343 (1976)
.. [2] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
Examples
--------
Evaluate the function of order 0 at one point.
>>> from scipy.special import iv
>>> iv(0, 1.)
1.2660658777520084
Evaluate the function at one point for different orders.
>>> iv(0, 1.), iv(1, 1.), iv(1.5, 1.)
(1.2660658777520084, 0.565159103992485, 0.2935253263474798)
The evaluation for different orders can be carried out in one call by
providing a list or NumPy array as argument for the `v` parameter:
>>> iv([0, 1, 1.5], 1.)
array([1.26606588, 0.5651591 , 0.29352533])
Evaluate the function at several points for order 0 by providing an
array for `z`.
>>> import numpy as np
>>> points = np.array([-2., 0., 3.])
>>> iv(0, points)
array([2.2795853 , 1. , 4.88079259])
If `z` is an array, the order parameter `v` must be broadcastable to
the correct shape if different orders shall be computed in one call.
To calculate the orders 0 and 1 for an 1D array:
>>> orders = np.array([[0], [1]])
>>> orders.shape
(2, 1)
>>> iv(orders, points)
array([[ 2.2795853 , 1. , 4.88079259],
[-1.59063685, 0. , 3.95337022]])
Plot the functions of order 0 to 3 from -5 to 5.
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> x = np.linspace(-5., 5., 1000)
>>> for i in range(4):
... ax.plot(x, iv(i, x), label=f'$I_{i!r}$')
>>> ax.legend()
>>> plt.show()
""")
add_newdoc("ive",
r"""
ive(v, z, out=None)
Exponentially scaled modified Bessel function of the first kind.
Defined as::
ive(v, z) = iv(v, z) * exp(-abs(z.real))
For imaginary numbers without a real part, returns the unscaled
Bessel function of the first kind `iv`.
Parameters
----------
v : array_like of float
Order.
z : array_like of float or complex
Argument.
out : ndarray, optional
Optional output array for the function values
Returns
-------
scalar or ndarray
Values of the exponentially scaled modified Bessel function.
Notes
-----
For positive `v`, the AMOS [1]_ `zbesi` routine is called. It uses a
power series for small `z`, the asymptotic expansion for large
`abs(z)`, the Miller algorithm normalized by the Wronskian and a
Neumann series for intermediate magnitudes, and the uniform asymptotic
expansions for :math:`I_v(z)` and :math:`J_v(z)` for large orders.
Backward recurrence is used to generate sequences or reduce orders when
necessary.
The calculations above are done in the right half plane and continued
into the left half plane by the formula,
.. math:: I_v(z \exp(\pm\imath\pi)) = \exp(\pm\pi v) I_v(z)
(valid when the real part of `z` is positive). For negative `v`, the
formula
.. math:: I_{-v}(z) = I_v(z) + \frac{2}{\pi} \sin(\pi v) K_v(z)
is used, where :math:`K_v(z)` is the modified Bessel function of the
second kind, evaluated using the AMOS routine `zbesk`.
`ive` is useful for large arguments `z`: for these, `iv` easily overflows,
while `ive` does not due to the exponential scaling.
See also
--------
iv: Modified Bessel function of the first kind
i0e: Faster implementation of this function for order 0
i1e: Faster implementation of this function for order 1
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
Examples
--------
In the following example `iv` returns infinity whereas `ive` still returns
a finite number.
>>> from scipy.special import iv, ive
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> iv(3, 1000.), ive(3, 1000.)
(inf, 0.01256056218254712)
Evaluate the function at one point for different orders by
providing a list or NumPy array as argument for the `v` parameter:
>>> ive([0, 1, 1.5], 1.)
array([0.46575961, 0.20791042, 0.10798193])
Evaluate the function at several points for order 0 by providing an
array for `z`.
>>> points = np.array([-2., 0., 3.])
>>> ive(0, points)
array([0.30850832, 1. , 0.24300035])
Evaluate the function at several points for different orders by
providing arrays for both `v` for `z`. Both arrays have to be
broadcastable to the correct shape. To calculate the orders 0, 1
and 2 for a 1D array of points:
>>> ive([[0], [1], [2]], points)
array([[ 0.30850832, 1. , 0.24300035],
[-0.21526929, 0. , 0.19682671],
[ 0.09323903, 0. , 0.11178255]])
Plot the functions of order 0 to 3 from -5 to 5.
>>> fig, ax = plt.subplots()
>>> x = np.linspace(-5., 5., 1000)
>>> for i in range(4):
... ax.plot(x, ive(i, x), label=rf'$I_{i!r}(z)\cdot e^{{-|z|}}$')
>>> ax.legend()
>>> ax.set_xlabel(r"$z$")
>>> plt.show()
""")
add_newdoc("j0",
r"""
j0(x, out=None)
Bessel function of the first kind of order 0.
Parameters
----------
x : array_like
Argument (float).
out : ndarray, optional
Optional output array for the function values
Returns
-------
J : scalar or ndarray
Value of the Bessel function of the first kind of order 0 at `x`.
Notes
-----
The domain is divided into the intervals [0, 5] and (5, infinity). In the
first interval the following rational approximation is used:
.. math::
J_0(x) \approx (w - r_1^2)(w - r_2^2) \frac{P_3(w)}{Q_8(w)},
where :math:`w = x^2` and :math:`r_1`, :math:`r_2` are the zeros of
:math:`J_0`, and :math:`P_3` and :math:`Q_8` are polynomials of degrees 3
and 8, respectively.
In the second interval, the Hankel asymptotic expansion is employed with
two rational functions of degree 6/6 and 7/7.
This function is a wrapper for the Cephes [1]_ routine `j0`.
It should not be confused with the spherical Bessel functions (see
`spherical_jn`).
See also
--------
jv : Bessel function of real order and complex argument.
spherical_jn : spherical Bessel functions.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
Examples
--------
Calculate the function at one point:
>>> from scipy.special import j0
>>> j0(1.)
0.7651976865579665
Calculate the function at several points:
>>> import numpy as np
>>> j0(np.array([-2., 0., 4.]))
array([ 0.22389078, 1. , -0.39714981])
Plot the function from -20 to 20.
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> x = np.linspace(-20., 20., 1000)
>>> y = j0(x)
>>> ax.plot(x, y)
>>> plt.show()
""")
add_newdoc("j1",
"""
j1(x, out=None)
Bessel function of the first kind of order 1.
Parameters
----------
x : array_like
Argument (float).
out : ndarray, optional
Optional output array for the function values
Returns
-------
J : scalar or ndarray
Value of the Bessel function of the first kind of order 1 at `x`.
Notes
-----
The domain is divided into the intervals [0, 8] and (8, infinity). In the
first interval a 24 term Chebyshev expansion is used. In the second, the
asymptotic trigonometric representation is employed using two rational
functions of degree 5/5.
This function is a wrapper for the Cephes [1]_ routine `j1`.
It should not be confused with the spherical Bessel functions (see
`spherical_jn`).
See also
--------
jv: Bessel function of the first kind
spherical_jn: spherical Bessel functions.
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
Examples
--------
Calculate the function at one point:
>>> from scipy.special import j1
>>> j1(1.)
0.44005058574493355
Calculate the function at several points:
>>> import numpy as np
>>> j1(np.array([-2., 0., 4.]))
array([-0.57672481, 0. , -0.06604333])
Plot the function from -20 to 20.
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> x = np.linspace(-20., 20., 1000)
>>> y = j1(x)
>>> ax.plot(x, y)
>>> plt.show()
""")
add_newdoc("jn",
"""
jn(n, x, out=None)
Bessel function of the first kind of integer order and real argument.
Parameters
----------
n : array_like
order of the Bessel function
x : array_like
argument of the Bessel function
out : ndarray, optional
Optional output array for the function values
Returns
-------
scalar or ndarray
The value of the Bessel function
See also
--------
jv
spherical_jn : spherical Bessel functions.
Notes
-----
`jn` is an alias of `jv`.
Not to be confused with the spherical Bessel functions (see
`spherical_jn`).
""")
add_newdoc("jv",
r"""
jv(v, z, out=None)
Bessel function of the first kind of real order and complex argument.
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
out : ndarray, optional
Optional output array for the function values
Returns
-------
J : scalar or ndarray
Value of the Bessel function, :math:`J_v(z)`.
See also
--------
jve : :math:`J_v` with leading exponential behavior stripped off.
spherical_jn : spherical Bessel functions.
j0 : faster version of this function for order 0.
j1 : faster version of this function for order 1.
Notes
-----
For positive `v` values, the computation is carried out using the AMOS
[1]_ `zbesj` routine, which exploits the connection to the modified
Bessel function :math:`I_v`,
.. math::
J_v(z) = \exp(v\pi\imath/2) I_v(-\imath z)\qquad (\Im z > 0)
J_v(z) = \exp(-v\pi\imath/2) I_v(\imath z)\qquad (\Im z < 0)
For negative `v` values the formula,
.. math:: J_{-v}(z) = J_v(z) \cos(\pi v) - Y_v(z) \sin(\pi v)
is used, where :math:`Y_v(z)` is the Bessel function of the second
kind, computed using the AMOS routine `zbesy`. Note that the second
term is exactly zero for integer `v`; to improve accuracy the second
term is explicitly omitted for `v` values such that `v = floor(v)`.
Not to be confused with the spherical Bessel functions (see `spherical_jn`).
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
Examples
--------
Evaluate the function of order 0 at one point.
>>> from scipy.special import jv
>>> jv(0, 1.)
0.7651976865579666
Evaluate the function at one point for different orders.
>>> jv(0, 1.), jv(1, 1.), jv(1.5, 1.)
(0.7651976865579666, 0.44005058574493355, 0.24029783912342725)
The evaluation for different orders can be carried out in one call by
providing a list or NumPy array as argument for the `v` parameter:
>>> jv([0, 1, 1.5], 1.)
array([0.76519769, 0.44005059, 0.24029784])
Evaluate the function at several points for order 0 by providing an
array for `z`.
>>> import numpy as np
>>> points = np.array([-2., 0., 3.])
>>> jv(0, points)
array([ 0.22389078, 1. , -0.26005195])
If `z` is an array, the order parameter `v` must be broadcastable to
the correct shape if different orders shall be computed in one call.
To calculate the orders 0 and 1 for an 1D array:
>>> orders = np.array([[0], [1]])
>>> orders.shape
(2, 1)
>>> jv(orders, points)
array([[ 0.22389078, 1. , -0.26005195],
[-0.57672481, 0. , 0.33905896]])
Plot the functions of order 0 to 3 from -10 to 10.
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> x = np.linspace(-10., 10., 1000)
>>> for i in range(4):
... ax.plot(x, jv(i, x), label=f'$J_{i!r}$')
>>> ax.legend()
>>> plt.show()
""")
add_newdoc("jve",
r"""
jve(v, z, out=None)
Exponentially scaled Bessel function of the first kind of order `v`.
Defined as::
jve(v, z) = jv(v, z) * exp(-abs(z.imag))
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
out : ndarray, optional
Optional output array for the function values
Returns
-------
J : scalar or ndarray
Value of the exponentially scaled Bessel function.
See also
--------
jv: Unscaled Bessel function of the first kind
Notes
-----
For positive `v` values, the computation is carried out using the AMOS
[1]_ `zbesj` routine, which exploits the connection to the modified
Bessel function :math:`I_v`,
.. math::
J_v(z) = \exp(v\pi\imath/2) I_v(-\imath z)\qquad (\Im z > 0)
J_v(z) = \exp(-v\pi\imath/2) I_v(\imath z)\qquad (\Im z < 0)
For negative `v` values the formula,
.. math:: J_{-v}(z) = J_v(z) \cos(\pi v) - Y_v(z) \sin(\pi v)
is used, where :math:`Y_v(z)` is the Bessel function of the second
kind, computed using the AMOS routine `zbesy`. Note that the second
term is exactly zero for integer `v`; to improve accuracy the second
term is explicitly omitted for `v` values such that `v = floor(v)`.
Exponentially scaled Bessel functions are useful for large arguments `z`:
for these, the unscaled Bessel functions can easily under- or overflow.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
Examples
--------
Compare the output of `jv` and `jve` for large complex arguments for `z`
by computing their values for order ``v=1`` at ``z=1000j``. We see that
`jv` overflows but `jve` returns a finite number:
>>> import numpy as np
>>> from scipy.special import jv, jve
>>> v = 1
>>> z = 1000j
>>> jv(v, z), jve(v, z)
((inf+infj), (7.721967686709077e-19+0.012610930256928629j))
For real arguments for `z`, `jve` returns the same as `jv`.
>>> v, z = 1, 1000
>>> jv(v, z), jve(v, z)
(0.004728311907089523, 0.004728311907089523)
The function can be evaluated for several orders at the same time by
providing a list or NumPy array for `v`:
>>> jve([1, 3, 5], 1j)
array([1.27304208e-17+2.07910415e-01j, -4.99352086e-19-8.15530777e-03j,
6.11480940e-21+9.98657141e-05j])
In the same way, the function can be evaluated at several points in one
call by providing a list or NumPy array for `z`:
>>> jve(1, np.array([1j, 2j, 3j]))
array([1.27308412e-17+0.20791042j, 1.31814423e-17+0.21526929j,
1.20521602e-17+0.19682671j])
It is also possible to evaluate several orders at several points
at the same time by providing arrays for `v` and `z` with
compatible shapes for broadcasting. Compute `jve` for two different orders
`v` and three points `z` resulting in a 2x3 array.
>>> v = np.array([[1], [3]])
>>> z = np.array([1j, 2j, 3j])
>>> v.shape, z.shape
((2, 1), (3,))
>>> jve(v, z)
array([[1.27304208e-17+0.20791042j, 1.31810070e-17+0.21526929j,
1.20517622e-17+0.19682671j],
[-4.99352086e-19-0.00815531j, -1.76289571e-18-0.02879122j,
-2.92578784e-18-0.04778332j]])
""")
add_newdoc("k0",
r"""
k0(x, out=None)
Modified Bessel function of the second kind of order 0, :math:`K_0`.
This function is also sometimes referred to as the modified Bessel
function of the third kind of order 0.
Parameters
----------
x : array_like
Argument (float).
out : ndarray, optional
Optional output array for the function values
Returns
-------
K : scalar or ndarray
Value of the modified Bessel function :math:`K_0` at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 2] and (2, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `k0`.
See also
--------
kv: Modified Bessel function of the second kind of any order
k0e: Exponentially scaled modified Bessel function of the second kind
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
Examples
--------
Calculate the function at one point:
>>> from scipy.special import k0
>>> k0(1.)
0.42102443824070823
Calculate the function at several points:
>>> import numpy as np
>>> k0(np.array([0.5, 2., 3.]))
array([0.92441907, 0.11389387, 0.0347395 ])
Plot the function from 0 to 10.
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> x = np.linspace(0., 10., 1000)
>>> y = k0(x)
>>> ax.plot(x, y)
>>> plt.show()
""")
add_newdoc("k0e",
"""
k0e(x, out=None)
Exponentially scaled modified Bessel function K of order 0
Defined as::
k0e(x) = exp(x) * k0(x)
Parameters
----------
x : array_like
Argument (float)
out : ndarray, optional
Optional output array for the function values
Returns
-------
K : scalar or ndarray
Value of the exponentially scaled modified Bessel function K of order
0 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 2] and (2, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `k0e`. `k0e` is
useful for large arguments: for these, `k0` easily underflows.
See also
--------
kv: Modified Bessel function of the second kind of any order
k0: Modified Bessel function of the second kind
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
Examples
--------
In the following example `k0` returns 0 whereas `k0e` still returns a
useful finite number:
>>> from scipy.special import k0, k0e
>>> k0(1000.), k0e(1000)
(0., 0.03962832160075422)
Calculate the function at several points by providing a NumPy array or
list for `x`:
>>> import numpy as np
>>> k0e(np.array([0.5, 2., 3.]))
array([1.52410939, 0.84156822, 0.6977616 ])
Plot the function from 0 to 10.
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> x = np.linspace(0., 10., 1000)
>>> y = k0e(x)
>>> ax.plot(x, y)
>>> plt.show()
""")
add_newdoc("k1",
"""
k1(x, out=None)
Modified Bessel function of the second kind of order 1, :math:`K_1(x)`.
Parameters
----------
x : array_like
Argument (float)
out : ndarray, optional
Optional output array for the function values
Returns
-------
K : scalar or ndarray
Value of the modified Bessel function K of order 1 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 2] and (2, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `k1`.
See also
--------
kv: Modified Bessel function of the second kind of any order
k1e: Exponentially scaled modified Bessel function K of order 1
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
Examples
--------
Calculate the function at one point:
>>> from scipy.special import k1
>>> k1(1.)
0.6019072301972346
Calculate the function at several points:
>>> import numpy as np
>>> k1(np.array([0.5, 2., 3.]))
array([1.65644112, 0.13986588, 0.04015643])
Plot the function from 0 to 10.
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> x = np.linspace(0., 10., 1000)
>>> y = k1(x)
>>> ax.plot(x, y)
>>> plt.show()
""")
add_newdoc("k1e",
"""
k1e(x, out=None)
Exponentially scaled modified Bessel function K of order 1
Defined as::
k1e(x) = exp(x) * k1(x)
Parameters
----------
x : array_like
Argument (float)
out : ndarray, optional
Optional output array for the function values
Returns
-------
K : scalar or ndarray
Value of the exponentially scaled modified Bessel function K of order
1 at `x`.
Notes
-----
The range is partitioned into the two intervals [0, 2] and (2, infinity).
Chebyshev polynomial expansions are employed in each interval.
This function is a wrapper for the Cephes [1]_ routine `k1e`.
See also
--------
kv: Modified Bessel function of the second kind of any order
k1: Modified Bessel function of the second kind of order 1
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
Examples
--------
In the following example `k1` returns 0 whereas `k1e` still returns a
useful floating point number.
>>> from scipy.special import k1, k1e
>>> k1(1000.), k1e(1000.)
(0., 0.03964813081296021)
Calculate the function at several points by providing a NumPy array or
list for `x`:
>>> import numpy as np
>>> k1e(np.array([0.5, 2., 3.]))
array([2.73100971, 1.03347685, 0.80656348])
Plot the function from 0 to 10.
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> x = np.linspace(0., 10., 1000)
>>> y = k1e(x)
>>> ax.plot(x, y)
>>> plt.show()
""")
add_newdoc("kei",
r"""
kei(x, out=None)
Kelvin function kei.
Defined as
.. math::
\mathrm{kei}(x) = \Im[K_0(x e^{\pi i / 4})]
where :math:`K_0` is the modified Bessel function of the second
kind (see `kv`). See [dlmf]_ for more details.
Parameters
----------
x : array_like
Real argument.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
Values of the Kelvin function.
See Also
--------
ker : the corresponding real part
keip : the derivative of kei
kv : modified Bessel function of the second kind
References
----------
.. [dlmf] NIST, Digital Library of Mathematical Functions,
https://dlmf.nist.gov/10.61
Examples
--------
It can be expressed using the modified Bessel function of the
second kind.
>>> import numpy as np
>>> import scipy.special as sc
>>> x = np.array([1.0, 2.0, 3.0, 4.0])
>>> sc.kv(0, x * np.exp(np.pi * 1j / 4)).imag
array([-0.49499464, -0.20240007, -0.05112188, 0.0021984 ])
>>> sc.kei(x)
array([-0.49499464, -0.20240007, -0.05112188, 0.0021984 ])
""")
add_newdoc("keip",
r"""
keip(x, out=None)
Derivative of the Kelvin function kei.
Parameters
----------
x : array_like
Real argument.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
The values of the derivative of kei.
See Also
--------
kei
References
----------
.. [dlmf] NIST, Digital Library of Mathematical Functions,
https://dlmf.nist.gov/10#PT5
""")
add_newdoc("kelvin",
"""
kelvin(x, out=None)
Kelvin functions as complex numbers
Parameters
----------
x : array_like
Argument
out : tuple of ndarray, optional
Optional output arrays for the function values
Returns
-------
Be, Ke, Bep, Kep : 4-tuple of scalar or ndarray
The tuple (Be, Ke, Bep, Kep) contains complex numbers
representing the real and imaginary Kelvin functions and their
derivatives evaluated at `x`. For example, kelvin(x)[0].real =
ber x and kelvin(x)[0].imag = bei x with similar relationships
for ker and kei.
""")
add_newdoc("ker",
r"""
ker(x, out=None)
Kelvin function ker.
Defined as
.. math::
\mathrm{ker}(x) = \Re[K_0(x e^{\pi i / 4})]
Where :math:`K_0` is the modified Bessel function of the second
kind (see `kv`). See [dlmf]_ for more details.
Parameters
----------
x : array_like
Real argument.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
Values of the Kelvin function.
See Also
--------
kei : the corresponding imaginary part
kerp : the derivative of ker
kv : modified Bessel function of the second kind
References
----------
.. [dlmf] NIST, Digital Library of Mathematical Functions,
https://dlmf.nist.gov/10.61
Examples
--------
It can be expressed using the modified Bessel function of the
second kind.
>>> import numpy as np
>>> import scipy.special as sc
>>> x = np.array([1.0, 2.0, 3.0, 4.0])
>>> sc.kv(0, x * np.exp(np.pi * 1j / 4)).real
array([ 0.28670621, -0.04166451, -0.06702923, -0.03617885])
>>> sc.ker(x)
array([ 0.28670621, -0.04166451, -0.06702923, -0.03617885])
""")
add_newdoc("kerp",
r"""
kerp(x, out=None)
Derivative of the Kelvin function ker.
Parameters
----------
x : array_like
Real argument.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
Values of the derivative of ker.
See Also
--------
ker
References
----------
.. [dlmf] NIST, Digital Library of Mathematical Functions,
https://dlmf.nist.gov/10#PT5
""")
add_newdoc("kl_div",
r"""
kl_div(x, y, out=None)
Elementwise function for computing Kullback-Leibler divergence.
.. math::
\mathrm{kl\_div}(x, y) =
\begin{cases}
x \log(x / y) - x + y & x > 0, y > 0 \\
y & x = 0, y \ge 0 \\
\infty & \text{otherwise}
\end{cases}
Parameters
----------
x, y : array_like
Real arguments
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
Values of the Kullback-Liebler divergence.
See Also
--------
entr, rel_entr, scipy.stats.entropy
Notes
-----
.. versionadded:: 0.15.0
This function is non-negative and is jointly convex in `x` and `y`.
The origin of this function is in convex programming; see [1]_ for
details. This is why the function contains the extra :math:`-x
+ y` terms over what might be expected from the Kullback-Leibler
divergence. For a version of the function without the extra terms,
see `rel_entr`.
References
----------
.. [1] Boyd, Stephen and Lieven Vandenberghe. *Convex optimization*.
Cambridge University Press, 2004.
:doi:`https://doi.org/10.1017/CBO9780511804441`
""")
add_newdoc("kn",
r"""
kn(n, x, out=None)
Modified Bessel function of the second kind of integer order `n`
Returns the modified Bessel function of the second kind for integer order
`n` at real `z`.
These are also sometimes called functions of the third kind, Basset
functions, or Macdonald functions.
Parameters
----------
n : array_like of int
Order of Bessel functions (floats will truncate with a warning)
x : array_like of float
Argument at which to evaluate the Bessel functions
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
Value of the Modified Bessel function of the second kind,
:math:`K_n(x)`.
Notes
-----
Wrapper for AMOS [1]_ routine `zbesk`. For a discussion of the
algorithm used, see [2]_ and the references therein.
See Also
--------
kv : Same function, but accepts real order and complex argument
kvp : Derivative of this function
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
.. [2] Donald E. Amos, "Algorithm 644: A portable package for Bessel
functions of a complex argument and nonnegative order", ACM
TOMS Vol. 12 Issue 3, Sept. 1986, p. 265
Examples
--------
Plot the function of several orders for real input:
>>> import numpy as np
>>> from scipy.special import kn
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(0, 5, 1000)
>>> for N in range(6):
... plt.plot(x, kn(N, x), label='$K_{}(x)$'.format(N))
>>> plt.ylim(0, 10)
>>> plt.legend()
>>> plt.title(r'Modified Bessel function of the second kind $K_n(x)$')
>>> plt.show()
Calculate for a single value at multiple orders:
>>> kn([4, 5, 6], 1)
array([ 44.23241585, 360.9605896 , 3653.83831186])
""")
add_newdoc("kolmogi",
"""
kolmogi(p, out=None)
Inverse Survival Function of Kolmogorov distribution
It is the inverse function to `kolmogorov`.
Returns y such that ``kolmogorov(y) == p``.
Parameters
----------
p : float array_like
Probability
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
The value(s) of kolmogi(p)
Notes
-----
`kolmogorov` is used by `stats.kstest` in the application of the
Kolmogorov-Smirnov Goodness of Fit test. For historial reasons this
function is exposed in `scpy.special`, but the recommended way to achieve
the most accurate CDF/SF/PDF/PPF/ISF computations is to use the
`stats.kstwobign` distribution.
See Also
--------
kolmogorov : The Survival Function for the distribution
scipy.stats.kstwobign : Provides the functionality as a continuous distribution
smirnov, smirnovi : Functions for the one-sided distribution
Examples
--------
>>> from scipy.special import kolmogi
>>> kolmogi([0, 0.1, 0.25, 0.5, 0.75, 0.9, 1.0])
array([ inf, 1.22384787, 1.01918472, 0.82757356, 0.67644769,
0.57117327, 0. ])
""")
add_newdoc("kolmogorov",
r"""
kolmogorov(y, out=None)
Complementary cumulative distribution (Survival Function) function of
Kolmogorov distribution.
Returns the complementary cumulative distribution function of
Kolmogorov's limiting distribution (``D_n*\sqrt(n)`` as n goes to infinity)
of a two-sided test for equality between an empirical and a theoretical
distribution. It is equal to the (limit as n->infinity of the)
probability that ``sqrt(n) * max absolute deviation > y``.
Parameters
----------
y : float array_like
Absolute deviation between the Empirical CDF (ECDF) and the target CDF,
multiplied by sqrt(n).
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
The value(s) of kolmogorov(y)
Notes
-----
`kolmogorov` is used by `stats.kstest` in the application of the
Kolmogorov-Smirnov Goodness of Fit test. For historial reasons this
function is exposed in `scpy.special`, but the recommended way to achieve
the most accurate CDF/SF/PDF/PPF/ISF computations is to use the
`stats.kstwobign` distribution.
See Also
--------
kolmogi : The Inverse Survival Function for the distribution
scipy.stats.kstwobign : Provides the functionality as a continuous distribution
smirnov, smirnovi : Functions for the one-sided distribution
Examples
--------
Show the probability of a gap at least as big as 0, 0.5 and 1.0.
>>> import numpy as np
>>> from scipy.special import kolmogorov
>>> from scipy.stats import kstwobign
>>> kolmogorov([0, 0.5, 1.0])
array([ 1. , 0.96394524, 0.26999967])
Compare a sample of size 1000 drawn from a Laplace(0, 1) distribution against
the target distribution, a Normal(0, 1) distribution.
>>> from scipy.stats import norm, laplace
>>> rng = np.random.default_rng()
>>> n = 1000
>>> lap01 = laplace(0, 1)
>>> x = np.sort(lap01.rvs(n, random_state=rng))
>>> np.mean(x), np.std(x)
(-0.05841730131499543, 1.3968109101997568)
Construct the Empirical CDF and the K-S statistic Dn.
>>> target = norm(0,1) # Normal mean 0, stddev 1
>>> cdfs = target.cdf(x)
>>> ecdfs = np.arange(n+1, dtype=float)/n
>>> gaps = np.column_stack([cdfs - ecdfs[:n], ecdfs[1:] - cdfs])
>>> Dn = np.max(gaps)
>>> Kn = np.sqrt(n) * Dn
>>> print('Dn=%f, sqrt(n)*Dn=%f' % (Dn, Kn))
Dn=0.043363, sqrt(n)*Dn=1.371265
>>> print(chr(10).join(['For a sample of size n drawn from a N(0, 1) distribution:',
... ' the approximate Kolmogorov probability that sqrt(n)*Dn>=%f is %f' % (Kn, kolmogorov(Kn)),
... ' the approximate Kolmogorov probability that sqrt(n)*Dn<=%f is %f' % (Kn, kstwobign.cdf(Kn))]))
For a sample of size n drawn from a N(0, 1) distribution:
the approximate Kolmogorov probability that sqrt(n)*Dn>=1.371265 is 0.046533
the approximate Kolmogorov probability that sqrt(n)*Dn<=1.371265 is 0.953467
Plot the Empirical CDF against the target N(0, 1) CDF.
>>> import matplotlib.pyplot as plt
>>> plt.step(np.concatenate([[-3], x]), ecdfs, where='post', label='Empirical CDF')
>>> x3 = np.linspace(-3, 3, 100)
>>> plt.plot(x3, target.cdf(x3), label='CDF for N(0, 1)')
>>> plt.ylim([0, 1]); plt.grid(True); plt.legend();
>>> # Add vertical lines marking Dn+ and Dn-
>>> iminus, iplus = np.argmax(gaps, axis=0)
>>> plt.vlines([x[iminus]], ecdfs[iminus], cdfs[iminus], color='r', linestyle='dashed', lw=4)
>>> plt.vlines([x[iplus]], cdfs[iplus], ecdfs[iplus+1], color='r', linestyle='dashed', lw=4)
>>> plt.show()
""")
add_newdoc("_kolmogc",
r"""
Internal function, do not use.
""")
add_newdoc("_kolmogci",
r"""
Internal function, do not use.
""")
add_newdoc("_kolmogp",
r"""
Internal function, do not use.
""")
add_newdoc("kv",
r"""
kv(v, z, out=None)
Modified Bessel function of the second kind of real order `v`
Returns the modified Bessel function of the second kind for real order
`v` at complex `z`.
These are also sometimes called functions of the third kind, Basset
functions, or Macdonald functions. They are defined as those solutions
of the modified Bessel equation for which,
.. math::
K_v(x) \sim \sqrt{\pi/(2x)} \exp(-x)
as :math:`x \to \infty` [3]_.
Parameters
----------
v : array_like of float
Order of Bessel functions
z : array_like of complex
Argument at which to evaluate the Bessel functions
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
The results. Note that input must be of complex type to get complex
output, e.g. ``kv(3, -2+0j)`` instead of ``kv(3, -2)``.
Notes
-----
Wrapper for AMOS [1]_ routine `zbesk`. For a discussion of the
algorithm used, see [2]_ and the references therein.
See Also
--------
kve : This function with leading exponential behavior stripped off.
kvp : Derivative of this function
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
.. [2] Donald E. Amos, "Algorithm 644: A portable package for Bessel
functions of a complex argument and nonnegative order", ACM
TOMS Vol. 12 Issue 3, Sept. 1986, p. 265
.. [3] NIST Digital Library of Mathematical Functions,
Eq. 10.25.E3. https://dlmf.nist.gov/10.25.E3
Examples
--------
Plot the function of several orders for real input:
>>> import numpy as np
>>> from scipy.special import kv
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(0, 5, 1000)
>>> for N in np.linspace(0, 6, 5):
... plt.plot(x, kv(N, x), label='$K_{{{}}}(x)$'.format(N))
>>> plt.ylim(0, 10)
>>> plt.legend()
>>> plt.title(r'Modified Bessel function of the second kind $K_\nu(x)$')
>>> plt.show()
Calculate for a single value at multiple orders:
>>> kv([4, 4.5, 5], 1+2j)
array([ 0.1992+2.3892j, 2.3493+3.6j , 7.2827+3.8104j])
""")
add_newdoc("kve",
r"""
kve(v, z, out=None)
Exponentially scaled modified Bessel function of the second kind.
Returns the exponentially scaled, modified Bessel function of the
second kind (sometimes called the third kind) for real order `v` at
complex `z`::
kve(v, z) = kv(v, z) * exp(z)
Parameters
----------
v : array_like of float
Order of Bessel functions
z : array_like of complex
Argument at which to evaluate the Bessel functions
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
The exponentially scaled modified Bessel function of the second kind.
Notes
-----
Wrapper for AMOS [1]_ routine `zbesk`. For a discussion of the
algorithm used, see [2]_ and the references therein.
See Also
--------
kv : This function without exponential scaling.
k0e : Faster version of this function for order 0.
k1e : Faster version of this function for order 1.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
.. [2] Donald E. Amos, "Algorithm 644: A portable package for Bessel
functions of a complex argument and nonnegative order", ACM
TOMS Vol. 12 Issue 3, Sept. 1986, p. 265
Examples
--------
In the following example `kv` returns 0 whereas `kve` still returns
a useful finite number.
>>> import numpy as np
>>> from scipy.special import kv, kve
>>> import matplotlib.pyplot as plt
>>> kv(3, 1000.), kve(3, 1000.)
(0.0, 0.03980696128440973)
Evaluate the function at one point for different orders by
providing a list or NumPy array as argument for the `v` parameter:
>>> kve([0, 1, 1.5], 1.)
array([1.14446308, 1.63615349, 2.50662827])
Evaluate the function at several points for order 0 by providing an
array for `z`.
>>> points = np.array([1., 3., 10.])
>>> kve(0, points)
array([1.14446308, 0.6977616 , 0.39163193])
Evaluate the function at several points for different orders by
providing arrays for both `v` for `z`. Both arrays have to be
broadcastable to the correct shape. To calculate the orders 0, 1
and 2 for a 1D array of points:
>>> kve([[0], [1], [2]], points)
array([[1.14446308, 0.6977616 , 0.39163193],
[1.63615349, 0.80656348, 0.41076657],
[4.41677005, 1.23547058, 0.47378525]])
Plot the functions of order 0 to 3 from 0 to 5.
>>> fig, ax = plt.subplots()
>>> x = np.linspace(0., 5., 1000)
>>> for i in range(4):
... ax.plot(x, kve(i, x), label=f'$K_{i!r}(z)\cdot e^z$')
>>> ax.legend()
>>> ax.set_xlabel(r"$z$")
>>> ax.set_ylim(0, 4)
>>> ax.set_xlim(0, 5)
>>> plt.show()
""")
add_newdoc("_lanczos_sum_expg_scaled",
"""
Internal function, do not use.
""")
add_newdoc("_lgam1p",
"""
Internal function, do not use.
""")
add_newdoc("log1p",
"""
log1p(x, out=None)
Calculates log(1 + x) for use when `x` is near zero.
Parameters
----------
x : array_like
Real or complex valued input.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
Values of ``log(1 + x)``.
See Also
--------
expm1, cosm1
Examples
--------
>>> import numpy as np
>>> import scipy.special as sc
It is more accurate than using ``log(1 + x)`` directly for ``x``
near 0. Note that in the below example ``1 + 1e-17 == 1`` to
double precision.
>>> sc.log1p(1e-17)
1e-17
>>> np.log(1 + 1e-17)
0.0
""")
add_newdoc("_log1pmx",
"""
Internal function, do not use.
""")
add_newdoc('log_expit',
"""
log_expit(x, out=None)
Logarithm of the logistic sigmoid function.
The SciPy implementation of the logistic sigmoid function is
`scipy.special.expit`, so this function is called ``log_expit``.
The function is mathematically equivalent to ``log(expit(x))``, but
is formulated to avoid loss of precision for inputs with large
(positive or negative) magnitude.
Parameters
----------
x : array_like
The values to apply ``log_expit`` to element-wise.
out : ndarray, optional
Optional output array for the function results
Returns
-------
out : scalar or ndarray
The computed values, an ndarray of the same shape as ``x``.
See Also
--------
expit
Notes
-----
As a ufunc, ``log_expit`` takes a number of optional keyword arguments.
For more information see
`ufuncs <https://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_
.. versionadded:: 1.8.0
Examples
--------
>>> import numpy as np
>>> from scipy.special import log_expit, expit
>>> log_expit([-3.0, 0.25, 2.5, 5.0])
array([-3.04858735, -0.57593942, -0.07888973, -0.00671535])
Large negative values:
>>> log_expit([-100, -500, -1000])
array([ -100., -500., -1000.])
Note that ``expit(-1000)`` returns 0, so the naive implementation
``log(expit(-1000))`` return ``-inf``.
Large positive values:
>>> log_expit([29, 120, 400])
array([-2.54366565e-013, -7.66764807e-053, -1.91516960e-174])
Compare that to the naive implementation:
>>> np.log(expit([29, 120, 400]))
array([-2.54463117e-13, 0.00000000e+00, 0.00000000e+00])
The first value is accurate to only 3 digits, and the larger inputs
lose all precision and return 0.
""")
add_newdoc('logit',
"""
logit(x, out=None)
Logit ufunc for ndarrays.
The logit function is defined as logit(p) = log(p/(1-p)).
Note that logit(0) = -inf, logit(1) = inf, and logit(p)
for p<0 or p>1 yields nan.
Parameters
----------
x : ndarray
The ndarray to apply logit to element-wise.
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
An ndarray of the same shape as x. Its entries
are logit of the corresponding entry of x.
See Also
--------
expit
Notes
-----
As a ufunc logit takes a number of optional
keyword arguments. For more information
see `ufuncs <https://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_
.. versionadded:: 0.10.0
Examples
--------
>>> import numpy as np
>>> from scipy.special import logit, expit
>>> logit([0, 0.25, 0.5, 0.75, 1])
array([ -inf, -1.09861229, 0. , 1.09861229, inf])
`expit` is the inverse of `logit`:
>>> expit(logit([0.1, 0.75, 0.999]))
array([ 0.1 , 0.75 , 0.999])
Plot logit(x) for x in [0, 1]:
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(0, 1, 501)
>>> y = logit(x)
>>> plt.plot(x, y)
>>> plt.grid()
>>> plt.ylim(-6, 6)
>>> plt.xlabel('x')
>>> plt.title('logit(x)')
>>> plt.show()
""")
add_newdoc("lpmv",
r"""
lpmv(m, v, x, out=None)
Associated Legendre function of integer order and real degree.
Defined as
.. math::
P_v^m = (-1)^m (1 - x^2)^{m/2} \frac{d^m}{dx^m} P_v(x)
where
.. math::
P_v = \sum_{k = 0}^\infty \frac{(-v)_k (v + 1)_k}{(k!)^2}
\left(\frac{1 - x}{2}\right)^k
is the Legendre function of the first kind. Here :math:`(\cdot)_k`
is the Pochhammer symbol; see `poch`.
Parameters
----------
m : array_like
Order (int or float). If passed a float not equal to an
integer the function returns NaN.
v : array_like
Degree (float).
x : array_like
Argument (float). Must have ``|x| <= 1``.
out : ndarray, optional
Optional output array for the function results
Returns
-------
pmv : scalar or ndarray
Value of the associated Legendre function.
See Also
--------
lpmn : Compute the associated Legendre function for all orders
``0, ..., m`` and degrees ``0, ..., n``.
clpmn : Compute the associated Legendre function at complex
arguments.
Notes
-----
Note that this implementation includes the Condon-Shortley phase.
References
----------
.. [1] Zhang, Jin, "Computation of Special Functions", John Wiley
and Sons, Inc, 1996.
""")
add_newdoc("mathieu_a",
"""
mathieu_a(m, q, out=None)
Characteristic value of even Mathieu functions
Parameters
----------
m : array_like
Order of the function
q : array_like
Parameter of the function
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
Characteristic value for the even solution, ``ce_m(z, q)``, of
Mathieu's equation.
See Also
--------
mathieu_b, mathieu_cem, mathieu_sem
""")
add_newdoc("mathieu_b",
"""
mathieu_b(m, q, out=None)
Characteristic value of odd Mathieu functions
Parameters
----------
m : array_like
Order of the function
q : array_like
Parameter of the function
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
Characteristic value for the odd solution, ``se_m(z, q)``, of Mathieu's
equation.
See Also
--------
mathieu_a, mathieu_cem, mathieu_sem
""")
add_newdoc("mathieu_cem",
"""
mathieu_cem(m, q, x, out=None)
Even Mathieu function and its derivative
Returns the even Mathieu function, ``ce_m(x, q)``, of order `m` and
parameter `q` evaluated at `x` (given in degrees). Also returns the
derivative with respect to `x` of ce_m(x, q)
Parameters
----------
m : array_like
Order of the function
q : array_like
Parameter of the function
x : array_like
Argument of the function, *given in degrees, not radians*
out : tuple of ndarray, optional
Optional output arrays for the function results
Returns
-------
y : scalar or ndarray
Value of the function
yp : scalar or ndarray
Value of the derivative vs x
See Also
--------
mathieu_a, mathieu_b, mathieu_sem
""")
add_newdoc("mathieu_modcem1",
"""
mathieu_modcem1(m, q, x, out=None)
Even modified Mathieu function of the first kind and its derivative
Evaluates the even modified Mathieu function of the first kind,
``Mc1m(x, q)``, and its derivative at `x` for order `m` and parameter
`q`.
Parameters
----------
m : array_like
Order of the function
q : array_like
Parameter of the function
x : array_like
Argument of the function, *given in degrees, not radians*
out : tuple of ndarray, optional
Optional output arrays for the function results
Returns
-------
y : scalar or ndarray
Value of the function
yp : scalar or ndarray
Value of the derivative vs x
See Also
--------
mathieu_modsem1
""")
add_newdoc("mathieu_modcem2",
"""
mathieu_modcem2(m, q, x, out=None)
Even modified Mathieu function of the second kind and its derivative
Evaluates the even modified Mathieu function of the second kind,
Mc2m(x, q), and its derivative at `x` (given in degrees) for order `m`
and parameter `q`.
Parameters
----------
m : array_like
Order of the function
q : array_like
Parameter of the function
x : array_like
Argument of the function, *given in degrees, not radians*
out : tuple of ndarray, optional
Optional output arrays for the function results
Returns
-------
y : scalar or ndarray
Value of the function
yp : scalar or ndarray
Value of the derivative vs x
See Also
--------
mathieu_modsem2
""")
add_newdoc("mathieu_modsem1",
"""
mathieu_modsem1(m, q, x, out=None)
Odd modified Mathieu function of the first kind and its derivative
Evaluates the odd modified Mathieu function of the first kind,
Ms1m(x, q), and its derivative at `x` (given in degrees) for order `m`
and parameter `q`.
Parameters
----------
m : array_like
Order of the function
q : array_like
Parameter of the function
x : array_like
Argument of the function, *given in degrees, not radians*
out : tuple of ndarray, optional
Optional output arrays for the function results
Returns
-------
y : scalar or ndarray
Value of the function
yp : scalar or ndarray
Value of the derivative vs x
See Also
--------
mathieu_modcem1
""")
add_newdoc("mathieu_modsem2",
"""
mathieu_modsem2(m, q, x, out=None)
Odd modified Mathieu function of the second kind and its derivative
Evaluates the odd modified Mathieu function of the second kind,
Ms2m(x, q), and its derivative at `x` (given in degrees) for order `m`
and parameter q.
Parameters
----------
m : array_like
Order of the function
q : array_like
Parameter of the function
x : array_like
Argument of the function, *given in degrees, not radians*
out : tuple of ndarray, optional
Optional output arrays for the function results
Returns
-------
y : scalar or ndarray
Value of the function
yp : scalar or ndarray
Value of the derivative vs x
See Also
--------
mathieu_modcem2
""")
add_newdoc(
"mathieu_sem",
"""
mathieu_sem(m, q, x, out=None)
Odd Mathieu function and its derivative
Returns the odd Mathieu function, se_m(x, q), of order `m` and
parameter `q` evaluated at `x` (given in degrees). Also returns the
derivative with respect to `x` of se_m(x, q).
Parameters
----------
m : array_like
Order of the function
q : array_like
Parameter of the function
x : array_like
Argument of the function, *given in degrees, not radians*.
out : tuple of ndarray, optional
Optional output arrays for the function results
Returns
-------
y : scalar or ndarray
Value of the function
yp : scalar or ndarray
Value of the derivative vs x
See Also
--------
mathieu_a, mathieu_b, mathieu_cem
""")
add_newdoc("modfresnelm",
"""
modfresnelm(x, out=None)
Modified Fresnel negative integrals
Parameters
----------
x : array_like
Function argument
out : tuple of ndarray, optional
Optional output arrays for the function results
Returns
-------
fm : scalar or ndarray
Integral ``F_-(x)``: ``integral(exp(-1j*t*t), t=x..inf)``
km : scalar or ndarray
Integral ``K_-(x)``: ``1/sqrt(pi)*exp(1j*(x*x+pi/4))*fp``
See Also
--------
modfresnelp
""")
add_newdoc("modfresnelp",
"""
modfresnelp(x, out=None)
Modified Fresnel positive integrals
Parameters
----------
x : array_like
Function argument
out : tuple of ndarray, optional
Optional output arrays for the function results
Returns
-------
fp : scalar or ndarray
Integral ``F_+(x)``: ``integral(exp(1j*t*t), t=x..inf)``
kp : scalar or ndarray
Integral ``K_+(x)``: ``1/sqrt(pi)*exp(-1j*(x*x+pi/4))*fp``
See Also
--------
modfresnelm
""")
add_newdoc("modstruve",
r"""
modstruve(v, x, out=None)
Modified Struve function.
Return the value of the modified Struve function of order `v` at `x`. The
modified Struve function is defined as,
.. math::
L_v(x) = -\imath \exp(-\pi\imath v/2) H_v(\imath x),
where :math:`H_v` is the Struve function.
Parameters
----------
v : array_like
Order of the modified Struve function (float).
x : array_like
Argument of the Struve function (float; must be positive unless `v` is
an integer).
out : ndarray, optional
Optional output array for the function results
Returns
-------
L : scalar or ndarray
Value of the modified Struve function of order `v` at `x`.
Notes
-----
Three methods discussed in [1]_ are used to evaluate the function:
- power series
- expansion in Bessel functions (if :math:`|x| < |v| + 20`)
- asymptotic large-x expansion (if :math:`x \geq 0.7v + 12`)
Rounding errors are estimated based on the largest terms in the sums, and
the result associated with the smallest error is returned.
See also
--------
struve
References
----------
.. [1] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/11
Examples
--------
Calculate the modified Struve function of order 1 at 2.
>>> import numpy as np
>>> from scipy.special import modstruve
>>> import matplotlib.pyplot as plt
>>> modstruve(1, 2.)
1.102759787367716
Calculate the modified Struve function at 2 for orders 1, 2 and 3 by
providing a list for the order parameter `v`.
>>> modstruve([1, 2, 3], 2.)
array([1.10275979, 0.41026079, 0.11247294])
Calculate the modified Struve function of order 1 for several points
by providing an array for `x`.
>>> points = np.array([2., 5., 8.])
>>> modstruve(1, points)
array([ 1.10275979, 23.72821578, 399.24709139])
Compute the modified Struve function for several orders at several
points by providing arrays for `v` and `z`. The arrays have to be
broadcastable to the correct shapes.
>>> orders = np.array([[1], [2], [3]])
>>> points.shape, orders.shape
((3,), (3, 1))
>>> modstruve(orders, points)
array([[1.10275979e+00, 2.37282158e+01, 3.99247091e+02],
[4.10260789e-01, 1.65535979e+01, 3.25973609e+02],
[1.12472937e-01, 9.42430454e+00, 2.33544042e+02]])
Plot the modified Struve functions of order 0 to 3 from -5 to 5.
>>> fig, ax = plt.subplots()
>>> x = np.linspace(-5., 5., 1000)
>>> for i in range(4):
... ax.plot(x, modstruve(i, x), label=f'$L_{i!r}$')
>>> ax.legend(ncol=2)
>>> ax.set_xlim(-5, 5)
>>> ax.set_title(r"Modified Struve functions $L_{\nu}$")
>>> plt.show()
""")
add_newdoc("nbdtr",
r"""
nbdtr(k, n, p, out=None)
Negative binomial cumulative distribution function.
Returns the sum of the terms 0 through `k` of the negative binomial
distribution probability mass function,
.. math::
F = \sum_{j=0}^k {{n + j - 1}\choose{j}} p^n (1 - p)^j.
In a sequence of Bernoulli trials with individual success probabilities
`p`, this is the probability that `k` or fewer failures precede the nth
success.
Parameters
----------
k : array_like
The maximum number of allowed failures (nonnegative int).
n : array_like
The target number of successes (positive int).
p : array_like
Probability of success in a single event (float).
out : ndarray, optional
Optional output array for the function results
Returns
-------
F : scalar or ndarray
The probability of `k` or fewer failures before `n` successes in a
sequence of events with individual success probability `p`.
See also
--------
nbdtrc : Negative binomial survival function
nbdtrik : Negative binomial quantile function
scipy.stats.nbinom : Negative binomial distribution
Notes
-----
If floating point values are passed for `k` or `n`, they will be truncated
to integers.
The terms are not summed directly; instead the regularized incomplete beta
function is employed, according to the formula,
.. math::
\mathrm{nbdtr}(k, n, p) = I_{p}(n, k + 1).
Wrapper for the Cephes [1]_ routine `nbdtr`.
The negative binomial distribution is also available as
`scipy.stats.nbinom`. Using `nbdtr` directly can improve performance
compared to the ``cdf`` method of `scipy.stats.nbinom` (see last example).
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
Examples
--------
Compute the function for ``k=10`` and ``n=5`` at ``p=0.5``.
>>> import numpy as np
>>> from scipy.special import nbdtr
>>> nbdtr(10, 5, 0.5)
0.940765380859375
Compute the function for ``n=10`` and ``p=0.5`` at several points by
providing a NumPy array or list for `k`.
>>> nbdtr([5, 10, 15], 10, 0.5)
array([0.15087891, 0.58809853, 0.88523853])
Plot the function for four different parameter sets.
>>> import matplotlib.pyplot as plt
>>> k = np.arange(130)
>>> n_parameters = [20, 20, 20, 80]
>>> p_parameters = [0.2, 0.5, 0.8, 0.5]
>>> linestyles = ['solid', 'dashed', 'dotted', 'dashdot']
>>> parameters_list = list(zip(p_parameters, n_parameters,
... linestyles))
>>> fig, ax = plt.subplots(figsize=(8, 8))
>>> for parameter_set in parameters_list:
... p, n, style = parameter_set
... nbdtr_vals = nbdtr(k, n, p)
... ax.plot(k, nbdtr_vals, label=rf"$n={n},\, p={p}$",
... ls=style)
>>> ax.legend()
>>> ax.set_xlabel("$k$")
>>> ax.set_title("Negative binomial cumulative distribution function")
>>> plt.show()
The negative binomial distribution is also available as
`scipy.stats.nbinom`. Using `nbdtr` directly can be much faster than
calling the ``cdf`` method of `scipy.stats.nbinom`, especially for small
arrays or individual values. To get the same results one must use the
following parametrization: ``nbinom(n, p).cdf(k)=nbdtr(k, n, p)``.
>>> from scipy.stats import nbinom
>>> k, n, p = 5, 3, 0.5
>>> nbdtr_res = nbdtr(k, n, p) # this will often be faster than below
>>> stats_res = nbinom(n, p).cdf(k)
>>> stats_res, nbdtr_res # test that results are equal
(0.85546875, 0.85546875)
`nbdtr` can evaluate different parameter sets by providing arrays with
shapes compatible for broadcasting for `k`, `n` and `p`. Here we compute
the function for three different `k` at four locations `p`, resulting in
a 3x4 array.
>>> k = np.array([[5], [10], [15]])
>>> p = np.array([0.3, 0.5, 0.7, 0.9])
>>> k.shape, p.shape
((3, 1), (4,))
>>> nbdtr(k, 5, p)
array([[0.15026833, 0.62304687, 0.95265101, 0.9998531 ],
[0.48450894, 0.94076538, 0.99932777, 0.99999999],
[0.76249222, 0.99409103, 0.99999445, 1. ]])
""")
add_newdoc("nbdtrc",
r"""
nbdtrc(k, n, p, out=None)
Negative binomial survival function.
Returns the sum of the terms `k + 1` to infinity of the negative binomial
distribution probability mass function,
.. math::
F = \sum_{j=k + 1}^\infty {{n + j - 1}\choose{j}} p^n (1 - p)^j.
In a sequence of Bernoulli trials with individual success probabilities
`p`, this is the probability that more than `k` failures precede the nth
success.
Parameters
----------
k : array_like
The maximum number of allowed failures (nonnegative int).
n : array_like
The target number of successes (positive int).
p : array_like
Probability of success in a single event (float).
out : ndarray, optional
Optional output array for the function results
Returns
-------
F : scalar or ndarray
The probability of `k + 1` or more failures before `n` successes in a
sequence of events with individual success probability `p`.
See also
--------
nbdtr : Negative binomial cumulative distribution function
nbdtrik : Negative binomial percentile function
scipy.stats.nbinom : Negative binomial distribution
Notes
-----
If floating point values are passed for `k` or `n`, they will be truncated
to integers.
The terms are not summed directly; instead the regularized incomplete beta
function is employed, according to the formula,
.. math::
\mathrm{nbdtrc}(k, n, p) = I_{1 - p}(k + 1, n).
Wrapper for the Cephes [1]_ routine `nbdtrc`.
The negative binomial distribution is also available as
`scipy.stats.nbinom`. Using `nbdtrc` directly can improve performance
compared to the ``sf`` method of `scipy.stats.nbinom` (see last example).
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
Examples
--------
Compute the function for ``k=10`` and ``n=5`` at ``p=0.5``.
>>> import numpy as np
>>> from scipy.special import nbdtrc
>>> nbdtrc(10, 5, 0.5)
0.059234619140624986
Compute the function for ``n=10`` and ``p=0.5`` at several points by
providing a NumPy array or list for `k`.
>>> nbdtrc([5, 10, 15], 10, 0.5)
array([0.84912109, 0.41190147, 0.11476147])
Plot the function for four different parameter sets.
>>> import matplotlib.pyplot as plt
>>> k = np.arange(130)
>>> n_parameters = [20, 20, 20, 80]
>>> p_parameters = [0.2, 0.5, 0.8, 0.5]
>>> linestyles = ['solid', 'dashed', 'dotted', 'dashdot']
>>> parameters_list = list(zip(p_parameters, n_parameters,
... linestyles))
>>> fig, ax = plt.subplots(figsize=(8, 8))
>>> for parameter_set in parameters_list:
... p, n, style = parameter_set
... nbdtrc_vals = nbdtrc(k, n, p)
... ax.plot(k, nbdtrc_vals, label=rf"$n={n},\, p={p}$",
... ls=style)
>>> ax.legend()
>>> ax.set_xlabel("$k$")
>>> ax.set_title("Negative binomial distribution survival function")
>>> plt.show()
The negative binomial distribution is also available as
`scipy.stats.nbinom`. Using `nbdtrc` directly can be much faster than
calling the ``sf`` method of `scipy.stats.nbinom`, especially for small
arrays or individual values. To get the same results one must use the
following parametrization: ``nbinom(n, p).sf(k)=nbdtrc(k, n, p)``.
>>> from scipy.stats import nbinom
>>> k, n, p = 3, 5, 0.5
>>> nbdtr_res = nbdtrc(k, n, p) # this will often be faster than below
>>> stats_res = nbinom(n, p).sf(k)
>>> stats_res, nbdtr_res # test that results are equal
(0.6367187499999999, 0.6367187499999999)
`nbdtrc` can evaluate different parameter sets by providing arrays with
shapes compatible for broadcasting for `k`, `n` and `p`. Here we compute
the function for three different `k` at four locations `p`, resulting in
a 3x4 array.
>>> k = np.array([[5], [10], [15]])
>>> p = np.array([0.3, 0.5, 0.7, 0.9])
>>> k.shape, p.shape
((3, 1), (4,))
>>> nbdtrc(k, 5, p)
array([[8.49731667e-01, 3.76953125e-01, 4.73489874e-02, 1.46902600e-04],
[5.15491059e-01, 5.92346191e-02, 6.72234070e-04, 9.29610100e-09],
[2.37507779e-01, 5.90896606e-03, 5.55025308e-06, 3.26346760e-13]])
""")
# Docstring for nbdtri: inverse of the negative binomial CDF with respect
# to the success probability `p`; wraps the Cephes routine `nbdtri`.
# Fix: `scipy.stats.nbinom` was listed twice in the See Also section; the
# duplicate entry has been removed.
add_newdoc(
    "nbdtri",
    r"""
    nbdtri(k, n, y, out=None)
    Returns the inverse with respect to the parameter `p` of
    `y = nbdtr(k, n, p)`, the negative binomial cumulative distribution
    function.
    Parameters
    ----------
    k : array_like
        The maximum number of allowed failures (nonnegative int).
    n : array_like
        The target number of successes (positive int).
    y : array_like
        The probability of `k` or fewer failures before `n` successes (float).
    out : ndarray, optional
        Optional output array for the function results
    Returns
    -------
    p : scalar or ndarray
        Probability of success in a single event (float) such that
        `nbdtr(k, n, p) = y`.
    See also
    --------
    nbdtr : Cumulative distribution function of the negative binomial.
    nbdtrc : Negative binomial survival function.
    nbdtrik : Inverse with respect to `k` of `nbdtr(k, n, p)`.
    nbdtrin : Inverse with respect to `n` of `nbdtr(k, n, p)`.
    scipy.stats.nbinom : Negative binomial distribution
    Notes
    -----
    Wrapper for the Cephes [1]_ routine `nbdtri`.
    The negative binomial distribution is also available as
    `scipy.stats.nbinom`. Using `nbdtri` directly can improve performance
    compared to the ``ppf`` method of `scipy.stats.nbinom`.
    References
    ----------
    .. [1] Cephes Mathematical Functions Library,
           http://www.netlib.org/cephes/
    Examples
    --------
    `nbdtri` is the inverse of `nbdtr` with respect to `p`.
    Up to floating point errors the following holds:
    ``nbdtri(k, n, nbdtr(k, n, p))=p``.
    >>> import numpy as np
    >>> from scipy.special import nbdtri, nbdtr
    >>> k, n, y = 5, 10, 0.2
    >>> cdf_val = nbdtr(k, n, y)
    >>> nbdtri(k, n, cdf_val)
    0.20000000000000004
    Compute the function for ``k=10`` and ``n=5`` at several points by
    providing a NumPy array or list for `y`.
    >>> y = np.array([0.1, 0.4, 0.8])
    >>> nbdtri(3, 5, y)
    array([0.34462319, 0.51653095, 0.69677416])
    Plot the function for three different parameter sets.
    >>> import matplotlib.pyplot as plt
    >>> n_parameters = [5, 20, 30, 30]
    >>> k_parameters = [20, 20, 60, 80]
    >>> linestyles = ['solid', 'dashed', 'dotted', 'dashdot']
    >>> parameters_list = list(zip(n_parameters, k_parameters, linestyles))
    >>> cdf_vals = np.linspace(0, 1, 1000)
    >>> fig, ax = plt.subplots(figsize=(8, 8))
    >>> for parameter_set in parameters_list:
    ...     n, k, style = parameter_set
    ...     nbdtri_vals = nbdtri(k, n, cdf_vals)
    ...     ax.plot(cdf_vals, nbdtri_vals, label=rf"$k={k},\ n={n}$",
    ...             ls=style)
    >>> ax.legend()
    >>> ax.set_ylabel("$p$")
    >>> ax.set_xlabel("$CDF$")
    >>> title = "nbdtri: inverse of negative binomial CDF with respect to $p$"
    >>> ax.set_title(title)
    >>> plt.show()
    `nbdtri` can evaluate different parameter sets by providing arrays with
    shapes compatible for broadcasting for `k`, `n` and `p`. Here we compute
    the function for three different `k` at four locations `p`, resulting in
    a 3x4 array.
    >>> k = np.array([[5], [10], [15]])
    >>> y = np.array([0.3, 0.5, 0.7, 0.9])
    >>> k.shape, y.shape
    ((3, 1), (4,))
    >>> nbdtri(k, 5, y)
    array([[0.37258157, 0.45169416, 0.53249956, 0.64578407],
           [0.24588501, 0.30451981, 0.36778453, 0.46397088],
           [0.18362101, 0.22966758, 0.28054743, 0.36066188]])
    """)
add_newdoc("nbdtrik",
r"""
nbdtrik(y, n, p, out=None)
Negative binomial percentile function.
Returns the inverse with respect to the parameter `k` of
`y = nbdtr(k, n, p)`, the negative binomial cumulative distribution
function.
Parameters
----------
y : array_like
The probability of `k` or fewer failures before `n` successes (float).
n : array_like
The target number of successes (positive int).
p : array_like
Probability of success in a single event (float).
out : ndarray, optional
Optional output array for the function results
Returns
-------
k : scalar or ndarray
The maximum number of allowed failures such that `nbdtr(k, n, p) = y`.
See also
--------
nbdtr : Cumulative distribution function of the negative binomial.
nbdtrc : Survival function of the negative binomial.
nbdtri : Inverse with respect to `p` of `nbdtr(k, n, p)`.
nbdtrin : Inverse with respect to `n` of `nbdtr(k, n, p)`.
scipy.stats.nbinom : Negative binomial distribution
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfnbn`.
Formula 26.5.26 of [2]_,
.. math::
\sum_{j=k + 1}^\infty {{n + j - 1}\choose{j}} p^n (1 - p)^j = I_{1 - p}(k + 1, n),
is used to reduce calculation of the cumulative distribution function to
that of a regularized incomplete beta :math:`I`.
Computation of `k` involves a search for a value that produces the desired
value of `y`. The search relies on the monotonicity of `y` with `k`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
Examples
--------
Compute the negative binomial cumulative distribution function for an
exemplary parameter set.
>>> import numpy as np
>>> from scipy.special import nbdtr, nbdtrik
>>> k, n, p = 5, 2, 0.5
>>> cdf_value = nbdtr(k, n, p)
>>> cdf_value
0.9375
Verify that `nbdtrik` recovers the original value for `k`.
>>> nbdtrik(cdf_value, n, p)
5.0
Plot the function for different parameter sets.
>>> import matplotlib.pyplot as plt
>>> p_parameters = [0.2, 0.5, 0.7, 0.5]
>>> n_parameters = [30, 30, 30, 80]
>>> linestyles = ['solid', 'dashed', 'dotted', 'dashdot']
>>> parameters_list = list(zip(p_parameters, n_parameters, linestyles))
>>> cdf_vals = np.linspace(0, 1, 1000)
>>> fig, ax = plt.subplots(figsize=(8, 8))
>>> for parameter_set in parameters_list:
... p, n, style = parameter_set
... nbdtrik_vals = nbdtrik(cdf_vals, n, p)
... ax.plot(cdf_vals, nbdtrik_vals, label=rf"$n={n},\ p={p}$",
... ls=style)
>>> ax.legend()
>>> ax.set_ylabel("$k$")
>>> ax.set_xlabel("$CDF$")
>>> ax.set_title("Negative binomial percentile function")
>>> plt.show()
The negative binomial distribution is also available as
`scipy.stats.nbinom`. The percentile function method ``ppf``
returns the result of `nbdtrik` rounded up to integers:
>>> from scipy.stats import nbinom
>>> q, n, p = 0.6, 5, 0.5
>>> nbinom.ppf(q, n, p), nbdtrik(q, n, p)
(5.0, 4.800428460273882)
""")
add_newdoc("nbdtrin",
r"""
nbdtrin(k, y, p, out=None)
Inverse of `nbdtr` vs `n`.
Returns the inverse with respect to the parameter `n` of
`y = nbdtr(k, n, p)`, the negative binomial cumulative distribution
function.
Parameters
----------
k : array_like
The maximum number of allowed failures (nonnegative int).
y : array_like
The probability of `k` or fewer failures before `n` successes (float).
p : array_like
Probability of success in a single event (float).
out : ndarray, optional
Optional output array for the function results
Returns
-------
n : scalar or ndarray
The number of successes `n` such that `nbdtr(k, n, p) = y`.
See also
--------
nbdtr : Cumulative distribution function of the negative binomial.
nbdtri : Inverse with respect to `p` of `nbdtr(k, n, p)`.
nbdtrik : Inverse with respect to `k` of `nbdtr(k, n, p)`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdfnbn`.
Formula 26.5.26 of [2]_,
.. math::
\sum_{j=k + 1}^\infty {{n + j - 1}\choose{j}} p^n (1 - p)^j = I_{1 - p}(k + 1, n),
is used to reduce calculation of the cumulative distribution function to
that of a regularized incomplete beta :math:`I`.
Computation of `n` involves a search for a value that produces the desired
value of `y`. The search relies on the monotonicity of `y` with `n`.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
Examples
--------
Compute the negative binomial cumulative distribution function for an
exemplary parameter set.
>>> from scipy.special import nbdtr, nbdtrin
>>> k, n, p = 5, 2, 0.5
>>> cdf_value = nbdtr(k, n, p)
>>> cdf_value
0.9375
Verify that `nbdtrin` recovers the original value for `n` up to floating
point accuracy.
>>> nbdtrin(k, cdf_value, p)
1.999999999998137
""")
add_newdoc("ncfdtr",
r"""
ncfdtr(dfn, dfd, nc, f, out=None)
Cumulative distribution function of the non-central F distribution.
The non-central F describes the distribution of,
.. math::
Z = \frac{X/d_n}{Y/d_d}
where :math:`X` and :math:`Y` are independently distributed, with
:math:`X` distributed non-central :math:`\chi^2` with noncentrality
parameter `nc` and :math:`d_n` degrees of freedom, and :math:`Y`
distributed :math:`\chi^2` with :math:`d_d` degrees of freedom.
Parameters
----------
dfn : array_like
Degrees of freedom of the numerator sum of squares. Range (0, inf).
dfd : array_like
Degrees of freedom of the denominator sum of squares. Range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (0, 1e4).
f : array_like
Quantiles, i.e. the upper limit of integration.
out : ndarray, optional
Optional output array for the function results
Returns
-------
cdf : scalar or ndarray
The calculated CDF. If all inputs are scalar, the return will be a
float. Otherwise it will be an array.
See Also
--------
ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`.
ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`.
ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`.
ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`.
Notes
-----
Wrapper for the CDFLIB [1]_ Fortran routine `cdffnc`.
The cumulative distribution function is computed using Formula 26.6.20 of
[2]_:
.. math::
F(d_n, d_d, n_c, f) = \sum_{j=0}^\infty e^{-n_c/2} \frac{(n_c/2)^j}{j!} I_{x}(\frac{d_n}{2} + j, \frac{d_d}{2}),
where :math:`I` is the regularized incomplete beta function, and
:math:`x = f d_n/(f d_n + d_d)`.
The computation time required for this routine is proportional to the
noncentrality parameter `nc`. Very large values of this parameter can
consume immense computer resources. This is why the search range is
bounded by 10,000.
References
----------
.. [1] Barry Brown, James Lovato, and Kathy Russell,
CDFLIB: Library of Fortran Routines for Cumulative Distribution
Functions, Inverses, and Other Parameters.
.. [2] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
Examples
--------
>>> import numpy as np
>>> from scipy import special
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Plot the CDF of the non-central F distribution, for nc=0. Compare with the
F-distribution from scipy.stats:
>>> x = np.linspace(-1, 8, num=500)
>>> dfn = 3
>>> dfd = 2
>>> ncf_stats = stats.f.cdf(x, dfn, dfd)
>>> ncf_special = special.ncfdtr(dfn, dfd, 0, x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, ncf_stats, 'b-', lw=3)
>>> ax.plot(x, ncf_special, 'r-')
>>> plt.show()
""")
add_newdoc("ncfdtri",
"""
ncfdtri(dfn, dfd, nc, p, out=None)
Inverse with respect to `f` of the CDF of the non-central F distribution.
See `ncfdtr` for more details.
Parameters
----------
dfn : array_like
Degrees of freedom of the numerator sum of squares. Range (0, inf).
dfd : array_like
Degrees of freedom of the denominator sum of squares. Range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (0, 1e4).
p : array_like
Value of the cumulative distribution function. Must be in the
range [0, 1].
out : ndarray, optional
Optional output array for the function results
Returns
-------
f : scalar or ndarray
Quantiles, i.e., the upper limit of integration.
See Also
--------
ncfdtr : CDF of the non-central F distribution.
ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`.
ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`.
ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`.
Examples
--------
>>> from scipy.special import ncfdtr, ncfdtri
Compute the CDF for several values of `f`:
>>> f = [0.5, 1, 1.5]
>>> p = ncfdtr(2, 3, 1.5, f)
>>> p
array([ 0.20782291, 0.36107392, 0.47345752])
Compute the inverse. We recover the values of `f`, as expected:
>>> ncfdtri(2, 3, 1.5, p)
array([ 0.5, 1. , 1.5])
""")
add_newdoc("ncfdtridfd",
"""
ncfdtridfd(dfn, p, nc, f, out=None)
Calculate degrees of freedom (denominator) for the noncentral F-distribution.
This is the inverse with respect to `dfd` of `ncfdtr`.
See `ncfdtr` for more details.
Parameters
----------
dfn : array_like
Degrees of freedom of the numerator sum of squares. Range (0, inf).
p : array_like
Value of the cumulative distribution function. Must be in the
range [0, 1].
nc : array_like
Noncentrality parameter. Should be in range (0, 1e4).
f : array_like
Quantiles, i.e., the upper limit of integration.
out : ndarray, optional
Optional output array for the function results
Returns
-------
dfd : scalar or ndarray
Degrees of freedom of the denominator sum of squares.
See Also
--------
ncfdtr : CDF of the non-central F distribution.
ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`.
ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`.
ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`.
Notes
-----
The value of the cumulative noncentral F distribution is not necessarily
monotone in either degrees of freedom. There thus may be two values that
provide a given CDF value. This routine assumes monotonicity and will
find an arbitrary one of the two values.
Examples
--------
>>> from scipy.special import ncfdtr, ncfdtridfd
Compute the CDF for several values of `dfd`:
>>> dfd = [1, 2, 3]
>>> p = ncfdtr(2, dfd, 0.25, 15)
>>> p
array([ 0.8097138 , 0.93020416, 0.96787852])
Compute the inverse. We recover the values of `dfd`, as expected:
>>> ncfdtridfd(2, p, 0.25, 15)
array([ 1., 2., 3.])
""")
# Docstring for ncfdtridfn: inverse of ncfdtr with respect to the
# numerator degrees of freedom `dfn`.
# Fix: parameter `f` was documented as `float`, inconsistent with the
# `array_like` documentation in the sibling functions ncfdtr/ncfdtri/
# ncfdtridfd/ncfdtrinc; aligned to `array_like`.
add_newdoc("ncfdtridfn",
    """
    ncfdtridfn(p, dfd, nc, f, out=None)
    Calculate degrees of freedom (numerator) for the noncentral F-distribution.
    This is the inverse with respect to `dfn` of `ncfdtr`.
    See `ncfdtr` for more details.
    Parameters
    ----------
    p : array_like
        Value of the cumulative distribution function. Must be in the
        range [0, 1].
    dfd : array_like
        Degrees of freedom of the denominator sum of squares. Range (0, inf).
    nc : array_like
        Noncentrality parameter. Should be in range (0, 1e4).
    f : array_like
        Quantiles, i.e., the upper limit of integration.
    out : ndarray, optional
        Optional output array for the function results
    Returns
    -------
    dfn : scalar or ndarray
        Degrees of freedom of the numerator sum of squares.
    See Also
    --------
    ncfdtr : CDF of the non-central F distribution.
    ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`.
    ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`.
    ncfdtrinc : Inverse of `ncfdtr` with respect to `nc`.
    Notes
    -----
    The value of the cumulative noncentral F distribution is not necessarily
    monotone in either degrees of freedom. There thus may be two values that
    provide a given CDF value. This routine assumes monotonicity and will
    find an arbitrary one of the two values.
    Examples
    --------
    >>> from scipy.special import ncfdtr, ncfdtridfn
    Compute the CDF for several values of `dfn`:
    >>> dfn = [1, 2, 3]
    >>> p = ncfdtr(dfn, 2, 0.25, 15)
    >>> p
    array([ 0.92562363,  0.93020416,  0.93188394])
    Compute the inverse. We recover the values of `dfn`, as expected:
    >>> ncfdtridfn(p, 2, 0.25, 15)
    array([ 1.,  2.,  3.])
    """)
add_newdoc("ncfdtrinc",
"""
ncfdtrinc(dfn, dfd, p, f, out=None)
Calculate non-centrality parameter for non-central F distribution.
This is the inverse with respect to `nc` of `ncfdtr`.
See `ncfdtr` for more details.
Parameters
----------
dfn : array_like
Degrees of freedom of the numerator sum of squares. Range (0, inf).
dfd : array_like
Degrees of freedom of the denominator sum of squares. Range (0, inf).
p : array_like
Value of the cumulative distribution function. Must be in the
range [0, 1].
f : array_like
Quantiles, i.e., the upper limit of integration.
out : ndarray, optional
Optional output array for the function results
Returns
-------
nc : scalar or ndarray
Noncentrality parameter.
See Also
--------
ncfdtr : CDF of the non-central F distribution.
ncfdtri : Quantile function; inverse of `ncfdtr` with respect to `f`.
ncfdtridfd : Inverse of `ncfdtr` with respect to `dfd`.
ncfdtridfn : Inverse of `ncfdtr` with respect to `dfn`.
Examples
--------
>>> from scipy.special import ncfdtr, ncfdtrinc
Compute the CDF for several values of `nc`:
>>> nc = [0.5, 1.5, 2.0]
>>> p = ncfdtr(2, 3, nc, 15)
>>> p
array([ 0.96309246, 0.94327955, 0.93304098])
Compute the inverse. We recover the values of `nc`, as expected:
>>> ncfdtrinc(2, 3, p, 15)
array([ 0.5, 1.5, 2. ])
""")
add_newdoc("nctdtr",
"""
nctdtr(df, nc, t, out=None)
Cumulative distribution function of the non-central `t` distribution.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
t : array_like
Quantiles, i.e., the upper limit of integration.
out : ndarray, optional
Optional output array for the function results
Returns
-------
cdf : scalar or ndarray
The calculated CDF. If all inputs are scalar, the return will be a
float. Otherwise, it will be an array.
See Also
--------
nctdtrit : Inverse CDF (iCDF) of the non-central t distribution.
nctdtridf : Calculate degrees of freedom, given CDF and iCDF values.
nctdtrinc : Calculate non-centrality parameter, given CDF iCDF values.
Examples
--------
>>> import numpy as np
>>> from scipy import special
>>> from scipy import stats
>>> import matplotlib.pyplot as plt
Plot the CDF of the non-central t distribution, for nc=0. Compare with the
t-distribution from scipy.stats:
>>> x = np.linspace(-5, 5, num=500)
>>> df = 3
>>> nct_stats = stats.t.cdf(x, df)
>>> nct_special = special.nctdtr(df, 0, x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, nct_stats, 'b-', lw=3)
>>> ax.plot(x, nct_special, 'r-')
>>> plt.show()
""")
# Docstring for nctdtridf: inverse of nctdtr with respect to `df`.
# Fix: the Returns section previously described the result as
# "cdf ... The calculated CDF", but this function returns the degrees of
# freedom matching the supplied CDF value (see the summary line and the
# sibling inverses nctdtrit/nctdtrinc).
add_newdoc("nctdtridf",
    """
    nctdtridf(p, nc, t, out=None)
    Calculate degrees of freedom for non-central t distribution.
    See `nctdtr` for more details.
    Parameters
    ----------
    p : array_like
        CDF values, in range (0, 1].
    nc : array_like
        Noncentrality parameter. Should be in range (-1e6, 1e6).
    t : array_like
        Quantiles, i.e., the upper limit of integration.
    out : ndarray, optional
        Optional output array for the function results
    Returns
    -------
    df : scalar or ndarray
        The degrees of freedom. If all inputs are scalar, the return will be a
        float. Otherwise, it will be an array.
    See Also
    --------
    nctdtr :  CDF of the non-central `t` distribution.
    nctdtrit : Inverse CDF (iCDF) of the non-central t distribution.
    nctdtrinc : Calculate non-centrality parameter, given CDF iCDF values.
    """)
add_newdoc("nctdtrinc",
"""
nctdtrinc(df, p, t, out=None)
Calculate non-centrality parameter for non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
p : array_like
CDF values, in range (0, 1].
t : array_like
Quantiles, i.e., the upper limit of integration.
out : ndarray, optional
Optional output array for the function results
Returns
-------
nc : scalar or ndarray
Noncentrality parameter
See Also
--------
nctdtr : CDF of the non-central `t` distribution.
nctdtrit : Inverse CDF (iCDF) of the non-central t distribution.
nctdtridf : Calculate degrees of freedom, given CDF and iCDF values.
""")
add_newdoc("nctdtrit",
"""
nctdtrit(df, nc, p, out=None)
Inverse cumulative distribution function of the non-central t distribution.
See `nctdtr` for more details.
Parameters
----------
df : array_like
Degrees of freedom of the distribution. Should be in range (0, inf).
nc : array_like
Noncentrality parameter. Should be in range (-1e6, 1e6).
p : array_like
CDF values, in range (0, 1].
out : ndarray, optional
Optional output array for the function results
Returns
-------
t : scalar or ndarray
Quantiles
See Also
--------
nctdtr : CDF of the non-central `t` distribution.
nctdtridf : Calculate degrees of freedom, given CDF and iCDF values.
nctdtrinc : Calculate non-centrality parameter, given CDF iCDF values.
""")
add_newdoc("ndtr",
r"""
ndtr(x, out=None)
Cumulative distribution of the standard normal distribution.
Returns the area under the standard Gaussian probability
density function, integrated from minus infinity to `x`
.. math::
\frac{1}{\sqrt{2\pi}} \int_{-\infty}^x \exp(-t^2/2) dt
Parameters
----------
x : array_like, real or complex
Argument
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
The value of the normal CDF evaluated at `x`
See Also
--------
log_ndtr : Logarithm of ndtr
ndtri : Inverse of ndtr, standard normal percentile function
erf : Error function
erfc : 1 - erf
scipy.stats.norm : Normal distribution
Examples
--------
Evaluate `ndtr` at one point.
>>> import numpy as np
>>> from scipy.special import ndtr
>>> ndtr(0.5)
0.6914624612740131
Evaluate the function at several points by providing a NumPy array
or list for `x`.
>>> ndtr([0, 0.5, 2])
array([0.5 , 0.69146246, 0.97724987])
Plot the function.
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-5, 5, 100)
>>> fig, ax = plt.subplots()
>>> ax.plot(x, ndtr(x))
>>> ax.set_title("Standard normal cumulative distribution function $\Phi$")
>>> plt.show()
""")
# Docstring for nrdtrimn: recover the mean of a normal distribution given
# a CDF value, quantile and standard deviation.
# Fix: the See Also section referenced the function itself (`nrdtrimn`);
# it now points to its counterpart `nrdtrisd` instead.
add_newdoc("nrdtrimn",
    """
    nrdtrimn(p, x, std, out=None)
    Calculate mean of normal distribution given other params.
    Parameters
    ----------
    p : array_like
        CDF values, in range (0, 1].
    x : array_like
        Quantiles, i.e. the upper limit of integration.
    std : array_like
        Standard deviation.
    out : ndarray, optional
        Optional output array for the function results
    Returns
    -------
    mn : scalar or ndarray
        The mean of the normal distribution.
    See Also
    --------
    nrdtrisd, ndtr
    """)
# Docstring for nrdtrisd: recover the standard deviation of a normal
# distribution given a CDF value, quantile and mean.
# Fix: added the counterpart function `nrdtrimn` to the See Also section
# so both inverses cross-reference each other.
add_newdoc("nrdtrisd",
    """
    nrdtrisd(p, x, mn, out=None)
    Calculate standard deviation of normal distribution given other params.
    Parameters
    ----------
    p : array_like
        CDF values, in range (0, 1].
    x : array_like
        Quantiles, i.e. the upper limit of integration.
    mn : scalar or ndarray
        The mean of the normal distribution.
    out : ndarray, optional
        Optional output array for the function results
    Returns
    -------
    std : scalar or ndarray
        Standard deviation.
    See Also
    --------
    nrdtrimn, ndtr
    """)
add_newdoc("log_ndtr",
"""
log_ndtr(x, out=None)
Logarithm of Gaussian cumulative distribution function.
Returns the log of the area under the standard Gaussian probability
density function, integrated from minus infinity to `x`::
log(1/sqrt(2*pi) * integral(exp(-t**2 / 2), t=-inf..x))
Parameters
----------
x : array_like, real or complex
Argument
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
The value of the log of the normal CDF evaluated at `x`
See Also
--------
erf
erfc
scipy.stats.norm
ndtr
Examples
--------
>>> import numpy as np
>>> from scipy.special import log_ndtr, ndtr
The benefit of ``log_ndtr(x)`` over the naive implementation
``np.log(ndtr(x))`` is most evident with moderate to large positive
values of ``x``:
>>> x = np.array([6, 7, 9, 12, 15, 25])
>>> log_ndtr(x)
array([-9.86587646e-010, -1.27981254e-012, -1.12858841e-019,
-1.77648211e-033, -3.67096620e-051, -3.05669671e-138])
The results of the naive calculation for the moderate ``x`` values
have only 5 or 6 correct significant digits. For values of ``x``
greater than approximately 8.3, the naive expression returns 0:
>>> np.log(ndtr(x))
array([-9.86587701e-10, -1.27986510e-12, 0.00000000e+00,
0.00000000e+00, 0.00000000e+00, 0.00000000e+00])
""")
# Docstring for ndtri: percentile function (inverse CDF) of the standard
# normal distribution.
# Fix: the signature line declares the argument as `y`, but the Parameters
# section documented it as `p` and the Returns section referenced
# ``ndtr(x) == p``; the documentation now consistently uses `y`.
add_newdoc("ndtri",
    """
    ndtri(y, out=None)
    Inverse of `ndtr` vs x
    Returns the argument x for which the area under the standard normal
    probability density function (integrated from minus infinity to `x`)
    is equal to y.
    Parameters
    ----------
    y : array_like
        Probability
    out : ndarray, optional
        Optional output array for the function results
    Returns
    -------
    x : scalar or ndarray
        Value of x such that ``ndtr(x) == y``.
    See Also
    --------
    ndtr : Standard normal cumulative probability distribution
    ndtri_exp : Inverse of log_ndtr
    Examples
    --------
    `ndtri` is the percentile function of the standard normal distribution.
    This means it returns the inverse of the cumulative density `ndtr`. First,
    let us compute a cumulative density value.
    >>> import numpy as np
    >>> from scipy.special import ndtri, ndtr
    >>> cdf_val = ndtr(2)
    >>> cdf_val
    0.9772498680518208
    Verify that `ndtri` yields the original value for `x` up to floating point
    errors.
    >>> ndtri(cdf_val)
    2.0000000000000004
    Plot the function. For that purpose, we provide a NumPy array as argument.
    >>> import matplotlib.pyplot as plt
    >>> x = np.linspace(0.01, 1, 200)
    >>> fig, ax = plt.subplots()
    >>> ax.plot(x, ndtri(x))
    >>> ax.set_title("Standard normal percentile function")
    >>> plt.show()
    """)
add_newdoc("obl_ang1",
"""
obl_ang1(m, n, c, x, out=None)
Oblate spheroidal angular function of the first kind and its derivative
Computes the oblate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Parameters
----------
m : array_like
Mode parameter m (nonnegative)
n : array_like
Mode parameter n (>= m)
c : array_like
Spheroidal parameter
x : array_like
Parameter x (``|x| < 1.0``)
out : ndarray, optional
Optional output array for the function results
Returns
-------
s : scalar or ndarray
Value of the function
sp : scalar or ndarray
Value of the derivative vs x
See Also
--------
obl_ang1_cv
""")
add_newdoc("obl_ang1_cv",
"""
obl_ang1_cv(m, n, c, cv, x, out=None)
Oblate spheroidal angular function obl_ang1 for precomputed characteristic value
Computes the oblate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Parameters
----------
m : array_like
Mode parameter m (nonnegative)
n : array_like
Mode parameter n (>= m)
c : array_like
Spheroidal parameter
cv : array_like
Characteristic value
x : array_like
Parameter x (``|x| < 1.0``)
out : ndarray, optional
Optional output array for the function results
Returns
-------
s : scalar or ndarray
Value of the function
sp : scalar or ndarray
Value of the derivative vs x
See Also
--------
obl_ang1
""")
add_newdoc("obl_cv",
"""
obl_cv(m, n, c, out=None)
Characteristic value of oblate spheroidal function
Computes the characteristic value of oblate spheroidal wave
functions of order `m`, `n` (n>=m) and spheroidal parameter `c`.
Parameters
----------
m : array_like
Mode parameter m (nonnegative)
n : array_like
Mode parameter n (>= m)
c : array_like
Spheroidal parameter
out : ndarray, optional
Optional output array for the function results
Returns
-------
cv : scalar or ndarray
Characteristic value
""")
add_newdoc("obl_rad1",
"""
obl_rad1(m, n, c, x, out=None)
Oblate spheroidal radial function of the first kind and its derivative
Computes the oblate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Parameters
----------
m : array_like
Mode parameter m (nonnegative)
n : array_like
Mode parameter n (>= m)
c : array_like
Spheroidal parameter
x : array_like
Parameter x (``|x| < 1.0``)
out : ndarray, optional
Optional output array for the function results
Returns
-------
s : scalar or ndarray
Value of the function
sp : scalar or ndarray
Value of the derivative vs x
See Also
--------
obl_rad1_cv
""")
add_newdoc("obl_rad1_cv",
"""
obl_rad1_cv(m, n, c, cv, x, out=None)
Oblate spheroidal radial function obl_rad1 for precomputed characteristic value
Computes the oblate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Parameters
----------
m : array_like
Mode parameter m (nonnegative)
n : array_like
Mode parameter n (>= m)
c : array_like
Spheroidal parameter
cv : array_like
Characteristic value
x : array_like
Parameter x (``|x| < 1.0``)
out : ndarray, optional
Optional output array for the function results
Returns
-------
s : scalar or ndarray
Value of the function
sp : scalar or ndarray
Value of the derivative vs x
See Also
--------
obl_rad1
""")
add_newdoc("obl_rad2",
"""
obl_rad2(m, n, c, x, out=None)
Oblate spheroidal radial function of the second kind and its derivative.
Computes the oblate spheroidal radial function of the second kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Parameters
----------
m : array_like
Mode parameter m (nonnegative)
n : array_like
Mode parameter n (>= m)
c : array_like
Spheroidal parameter
x : array_like
Parameter x (``|x| < 1.0``)
out : ndarray, optional
Optional output array for the function results
Returns
-------
s : scalar or ndarray
Value of the function
sp : scalar or ndarray
Value of the derivative vs x
See Also
--------
obl_rad2_cv
""")
add_newdoc("obl_rad2_cv",
"""
obl_rad2_cv(m, n, c, cv, x, out=None)
Oblate spheroidal radial function obl_rad2 for precomputed characteristic value
Computes the oblate spheroidal radial function of the second kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Parameters
----------
m : array_like
Mode parameter m (nonnegative)
n : array_like
Mode parameter n (>= m)
c : array_like
Spheroidal parameter
cv : array_like
Characteristic value
x : array_like
Parameter x (``|x| < 1.0``)
out : ndarray, optional
Optional output array for the function results
Returns
-------
s : scalar or ndarray
Value of the function
sp : scalar or ndarray
Value of the derivative vs x
See Also
--------
obl_rad2
""")
add_newdoc("pbdv",
"""
pbdv(v, x, out=None)
Parabolic cylinder function D
Returns (d, dp) the parabolic cylinder function Dv(x) in d and the
derivative, Dv'(x) in dp.
Parameters
----------
v : array_like
Real parameter
x : array_like
Real argument
out : ndarray, optional
Optional output array for the function results
Returns
-------
d : scalar or ndarray
Value of the function
dp : scalar or ndarray
Value of the derivative vs x
""")
add_newdoc("pbvv",
"""
pbvv(v, x, out=None)
Parabolic cylinder function V
Returns the parabolic cylinder function Vv(x) in v and the
derivative, Vv'(x) in vp.
Parameters
----------
v : array_like
Real parameter
x : array_like
Real argument
out : ndarray, optional
Optional output array for the function results
Returns
-------
v : scalar or ndarray
Value of the function
vp : scalar or ndarray
Value of the derivative vs x
""")
add_newdoc("pbwa",
r"""
pbwa(a, x, out=None)
Parabolic cylinder function W.
The function is a particular solution to the differential equation
.. math::
y'' + \left(\frac{1}{4}x^2 - a\right)y = 0,
for a full definition see section 12.14 in [1]_.
Parameters
----------
a : array_like
Real parameter
x : array_like
Real argument
out : ndarray, optional
Optional output array for the function results
Returns
-------
w : scalar or ndarray
Value of the function
wp : scalar or ndarray
Value of the derivative in x
Notes
-----
The function is a wrapper for a Fortran routine by Zhang and Jin
[2]_. The implementation is accurate only for ``|a|, |x| < 5`` and
returns NaN outside that range.
References
----------
.. [1] Digital Library of Mathematical Functions, 14.30.
https://dlmf.nist.gov/14.30
.. [2] Zhang, Shanjie and Jin, Jianming. "Computation of Special
Functions", John Wiley and Sons, 1996.
https://people.sc.fsu.edu/~jburkardt/f_src/special_functions/special_functions.html
""")
add_newdoc("pdtr",
r"""
pdtr(k, m, out=None)
Poisson cumulative distribution function.
Defined as the probability that a Poisson-distributed random
variable with event rate :math:`m` is less than or equal to
:math:`k`. More concretely, this works out to be [1]_
.. math::
\exp(-m) \sum_{j = 0}^{\lfloor{k}\rfloor} \frac{m^j}{j!}.
Parameters
----------
k : array_like
Number of occurrences (nonnegative, real)
m : array_like
Shape parameter (nonnegative, real)
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
Values of the Poisson cumulative distribution function
See Also
--------
pdtrc : Poisson survival function
pdtrik : inverse of `pdtr` with respect to `k`
pdtri : inverse of `pdtr` with respect to `m`
References
----------
.. [1] https://en.wikipedia.org/wiki/Poisson_distribution
Examples
--------
>>> import numpy as np
>>> import scipy.special as sc
It is a cumulative distribution function, so it converges to 1
monotonically as `k` goes to infinity.
>>> sc.pdtr([1, 10, 100, np.inf], 1)
array([0.73575888, 0.99999999, 1. , 1. ])
It is discontinuous at integers and constant between integers.
>>> sc.pdtr([1, 1.5, 1.9, 2], 1)
array([0.73575888, 0.73575888, 0.73575888, 0.9196986 ])
""")
add_newdoc("pdtrc",
"""
pdtrc(k, m, out=None)
Poisson survival function
Returns the sum of the terms from k+1 to infinity of the Poisson
distribution: sum(exp(-m) * m**j / j!, j=k+1..inf) = gammainc(
k+1, m). Arguments must both be non-negative doubles.
Parameters
----------
k : array_like
Number of occurrences (nonnegative, real)
m : array_like
Shape parameter (nonnegative, real)
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
Values of the Poisson survival function
See Also
--------
pdtr : Poisson cumulative distribution function
pdtrik : inverse of `pdtr` with respect to `k`
pdtri : inverse of `pdtr` with respect to `m`
""")
add_newdoc("pdtri",
    """
    pdtri(k, y, out=None)

    Inverse to `pdtr` vs m

    Returns the Poisson variable `m` such that the sum from 0 to `k` of
    the Poisson density is equal to the given probability `y`:
    calculated by ``gammaincinv(k + 1, y)``. `k` must be a nonnegative
    integer and `y` between 0 and 1.

    Parameters
    ----------
    k : array_like
        Number of occurrences (nonnegative, real)
    y : array_like
        Probability
    out : ndarray, optional
        Optional output array for the function results

    Returns
    -------
    scalar or ndarray
        Values of the shape parameter `m` such that ``pdtr(k, m) = y``

    See Also
    --------
    pdtr : Poisson cumulative distribution function
    pdtrc : Poisson survival function
    pdtrik : inverse of `pdtr` with respect to `k`
    """)
add_newdoc("pdtrik",
    """
    pdtrik(p, m, out=None)

    Inverse to `pdtr` vs `k`.

    Parameters
    ----------
    p : array_like
        Probability
    m : array_like
        Shape parameter (nonnegative, real)
    out : ndarray, optional
        Optional output array for the function results

    Returns
    -------
    scalar or ndarray
        The number of occurrences `k` such that ``pdtr(k, m) = p``

    See Also
    --------
    pdtr : Poisson cumulative distribution function
    pdtrc : Poisson survival function
    pdtri : inverse of `pdtr` with respect to `m`
    """)
add_newdoc("poch",
r"""
poch(z, m, out=None)
Pochhammer symbol.
The Pochhammer symbol (rising factorial) is defined as
.. math::
(z)_m = \frac{\Gamma(z + m)}{\Gamma(z)}
For positive integer `m` it reads
.. math::
(z)_m = z (z + 1) ... (z + m - 1)
See [dlmf]_ for more details.
Parameters
----------
z, m : array_like
Real-valued arguments.
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
The value of the function.
References
----------
.. [dlmf] Nist, Digital Library of Mathematical Functions
https://dlmf.nist.gov/5.2#iii
Examples
--------
>>> import scipy.special as sc
It is 1 when m is 0.
>>> sc.poch([1, 2, 3, 4], 0)
array([1., 1., 1., 1.])
For z equal to 1 it reduces to the factorial function.
>>> sc.poch(1, 5)
120.0
>>> 1 * 2 * 3 * 4 * 5
120
It can be expressed in terms of the gamma function.
>>> z, m = 3.7, 2.1
>>> sc.poch(z, m)
20.529581933776953
>>> sc.gamma(z + m) / sc.gamma(z)
20.52958193377696
""")
add_newdoc("powm1", """
    powm1(x, y, out=None)

    Computes ``x**y - 1``.

    This function is useful when `y` is near 0, or when `x` is near 1.

    The function is implemented for real types only (unlike ``numpy.power``,
    which accepts complex inputs).

    Parameters
    ----------
    x : array_like
        The base. Must be a real type (i.e. integer or float, not complex).
    y : array_like
        The exponent. Must be a real type (i.e. integer or float, not complex).
    out : ndarray, optional
        Optional output array for the function results

    Returns
    -------
    scalar or ndarray
        Result of the calculation

    Notes
    -----
    .. versionadded:: 1.10.0

    The underlying code is implemented for single precision and double
    precision floats only.  Unlike `numpy.power`, integer inputs to
    `powm1` are converted to floating point, and complex inputs are
    not accepted.

    Note the following edge cases:

    * ``powm1(x, 0)`` returns 0 for any ``x``, including 0, ``inf``
      and ``nan``.
    * ``powm1(1, y)`` returns 0 for any ``y``, including ``nan``
      and ``inf``.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.special import powm1

    >>> x = np.array([1.2, 10.0, 0.9999999975])
    >>> y = np.array([1e-9, 1e-11, 0.1875])
    >>> powm1(x, y)
    array([ 1.82321557e-10,  2.30258509e-11, -4.68749998e-10])

    It can be verified that the relative errors in those results
    are less than 2.5e-16.

    Compare that to the result of ``x**y - 1``, where the
    relative errors are all larger than 8e-8:

    >>> x**y - 1
    array([ 1.82321491e-10,  2.30258035e-11, -4.68750039e-10])

    """)
add_newdoc("pro_ang1",
"""
pro_ang1(m, n, c, x, out=None)
Prolate spheroidal angular function of the first kind and its derivative
Computes the prolate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Parameters
----------
m : array_like
Nonnegative mode parameter m
n : array_like
Mode parameter n (>= m)
c : array_like
Spheroidal parameter
x : array_like
Real parameter (``|x| < 1.0``)
out : ndarray, optional
Optional output array for the function results
Returns
-------
s : scalar or ndarray
Value of the function
sp : scalar or ndarray
Value of the derivative vs x
""")
add_newdoc("pro_ang1_cv",
"""
pro_ang1_cv(m, n, c, cv, x, out=None)
Prolate spheroidal angular function pro_ang1 for precomputed characteristic value
Computes the prolate spheroidal angular function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Parameters
----------
m : array_like
Nonnegative mode parameter m
n : array_like
Mode parameter n (>= m)
c : array_like
Spheroidal parameter
cv : array_like
Characteristic value
x : array_like
Real parameter (``|x| < 1.0``)
out : ndarray, optional
Optional output array for the function results
Returns
-------
s : scalar or ndarray
Value of the function
sp : scalar or ndarray
Value of the derivative vs x
""")
add_newdoc("pro_cv",
"""
pro_cv(m, n, c, out=None)
Characteristic value of prolate spheroidal function
Computes the characteristic value of prolate spheroidal wave
functions of order `m`, `n` (n>=m) and spheroidal parameter `c`.
Parameters
----------
m : array_like
Nonnegative mode parameter m
n : array_like
Mode parameter n (>= m)
c : array_like
Spheroidal parameter
out : ndarray, optional
Optional output array for the function results
Returns
-------
cv : scalar or ndarray
Characteristic value
""")
add_newdoc("pro_rad1",
"""
pro_rad1(m, n, c, x, out=None)
Prolate spheroidal radial function of the first kind and its derivative
Computes the prolate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.
Parameters
----------
m : array_like
Nonnegative mode parameter m
n : array_like
Mode parameter n (>= m)
c : array_like
Spheroidal parameter
x : array_like
Real parameter (``|x| < 1.0``)
out : ndarray, optional
Optional output array for the function results
Returns
-------
s : scalar or ndarray
Value of the function
sp : scalar or ndarray
Value of the derivative vs x
""")
add_newdoc("pro_rad1_cv",
"""
pro_rad1_cv(m, n, c, cv, x, out=None)
Prolate spheroidal radial function pro_rad1 for precomputed characteristic value
Computes the prolate spheroidal radial function of the first kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Parameters
----------
m : array_like
Nonnegative mode parameter m
n : array_like
Mode parameter n (>= m)
c : array_like
Spheroidal parameter
cv : array_like
Characteristic value
x : array_like
Real parameter (``|x| < 1.0``)
out : ndarray, optional
Optional output array for the function results
Returns
-------
s : scalar or ndarray
Value of the function
sp : scalar or ndarray
Value of the derivative vs x
""")
add_newdoc("pro_rad2",
    """
    pro_rad2(m, n, c, x, out=None)

    Prolate spheroidal radial function of the second kind and its derivative

    Computes the prolate spheroidal radial function of the second kind
    and its derivative (with respect to `x`) for mode parameters m>=0
    and n>=m, spheroidal parameter `c` and ``|x| < 1.0``.

    Parameters
    ----------
    m : array_like
        Nonnegative mode parameter m
    n : array_like
        Mode parameter n (>= m)
    c : array_like
        Spheroidal parameter
    x : array_like
        Real parameter (``|x| < 1.0``)
    out : ndarray, optional
        Optional output array for the function results

    Returns
    -------
    s : scalar or ndarray
        Value of the function
    sp : scalar or ndarray
        Value of the derivative vs x
    """)
add_newdoc("pro_rad2_cv",
"""
pro_rad2_cv(m, n, c, cv, x, out=None)
Prolate spheroidal radial function pro_rad2 for precomputed characteristic value
Computes the prolate spheroidal radial function of the second kind
and its derivative (with respect to `x`) for mode parameters m>=0
and n>=m, spheroidal parameter `c` and ``|x| < 1.0``. Requires
pre-computed characteristic value.
Parameters
----------
m : array_like
Nonnegative mode parameter m
n : array_like
Mode parameter n (>= m)
c : array_like
Spheroidal parameter
cv : array_like
Characteristic value
x : array_like
Real parameter (``|x| < 1.0``)
out : ndarray, optional
Optional output array for the function results
Returns
-------
s : scalar or ndarray
Value of the function
sp : scalar or ndarray
Value of the derivative vs x
""")
add_newdoc("pseudo_huber",
r"""
pseudo_huber(delta, r, out=None)
Pseudo-Huber loss function.
.. math:: \mathrm{pseudo\_huber}(\delta, r) = \delta^2 \left( \sqrt{ 1 + \left( \frac{r}{\delta} \right)^2 } - 1 \right)
Parameters
----------
delta : array_like
Input array, indicating the soft quadratic vs. linear loss changepoint.
r : array_like
Input array, possibly representing residuals.
out : ndarray, optional
Optional output array for the function results
Returns
-------
res : scalar or ndarray
The computed Pseudo-Huber loss function values.
    See Also
--------
huber: Similar function which this function approximates
Notes
-----
Like `huber`, `pseudo_huber` often serves as a robust loss function
in statistics or machine learning to reduce the influence of outliers.
Unlike `huber`, `pseudo_huber` is smooth.
Typically, `r` represents residuals, the difference
between a model prediction and data. Then, for :math:`|r|\leq\delta`,
`pseudo_huber` resembles the squared error and for :math:`|r|>\delta` the
absolute error. This way, the Pseudo-Huber loss often achieves
a fast convergence in model fitting for small residuals like the squared
error loss function and still reduces the influence of outliers
(:math:`|r|>\delta`) like the absolute error loss. As :math:`\delta` is
the cutoff between squared and absolute error regimes, it has
to be tuned carefully for each problem. `pseudo_huber` is also
convex, making it suitable for gradient based optimization. [1]_ [2]_
.. versionadded:: 0.15.0
References
----------
.. [1] Hartley, Zisserman, "Multiple View Geometry in Computer Vision".
2003. Cambridge University Press. p. 619
.. [2] Charbonnier et al. "Deterministic edge-preserving regularization
in computed imaging". 1997. IEEE Trans. Image Processing.
6 (2): 298 - 311.
Examples
--------
Import all necessary modules.
>>> import numpy as np
>>> from scipy.special import pseudo_huber, huber
>>> import matplotlib.pyplot as plt
Calculate the function for ``delta=1`` at ``r=2``.
>>> pseudo_huber(1., 2.)
1.2360679774997898
Calculate the function at ``r=2`` for different `delta` by providing
a list or NumPy array for `delta`.
>>> pseudo_huber([1., 2., 4.], 3.)
array([2.16227766, 3.21110255, 4. ])
Calculate the function for ``delta=1`` at several points by providing
a list or NumPy array for `r`.
>>> pseudo_huber(2., np.array([1., 1.5, 3., 4.]))
array([0.47213595, 1. , 3.21110255, 4.94427191])
The function can be calculated for different `delta` and `r` by
providing arrays for both with compatible shapes for broadcasting.
>>> r = np.array([1., 2.5, 8., 10.])
>>> deltas = np.array([[1.], [5.], [9.]])
>>> print(r.shape, deltas.shape)
(4,) (3, 1)
>>> pseudo_huber(deltas, r)
array([[ 0.41421356, 1.6925824 , 7.06225775, 9.04987562],
[ 0.49509757, 2.95084972, 22.16990566, 30.90169944],
[ 0.49846624, 3.06693762, 27.37435121, 40.08261642]])
Plot the function for different `delta`.
>>> x = np.linspace(-4, 4, 500)
>>> deltas = [1, 2, 3]
>>> linestyles = ["dashed", "dotted", "dashdot"]
>>> fig, ax = plt.subplots()
>>> combined_plot_parameters = list(zip(deltas, linestyles))
>>> for delta, style in combined_plot_parameters:
    ... ax.plot(x, pseudo_huber(delta, x), label=rf"$\delta={delta}$",
... ls=style)
>>> ax.legend(loc="upper center")
>>> ax.set_xlabel("$x$")
    >>> ax.set_title(r"Pseudo-Huber loss function $h_{\delta}(x)$")
>>> ax.set_xlim(-4, 4)
>>> ax.set_ylim(0, 8)
>>> plt.show()
Finally, illustrate the difference between `huber` and `pseudo_huber` by
plotting them and their gradients with respect to `r`. The plot shows
that `pseudo_huber` is continuously differentiable while `huber` is not
at the points :math:`\pm\delta`.
>>> def huber_grad(delta, x):
... grad = np.copy(x)
... linear_area = np.argwhere(np.abs(x) > delta)
... grad[linear_area]=delta*np.sign(x[linear_area])
... return grad
>>> def pseudo_huber_grad(delta, x):
... return x* (1+(x/delta)**2)**(-0.5)
>>> x=np.linspace(-3, 3, 500)
>>> delta = 1.
>>> fig, ax = plt.subplots(figsize=(7, 7))
>>> ax.plot(x, huber(delta, x), label="Huber", ls="dashed")
>>> ax.plot(x, huber_grad(delta, x), label="Huber Gradient", ls="dashdot")
>>> ax.plot(x, pseudo_huber(delta, x), label="Pseudo-Huber", ls="dotted")
>>> ax.plot(x, pseudo_huber_grad(delta, x), label="Pseudo-Huber Gradient",
... ls="solid")
>>> ax.legend(loc="upper center")
>>> plt.show()
""")
add_newdoc("psi",
"""
psi(z, out=None)
The digamma function.
The logarithmic derivative of the gamma function evaluated at ``z``.
Parameters
----------
z : array_like
Real or complex argument.
out : ndarray, optional
Array for the computed values of ``psi``.
Returns
-------
digamma : scalar or ndarray
Computed values of ``psi``.
Notes
-----
For large values not close to the negative real axis, ``psi`` is
computed using the asymptotic series (5.11.2) from [1]_. For small
arguments not close to the negative real axis, the recurrence
relation (5.5.2) from [1]_ is used until the argument is large
enough to use the asymptotic series. For values close to the
negative real axis, the reflection formula (5.5.4) from [1]_ is
used first. Note that ``psi`` has a family of zeros on the
negative real axis which occur between the poles at nonpositive
integers. Around the zeros the reflection formula suffers from
cancellation and the implementation loses precision. The sole
positive zero and the first negative zero, however, are handled
separately by precomputing series expansions using [2]_, so the
function should maintain full accuracy around the origin.
References
----------
.. [1] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/5
.. [2] Fredrik Johansson and others.
"mpmath: a Python library for arbitrary-precision floating-point arithmetic"
(Version 0.19) http://mpmath.org/
Examples
--------
>>> from scipy.special import psi
>>> z = 3 + 4j
>>> psi(z)
(1.55035981733341+1.0105022091860445j)
Verify psi(z) = psi(z + 1) - 1/z:
>>> psi(z + 1) - 1/z
(1.55035981733341+1.0105022091860445j)
""")
add_newdoc("radian",
"""
radian(d, m, s, out=None)
Convert from degrees to radians.
Returns the angle given in (d)egrees, (m)inutes, and (s)econds in
radians.
Parameters
----------
d : array_like
Degrees, can be real-valued.
m : array_like
Minutes, can be real-valued.
s : array_like
Seconds, can be real-valued.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
Values of the inputs in radians.
Examples
--------
>>> import scipy.special as sc
There are many ways to specify an angle.
>>> sc.radian(90, 0, 0)
1.5707963267948966
>>> sc.radian(0, 60 * 90, 0)
1.5707963267948966
>>> sc.radian(0, 0, 60**2 * 90)
1.5707963267948966
The inputs can be real-valued.
>>> sc.radian(1.5, 0, 0)
0.02617993877991494
>>> sc.radian(1, 30, 0)
0.02617993877991494
""")
add_newdoc("rel_entr",
r"""
rel_entr(x, y, out=None)
Elementwise function for computing relative entropy.
.. math::
\mathrm{rel\_entr}(x, y) =
\begin{cases}
x \log(x / y) & x > 0, y > 0 \\
0 & x = 0, y \ge 0 \\
\infty & \text{otherwise}
\end{cases}
Parameters
----------
x, y : array_like
Input arrays
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
Relative entropy of the inputs
See Also
--------
entr, kl_div, scipy.stats.entropy
Notes
-----
.. versionadded:: 0.15.0
This function is jointly convex in x and y.
The origin of this function is in convex programming; see
[1]_. Given two discrete probability distributions :math:`p_1,
\ldots, p_n` and :math:`q_1, \ldots, q_n`, the definition of relative
entropy in the context of *information theory* is
.. math::
\sum_{i = 1}^n \mathrm{rel\_entr}(p_i, q_i).
To compute the latter quantity, use `scipy.stats.entropy`.
See [2]_ for details.
References
----------
.. [1] Boyd, Stephen and Lieven Vandenberghe. *Convex optimization*.
Cambridge University Press, 2004.
           :doi:`10.1017/CBO9780511804441`
.. [2] Kullback-Leibler divergence,
https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence
""")
add_newdoc("rgamma",
r"""
rgamma(z, out=None)
Reciprocal of the gamma function.
Defined as :math:`1 / \Gamma(z)`, where :math:`\Gamma` is the
gamma function. For more on the gamma function see `gamma`.
Parameters
----------
z : array_like
Real or complex valued input
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
Function results
Notes
-----
The gamma function has no zeros and has simple poles at
nonpositive integers, so `rgamma` is an entire function with zeros
at the nonpositive integers. See the discussion in [dlmf]_ for
more details.
See Also
--------
gamma, gammaln, loggamma
References
----------
.. [dlmf] Nist, Digital Library of Mathematical functions,
https://dlmf.nist.gov/5.2#i
Examples
--------
>>> import scipy.special as sc
It is the reciprocal of the gamma function.
>>> sc.rgamma([1, 2, 3, 4])
array([1. , 1. , 0.5 , 0.16666667])
>>> 1 / sc.gamma([1, 2, 3, 4])
array([1. , 1. , 0.5 , 0.16666667])
It is zero at nonpositive integers.
>>> sc.rgamma([0, -1, -2, -3])
array([0., 0., 0., 0.])
It rapidly underflows to zero along the positive real axis.
>>> sc.rgamma([10, 100, 179])
array([2.75573192e-006, 1.07151029e-156, 0.00000000e+000])
""")
add_newdoc("round",
"""
round(x, out=None)
Round to the nearest integer.
Returns the nearest integer to `x`. If `x` ends in 0.5 exactly,
the nearest even integer is chosen.
Parameters
----------
x : array_like
Real valued input.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
The nearest integers to the elements of `x`. The result is of
floating type, not integer type.
Examples
--------
>>> import scipy.special as sc
It rounds to even.
>>> sc.round([0.5, 1.5])
array([0., 2.])
""")
add_newdoc("shichi",
r"""
shichi(x, out=None)
Hyperbolic sine and cosine integrals.
The hyperbolic sine integral is
.. math::
\int_0^x \frac{\sinh{t}}{t}dt
and the hyperbolic cosine integral is
.. math::
\gamma + \log(x) + \int_0^x \frac{\cosh{t} - 1}{t} dt
where :math:`\gamma` is Euler's constant and :math:`\log` is the
principal branch of the logarithm [1]_.
Parameters
----------
x : array_like
Real or complex points at which to compute the hyperbolic sine
and cosine integrals.
out : tuple of ndarray, optional
Optional output arrays for the function results
Returns
-------
si : scalar or ndarray
Hyperbolic sine integral at ``x``
ci : scalar or ndarray
Hyperbolic cosine integral at ``x``
See Also
--------
sici : Sine and cosine integrals.
exp1 : Exponential integral E1.
expi : Exponential integral Ei.
Notes
-----
For real arguments with ``x < 0``, ``chi`` is the real part of the
hyperbolic cosine integral. For such points ``chi(x)`` and ``chi(x
+ 0j)`` differ by a factor of ``1j*pi``.
For real arguments the function is computed by calling Cephes'
[2]_ *shichi* routine. For complex arguments the algorithm is based
on Mpmath's [3]_ *shi* and *chi* routines.
References
----------
.. [1] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
(See Section 5.2.)
.. [2] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
.. [3] Fredrik Johansson and others.
"mpmath: a Python library for arbitrary-precision floating-point
arithmetic" (Version 0.19) http://mpmath.org/
Examples
--------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from scipy.special import shichi, sici
`shichi` accepts real or complex input:
>>> shichi(0.5)
(0.5069967498196671, -0.05277684495649357)
>>> shichi(0.5 + 2.5j)
((0.11772029666668238+1.831091777729851j),
(0.29912435887648825+1.7395351121166562j))
The hyperbolic sine and cosine integrals Shi(z) and Chi(z) are
related to the sine and cosine integrals Si(z) and Ci(z) by
* Shi(z) = -i*Si(i*z)
* Chi(z) = Ci(-i*z) + i*pi/2
>>> z = 0.25 + 5j
>>> shi, chi = shichi(z)
>>> shi, -1j*sici(1j*z)[0] # Should be the same.
((-0.04834719325101729+1.5469354086921228j),
(-0.04834719325101729+1.5469354086921228j))
>>> chi, sici(-1j*z)[1] + 1j*np.pi/2 # Should be the same.
((-0.19568708973868087+1.556276312103824j),
(-0.19568708973868087+1.556276312103824j))
Plot the functions evaluated on the real axis:
>>> xp = np.geomspace(1e-8, 4.0, 250)
>>> x = np.concatenate((-xp[::-1], xp))
>>> shi, chi = shichi(x)
>>> fig, ax = plt.subplots()
>>> ax.plot(x, shi, label='Shi(x)')
>>> ax.plot(x, chi, '--', label='Chi(x)')
>>> ax.set_xlabel('x')
>>> ax.set_title('Hyperbolic Sine and Cosine Integrals')
>>> ax.legend(shadow=True, framealpha=1, loc='lower right')
>>> ax.grid(True)
>>> plt.show()
""")
add_newdoc("sici",
r"""
sici(x, out=None)
Sine and cosine integrals.
The sine integral is
.. math::
\int_0^x \frac{\sin{t}}{t}dt
and the cosine integral is
.. math::
\gamma + \log(x) + \int_0^x \frac{\cos{t} - 1}{t}dt
where :math:`\gamma` is Euler's constant and :math:`\log` is the
principal branch of the logarithm [1]_.
Parameters
----------
x : array_like
Real or complex points at which to compute the sine and cosine
integrals.
out : tuple of ndarray, optional
Optional output arrays for the function results
Returns
-------
si : scalar or ndarray
Sine integral at ``x``
ci : scalar or ndarray
Cosine integral at ``x``
See Also
--------
shichi : Hyperbolic sine and cosine integrals.
exp1 : Exponential integral E1.
expi : Exponential integral Ei.
Notes
-----
For real arguments with ``x < 0``, ``ci`` is the real part of the
cosine integral. For such points ``ci(x)`` and ``ci(x + 0j)``
differ by a factor of ``1j*pi``.
For real arguments the function is computed by calling Cephes'
[2]_ *sici* routine. For complex arguments the algorithm is based
on Mpmath's [3]_ *si* and *ci* routines.
References
----------
.. [1] Milton Abramowitz and Irene A. Stegun, eds.
Handbook of Mathematical Functions with Formulas,
Graphs, and Mathematical Tables. New York: Dover, 1972.
(See Section 5.2.)
.. [2] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
.. [3] Fredrik Johansson and others.
"mpmath: a Python library for arbitrary-precision floating-point
arithmetic" (Version 0.19) http://mpmath.org/
Examples
--------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from scipy.special import sici, exp1
`sici` accepts real or complex input:
>>> sici(2.5)
(1.7785201734438267, 0.2858711963653835)
>>> sici(2.5 + 3j)
((4.505735874563953+0.06863305018999577j),
(0.0793644206906966-2.935510262937543j))
For z in the right half plane, the sine and cosine integrals are
related to the exponential integral E1 (implemented in SciPy as
`scipy.special.exp1`) by
* Si(z) = (E1(i*z) - E1(-i*z))/2i + pi/2
* Ci(z) = -(E1(i*z) + E1(-i*z))/2
See [1]_ (equations 5.2.21 and 5.2.23).
We can verify these relations:
>>> z = 2 - 3j
>>> sici(z)
((4.54751388956229-1.3991965806460565j),
(1.408292501520851+2.9836177420296055j))
>>> (exp1(1j*z) - exp1(-1j*z))/2j + np.pi/2 # Same as sine integral
(4.54751388956229-1.3991965806460565j)
>>> -(exp1(1j*z) + exp1(-1j*z))/2 # Same as cosine integral
(1.408292501520851+2.9836177420296055j)
Plot the functions evaluated on the real axis; the dotted horizontal
lines are at pi/2 and -pi/2:
>>> x = np.linspace(-16, 16, 150)
>>> si, ci = sici(x)
>>> fig, ax = plt.subplots()
>>> ax.plot(x, si, label='Si(x)')
>>> ax.plot(x, ci, '--', label='Ci(x)')
>>> ax.legend(shadow=True, framealpha=1, loc='upper left')
>>> ax.set_xlabel('x')
>>> ax.set_title('Sine and Cosine Integrals')
>>> ax.axhline(np.pi/2, linestyle=':', alpha=0.5, color='k')
>>> ax.axhline(-np.pi/2, linestyle=':', alpha=0.5, color='k')
>>> ax.grid(True)
>>> plt.show()
""")
add_newdoc("sindg",
"""
sindg(x, out=None)
Sine of the angle `x` given in degrees.
Parameters
----------
x : array_like
Angle, given in degrees.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
Sine at the input.
See Also
--------
cosdg, tandg, cotdg
Examples
--------
>>> import numpy as np
>>> import scipy.special as sc
It is more accurate than using sine directly.
>>> x = 180 * np.arange(3)
>>> sc.sindg(x)
array([ 0., -0., 0.])
>>> np.sin(x * np.pi / 180)
array([ 0.0000000e+00, 1.2246468e-16, -2.4492936e-16])
""")
add_newdoc("smirnov",
r"""
smirnov(n, d, out=None)
Kolmogorov-Smirnov complementary cumulative distribution function
Returns the exact Kolmogorov-Smirnov complementary cumulative
    distribution function (aka the Survival Function) of Dn+ (or Dn-)
for a one-sided test of equality between an empirical and a
theoretical distribution. It is equal to the probability that the
maximum difference between a theoretical distribution and an empirical
one based on `n` samples is greater than d.
Parameters
----------
n : int
Number of samples
d : float array_like
Deviation between the Empirical CDF (ECDF) and the target CDF.
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
The value(s) of smirnov(n, d), Prob(Dn+ >= d) (Also Prob(Dn- >= d))
See Also
--------
smirnovi : The Inverse Survival Function for the distribution
scipy.stats.ksone : Provides the functionality as a continuous distribution
kolmogorov, kolmogi : Functions for the two-sided distribution
Notes
-----
`smirnov` is used by `stats.kstest` in the application of the
    Kolmogorov-Smirnov Goodness of Fit test. For historical reasons this
    function is exposed in `scipy.special`, but the recommended way to achieve
the most accurate CDF/SF/PDF/PPF/ISF computations is to use the
`stats.ksone` distribution.
Examples
--------
>>> import numpy as np
>>> from scipy.special import smirnov
>>> from scipy.stats import norm
Show the probability of a gap at least as big as 0, 0.5 and 1.0 for a
sample of size 5.
>>> smirnov(5, [0, 0.5, 1.0])
array([ 1. , 0.056, 0. ])
Compare a sample of size 5 against N(0, 1), the standard normal
distribution with mean 0 and standard deviation 1.
`x` is the sample.
>>> x = np.array([-1.392, -0.135, 0.114, 0.190, 1.82])
>>> target = norm(0, 1)
>>> cdfs = target.cdf(x)
>>> cdfs
array([0.0819612 , 0.44630594, 0.5453811 , 0.57534543, 0.9656205 ])
Construct the empirical CDF and the K-S statistics (Dn+, Dn-, Dn).
>>> n = len(x)
>>> ecdfs = np.arange(n+1, dtype=float)/n
>>> cols = np.column_stack([x, ecdfs[1:], cdfs, cdfs - ecdfs[:n],
... ecdfs[1:] - cdfs])
>>> with np.printoptions(precision=3):
... print(cols)
[[-1.392 0.2 0.082 0.082 0.118]
[-0.135 0.4 0.446 0.246 -0.046]
[ 0.114 0.6 0.545 0.145 0.055]
[ 0.19 0.8 0.575 -0.025 0.225]
[ 1.82 1. 0.966 0.166 0.034]]
>>> gaps = cols[:, -2:]
>>> Dnpm = np.max(gaps, axis=0)
>>> print(f'Dn-={Dnpm[0]:f}, Dn+={Dnpm[1]:f}')
Dn-=0.246306, Dn+=0.224655
>>> probs = smirnov(n, Dnpm)
>>> print(f'For a sample of size {n} drawn from N(0, 1):',
... f' Smirnov n={n}: Prob(Dn- >= {Dnpm[0]:f}) = {probs[0]:.4f}',
... f' Smirnov n={n}: Prob(Dn+ >= {Dnpm[1]:f}) = {probs[1]:.4f}',
... sep='\n')
For a sample of size 5 drawn from N(0, 1):
Smirnov n=5: Prob(Dn- >= 0.246306) = 0.4711
Smirnov n=5: Prob(Dn+ >= 0.224655) = 0.5245
Plot the empirical CDF and the standard normal CDF.
>>> import matplotlib.pyplot as plt
>>> plt.step(np.concatenate(([-2.5], x, [2.5])),
... np.concatenate((ecdfs, [1])),
... where='post', label='Empirical CDF')
>>> xx = np.linspace(-2.5, 2.5, 100)
>>> plt.plot(xx, target.cdf(xx), '--', label='CDF for N(0, 1)')
Add vertical lines marking Dn+ and Dn-.
>>> iminus, iplus = np.argmax(gaps, axis=0)
>>> plt.vlines([x[iminus]], ecdfs[iminus], cdfs[iminus], color='r',
... alpha=0.5, lw=4)
>>> plt.vlines([x[iplus]], cdfs[iplus], ecdfs[iplus+1], color='m',
... alpha=0.5, lw=4)
>>> plt.grid(True)
>>> plt.legend(framealpha=1, shadow=True)
>>> plt.show()
""")
add_newdoc("smirnovi",
"""
smirnovi(n, p, out=None)
Inverse to `smirnov`
Returns `d` such that ``smirnov(n, d) == p``, the critical value
corresponding to `p`.
Parameters
----------
n : int
Number of samples
p : float array_like
Probability
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
The value(s) of smirnovi(n, p), the critical values.
See Also
--------
smirnov : The Survival Function (SF) for the distribution
scipy.stats.ksone : Provides the functionality as a continuous distribution
kolmogorov, kolmogi : Functions for the two-sided distribution
scipy.stats.kstwobign : Two-sided Kolmogorov-Smirnov distribution, large n
Notes
-----
`smirnov` is used by `stats.kstest` in the application of the
Kolmogorov-Smirnov Goodness of Fit test. For historical reasons this
function is exposed in `scipy.special`, but the recommended way to achieve
the most accurate CDF/SF/PDF/PPF/ISF computations is to use the
`stats.ksone` distribution.
Examples
--------
>>> from scipy.special import smirnovi, smirnov
>>> n = 24
>>> deviations = [0.1, 0.2, 0.3]
Use `smirnov` to compute the complementary CDF of the Smirnov
distribution for the given number of samples and deviations.
>>> p = smirnov(n, deviations)
>>> p
array([0.58105083, 0.12826832, 0.01032231])
The inverse function ``smirnovi(n, p)`` returns ``deviations``.
>>> smirnovi(n, p)
array([0.1, 0.2, 0.3])
""")
add_newdoc("_smirnovc",
"""
_smirnovc(n, d)
Internal function, do not use.
""")
add_newdoc("_smirnovci",
"""
    _smirnovci(n, p)
    Internal function, do not use.
""")
add_newdoc("_smirnovp",
"""
_smirnovp(n, p)
Internal function, do not use.
""")
add_newdoc("spence",
r"""
spence(z, out=None)
Spence's function, also known as the dilogarithm.
It is defined to be
.. math::
\int_1^z \frac{\log(t)}{1 - t}dt
for complex :math:`z`, where the contour of integration is taken
to avoid the branch cut of the logarithm. Spence's function is
analytic everywhere except the negative real axis where it has a
branch cut.
Parameters
----------
z : array_like
Points at which to evaluate Spence's function
out : ndarray, optional
Optional output array for the function results
Returns
-------
s : scalar or ndarray
Computed values of Spence's function
Notes
-----
There is a different convention which defines Spence's function by
the integral
.. math::
-\int_0^z \frac{\log(1 - t)}{t}dt;
this is our ``spence(1 - z)``.
Examples
--------
>>> import numpy as np
>>> from scipy.special import spence
>>> import matplotlib.pyplot as plt
The function is defined for complex inputs:
>>> spence([1-1j, 1.5+2j, 3j, -10-5j])
array([-0.20561676+0.91596559j, -0.86766909-1.39560134j,
-0.59422064-2.49129918j, -1.14044398+6.80075924j])
For complex inputs on the branch cut, which is the negative real axis,
the function returns the limit for ``z`` with positive imaginary part.
For example, in the following, note the sign change of the imaginary
part of the output for ``z = -2`` and ``z = -2 - 1e-8j``:
>>> spence([-2 + 1e-8j, -2, -2 - 1e-8j])
array([2.32018041-3.45139229j, 2.32018042-3.4513923j ,
2.32018041+3.45139229j])
The function returns ``nan`` for real inputs on the branch cut:
>>> spence(-1.5)
nan
Verify some particular values: ``spence(0) = pi**2/6``,
``spence(1) = 0`` and ``spence(2) = -pi**2/12``.
>>> spence([0, 1, 2])
array([ 1.64493407, 0. , -0.82246703])
>>> np.pi**2/6, -np.pi**2/12
(1.6449340668482264, -0.8224670334241132)
Verify the identity::
spence(z) + spence(1 - z) = pi**2/6 - log(z)*log(1 - z)
>>> z = 3 + 4j
>>> spence(z) + spence(1 - z)
(-2.6523186143876067+1.8853470951513935j)
>>> np.pi**2/6 - np.log(z)*np.log(1 - z)
(-2.652318614387606+1.885347095151394j)
Plot the function for positive real input.
>>> fig, ax = plt.subplots()
>>> x = np.linspace(0, 6, 400)
>>> ax.plot(x, spence(x))
>>> ax.grid()
>>> ax.set_xlabel('x')
>>> ax.set_title('spence(x)')
>>> plt.show()
""")
add_newdoc(
"stdtr",
r"""
stdtr(df, t, out=None)
Student t distribution cumulative distribution function
Returns the integral:
.. math::
\frac{\Gamma((df+1)/2)}{\sqrt{\pi df} \Gamma(df/2)}
\int_{-\infty}^t (1+x^2/df)^{-(df+1)/2}\, dx
Parameters
----------
df : array_like
Degrees of freedom
t : array_like
Upper bound of the integral
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
Value of the Student t CDF at t
See Also
--------
stdtridf : inverse of stdtr with respect to `df`
stdtrit : inverse of stdtr with respect to `t`
scipy.stats.t : student t distribution
Notes
-----
The student t distribution is also available as `scipy.stats.t`.
Calling `stdtr` directly can improve performance compared to the
``cdf`` method of `scipy.stats.t` (see last example below).
Examples
--------
Calculate the function for ``df=3`` at ``t=1``.
>>> import numpy as np
>>> from scipy.special import stdtr
>>> import matplotlib.pyplot as plt
>>> stdtr(3, 1)
0.8044988905221148
Plot the function for three different degrees of freedom.
>>> x = np.linspace(-10, 10, 1000)
>>> fig, ax = plt.subplots()
>>> parameters = [(1, "solid"), (3, "dashed"), (10, "dotted")]
>>> for (df, linestyle) in parameters:
... ax.plot(x, stdtr(df, x), ls=linestyle, label=f"$df={df}$")
>>> ax.legend()
>>> ax.set_title("Student t distribution cumulative distribution function")
>>> plt.show()
The function can be computed for several degrees of freedom at the same
time by providing a NumPy array or list for `df`:
>>> stdtr([1, 2, 3], 1)
array([0.75 , 0.78867513, 0.80449889])
It is possible to calculate the function at several points for several
different degrees of freedom simultaneously by providing arrays for `df`
and `t` with shapes compatible for broadcasting. Compute `stdtr` at
4 points for 3 degrees of freedom resulting in an array of shape 3x4.
>>> dfs = np.array([[1], [2], [3]])
>>> t = np.array([2, 4, 6, 8])
>>> dfs.shape, t.shape
((3, 1), (4,))
>>> stdtr(dfs, t)
array([[0.85241638, 0.92202087, 0.94743154, 0.96041658],
[0.90824829, 0.97140452, 0.98666426, 0.99236596],
[0.93033702, 0.98599577, 0.99536364, 0.99796171]])
The t distribution is also available as `scipy.stats.t`. Calling `stdtr`
directly can be much faster than calling the ``cdf`` method of
`scipy.stats.t`. To get the same results, one must use the following
parametrization: ``scipy.stats.t(df).cdf(x) = stdtr(df, x)``.
>>> from scipy.stats import t
>>> df, x = 3, 1
>>> stdtr_result = stdtr(df, x) # this can be faster than below
>>> stats_result = t(df).cdf(x)
>>> stats_result == stdtr_result # test that results are equal
True
""")
add_newdoc("stdtridf",
"""
stdtridf(p, t, out=None)
Inverse of `stdtr` vs df
Returns the argument df such that stdtr(df, t) is equal to `p`.
Parameters
----------
p : array_like
Probability
t : array_like
Upper bound of the integral
out : ndarray, optional
Optional output array for the function results
Returns
-------
df : scalar or ndarray
Value of `df` such that ``stdtr(df, t) == p``
See Also
--------
stdtr : Student t CDF
stdtrit : inverse of stdtr with respect to `t`
scipy.stats.t : Student t distribution
Examples
--------
Compute the student t cumulative distribution function for one
parameter set.
>>> from scipy.special import stdtr, stdtridf
>>> df, x = 5, 2
>>> cdf_value = stdtr(df, x)
>>> cdf_value
0.9490302605850709
Verify that `stdtridf` recovers the original value for `df` given
the CDF value and `x`.
>>> stdtridf(cdf_value, x)
5.0
""")
add_newdoc("stdtrit",
"""
stdtrit(df, p, out=None)
The `p`-th quantile of the student t distribution.
This function is the inverse of the student t distribution cumulative
distribution function (CDF), returning `t` such that `stdtr(df, t) = p`.
Returns the argument `t` such that stdtr(df, t) is equal to `p`.
Parameters
----------
df : array_like
Degrees of freedom
p : array_like
Probability
out : ndarray, optional
Optional output array for the function results
Returns
-------
t : scalar or ndarray
Value of `t` such that ``stdtr(df, t) == p``
See Also
--------
stdtr : Student t CDF
stdtridf : inverse of stdtr with respect to `df`
scipy.stats.t : Student t distribution
Notes
-----
The student t distribution is also available as `scipy.stats.t`. Calling
`stdtrit` directly can improve performance compared to the ``ppf``
method of `scipy.stats.t` (see last example below).
Examples
--------
`stdtrit` represents the inverse of the student t distribution CDF which
is available as `stdtr`. Here, we calculate the CDF for ``df`` at
``x=1``. `stdtrit` then returns ``1`` up to floating point errors
given the same value for `df` and the computed CDF value.
>>> import numpy as np
>>> from scipy.special import stdtr, stdtrit
>>> import matplotlib.pyplot as plt
>>> df = 3
>>> x = 1
>>> cdf_value = stdtr(df, x)
>>> stdtrit(df, cdf_value)
0.9999999994418539
Plot the function for three different degrees of freedom.
>>> x = np.linspace(0, 1, 1000)
>>> parameters = [(1, "solid"), (2, "dashed"), (5, "dotted")]
>>> fig, ax = plt.subplots()
>>> for (df, linestyle) in parameters:
... ax.plot(x, stdtrit(df, x), ls=linestyle, label=f"$df={df}$")
>>> ax.legend()
>>> ax.set_ylim(-10, 10)
>>> ax.set_title("Student t distribution quantile function")
>>> plt.show()
The function can be computed for several degrees of freedom at the same
time by providing a NumPy array or list for `df`:
>>> stdtrit([1, 2, 3], 0.7)
array([0.72654253, 0.6172134 , 0.58438973])
It is possible to calculate the function at several points for several
different degrees of freedom simultaneously by providing arrays for `df`
and `p` with shapes compatible for broadcasting. Compute `stdtrit` at
4 points for 3 degrees of freedom resulting in an array of shape 3x4.
>>> dfs = np.array([[1], [2], [3]])
>>> p = np.array([0.2, 0.4, 0.7, 0.8])
>>> dfs.shape, p.shape
((3, 1), (4,))
>>> stdtrit(dfs, p)
array([[-1.37638192, -0.3249197 , 0.72654253, 1.37638192],
[-1.06066017, -0.28867513, 0.6172134 , 1.06066017],
[-0.97847231, -0.27667066, 0.58438973, 0.97847231]])
The t distribution is also available as `scipy.stats.t`. Calling `stdtrit`
directly can be much faster than calling the ``ppf`` method of
`scipy.stats.t`. To get the same results, one must use the following
parametrization: ``scipy.stats.t(df).ppf(x) = stdtrit(df, x)``.
>>> from scipy.stats import t
>>> df, x = 3, 0.5
>>> stdtrit_result = stdtrit(df, x) # this can be faster than below
>>> stats_result = t(df).ppf(x)
>>> stats_result == stdtrit_result # test that results are equal
True
""")
add_newdoc("struve",
r"""
struve(v, x, out=None)
Struve function.
Return the value of the Struve function of order `v` at `x`. The Struve
function is defined as,
.. math::
H_v(x) = (z/2)^{v + 1} \sum_{n=0}^\infty \frac{(-1)^n (z/2)^{2n}}{\Gamma(n + \frac{3}{2}) \Gamma(n + v + \frac{3}{2})},
where :math:`\Gamma` is the gamma function.
Parameters
----------
v : array_like
Order of the Struve function (float).
x : array_like
Argument of the Struve function (float; must be positive unless `v` is
an integer).
out : ndarray, optional
Optional output array for the function results
Returns
-------
H : scalar or ndarray
Value of the Struve function of order `v` at `x`.
Notes
-----
Three methods discussed in [1]_ are used to evaluate the Struve function:
- power series
- expansion in Bessel functions (if :math:`|z| < |v| + 20`)
- asymptotic large-z expansion (if :math:`z \geq 0.7v + 12`)
Rounding errors are estimated based on the largest terms in the sums, and
the result associated with the smallest error is returned.
    See Also
--------
modstruve: Modified Struve function
References
----------
.. [1] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/11
Examples
--------
Calculate the Struve function of order 1 at 2.
>>> import numpy as np
>>> from scipy.special import struve
>>> import matplotlib.pyplot as plt
>>> struve(1, 2.)
0.6467637282835622
Calculate the Struve function at 2 for orders 1, 2 and 3 by providing
a list for the order parameter `v`.
>>> struve([1, 2, 3], 2.)
array([0.64676373, 0.28031806, 0.08363767])
Calculate the Struve function of order 1 for several points by providing
an array for `x`.
>>> points = np.array([2., 5., 8.])
>>> struve(1, points)
array([0.64676373, 0.80781195, 0.48811605])
Compute the Struve function for several orders at several points by
providing arrays for `v` and `z`. The arrays have to be broadcastable
to the correct shapes.
>>> orders = np.array([[1], [2], [3]])
>>> points.shape, orders.shape
((3,), (3, 1))
>>> struve(orders, points)
array([[0.64676373, 0.80781195, 0.48811605],
[0.28031806, 1.56937455, 1.51769363],
[0.08363767, 1.50872065, 2.98697513]])
Plot the Struve functions of order 0 to 3 from -10 to 10.
>>> fig, ax = plt.subplots()
>>> x = np.linspace(-10., 10., 1000)
>>> for i in range(4):
... ax.plot(x, struve(i, x), label=f'$H_{i!r}$')
>>> ax.legend(ncol=2)
>>> ax.set_xlim(-10, 10)
>>> ax.set_title(r"Struve functions $H_{\nu}$")
>>> plt.show()
""")
add_newdoc("tandg",
"""
tandg(x, out=None)
Tangent of angle `x` given in degrees.
Parameters
----------
x : array_like
Angle, given in degrees.
out : ndarray, optional
Optional output array for the function results.
Returns
-------
scalar or ndarray
Tangent at the input.
See Also
--------
sindg, cosdg, cotdg
Examples
--------
>>> import numpy as np
>>> import scipy.special as sc
It is more accurate than using tangent directly.
>>> x = 180 * np.arange(3)
>>> sc.tandg(x)
array([0., 0., 0.])
>>> np.tan(x * np.pi / 180)
array([ 0.0000000e+00, -1.2246468e-16, -2.4492936e-16])
""")
add_newdoc(
"tklmbda",
r"""
tklmbda(x, lmbda, out=None)
Cumulative distribution function of the Tukey lambda distribution.
Parameters
----------
x, lmbda : array_like
Parameters
out : ndarray, optional
Optional output array for the function results
Returns
-------
cdf : scalar or ndarray
Value of the Tukey lambda CDF
See Also
--------
scipy.stats.tukeylambda : Tukey lambda distribution
Examples
--------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from scipy.special import tklmbda, expit
Compute the cumulative distribution function (CDF) of the Tukey lambda
distribution at several ``x`` values for `lmbda` = -1.5.
>>> x = np.linspace(-2, 2, 9)
>>> x
array([-2. , -1.5, -1. , -0.5, 0. , 0.5, 1. , 1.5, 2. ])
>>> tklmbda(x, -1.5)
array([0.34688734, 0.3786554 , 0.41528805, 0.45629737, 0.5 ,
0.54370263, 0.58471195, 0.6213446 , 0.65311266])
When `lmbda` is 0, the function is the logistic sigmoid function,
which is implemented in `scipy.special` as `expit`.
>>> tklmbda(x, 0)
array([0.11920292, 0.18242552, 0.26894142, 0.37754067, 0.5 ,
0.62245933, 0.73105858, 0.81757448, 0.88079708])
>>> expit(x)
array([0.11920292, 0.18242552, 0.26894142, 0.37754067, 0.5 ,
0.62245933, 0.73105858, 0.81757448, 0.88079708])
When `lmbda` is 1, the Tukey lambda distribution is uniform on the
interval [-1, 1], so the CDF increases linearly.
>>> t = np.linspace(-1, 1, 9)
>>> tklmbda(t, 1)
array([0. , 0.125, 0.25 , 0.375, 0.5 , 0.625, 0.75 , 0.875, 1. ])
In the following, we generate plots for several values of `lmbda`.
The first figure shows graphs for `lmbda` <= 0.
>>> styles = ['-', '-.', '--', ':']
>>> fig, ax = plt.subplots()
>>> x = np.linspace(-12, 12, 500)
>>> for k, lmbda in enumerate([-1.0, -0.5, 0.0]):
... y = tklmbda(x, lmbda)
    ...     ax.plot(x, y, styles[k], label=rf'$\lambda$ = {lmbda:-4.1f}')
    >>> ax.set_title(r'tklmbda(x, $\lambda$)')
>>> ax.set_label('x')
>>> ax.legend(framealpha=1, shadow=True)
>>> ax.grid(True)
The second figure shows graphs for `lmbda` > 0. The dots in the
graphs show the bounds of the support of the distribution.
>>> fig, ax = plt.subplots()
>>> x = np.linspace(-4.2, 4.2, 500)
>>> lmbdas = [0.25, 0.5, 1.0, 1.5]
>>> for k, lmbda in enumerate(lmbdas):
... y = tklmbda(x, lmbda)
    ...     ax.plot(x, y, styles[k], label=rf'$\lambda$ = {lmbda}')
>>> ax.set_prop_cycle(None)
>>> for lmbda in lmbdas:
... ax.plot([-1/lmbda, 1/lmbda], [0, 1], '.', ms=8)
    >>> ax.set_title(r'tklmbda(x, $\lambda$)')
>>> ax.set_xlabel('x')
>>> ax.legend(framealpha=1, shadow=True)
>>> ax.grid(True)
>>> plt.tight_layout()
>>> plt.show()
The CDF of the Tukey lambda distribution is also implemented as the
``cdf`` method of `scipy.stats.tukeylambda`. In the following,
``tukeylambda.cdf(x, -0.5)`` and ``tklmbda(x, -0.5)`` compute the
same values:
>>> from scipy.stats import tukeylambda
>>> x = np.linspace(-2, 2, 9)
>>> tukeylambda.cdf(x, -0.5)
array([0.21995157, 0.27093858, 0.33541677, 0.41328161, 0.5 ,
0.58671839, 0.66458323, 0.72906142, 0.78004843])
>>> tklmbda(x, -0.5)
array([0.21995157, 0.27093858, 0.33541677, 0.41328161, 0.5 ,
0.58671839, 0.66458323, 0.72906142, 0.78004843])
The implementation in ``tukeylambda`` also provides location and scale
parameters, and other methods such as ``pdf()`` (the probability
density function) and ``ppf()`` (the inverse of the CDF), so for
working with the Tukey lambda distribution, ``tukeylambda`` is more
generally useful. The primary advantage of ``tklmbda`` is that it is
significantly faster than ``tukeylambda.cdf``.
""")
add_newdoc("wofz",
"""
wofz(z, out=None)
Faddeeva function
Returns the value of the Faddeeva function for complex argument::
exp(-z**2) * erfc(-i*z)
Parameters
----------
z : array_like
complex argument
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
Value of the Faddeeva function
See Also
--------
dawsn, erf, erfc, erfcx, erfi
References
----------
.. [1] Steven G. Johnson, Faddeeva W function implementation.
http://ab-initio.mit.edu/Faddeeva
Examples
--------
>>> import numpy as np
>>> from scipy import special
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-3, 3)
>>> z = special.wofz(x)
>>> plt.plot(x, z.real, label='wofz(x).real')
>>> plt.plot(x, z.imag, label='wofz(x).imag')
>>> plt.xlabel('$x$')
>>> plt.legend(framealpha=1, shadow=True)
>>> plt.grid(alpha=0.25)
>>> plt.show()
""")
add_newdoc("xlogy",
"""
xlogy(x, y, out=None)
Compute ``x*log(y)`` so that the result is 0 if ``x = 0``.
Parameters
----------
x : array_like
Multiplier
y : array_like
Argument
out : ndarray, optional
Optional output array for the function results
Returns
-------
z : scalar or ndarray
Computed x*log(y)
Notes
-----
The log function used in the computation is the natural log.
.. versionadded:: 0.13.0
Examples
--------
We can use this function to calculate the binary logistic loss also
known as the binary cross entropy. This loss function is used for
binary classification problems and is defined as:
.. math::
L = 1/n * \\sum_{i=0}^n -(y_i*log(y\\_pred_i) + (1-y_i)*log(1-y\\_pred_i))
We can define the parameters `x` and `y` as y and y_pred respectively.
y is the array of the actual labels which over here can be either 0 or 1.
y_pred is the array of the predicted probabilities with respect to
the positive class (1).
>>> import numpy as np
>>> from scipy.special import xlogy
>>> y = np.array([0, 1, 0, 1, 1, 0])
>>> y_pred = np.array([0.3, 0.8, 0.4, 0.7, 0.9, 0.2])
>>> n = len(y)
>>> loss = -(xlogy(y, y_pred) + xlogy(1 - y, 1 - y_pred)).sum()
>>> loss /= n
>>> loss
0.29597052165495025
A lower loss is usually better as it indicates that the predictions are
similar to the actual labels. In this example since our predicted
    probabilities are close to the actual labels, we get an overall loss
that is reasonably low and appropriate.
""")
add_newdoc("xlog1py",
"""
xlog1py(x, y, out=None)
Compute ``x*log1p(y)`` so that the result is 0 if ``x = 0``.
Parameters
----------
x : array_like
Multiplier
y : array_like
Argument
out : ndarray, optional
Optional output array for the function results
Returns
-------
z : scalar or ndarray
Computed x*log1p(y)
Notes
-----
.. versionadded:: 0.13.0
Examples
--------
This example shows how the function can be used to calculate the log of
the probability mass function for a geometric discrete random variable.
The probability mass function of the geometric distribution is defined
as follows:
.. math:: f(k) = (1-p)^{k-1} p
where :math:`p` is the probability of a single success
and :math:`1-p` is the probability of a single failure
and :math:`k` is the number of trials to get the first success.
>>> import numpy as np
>>> from scipy.special import xlog1py
>>> p = 0.5
>>> k = 100
>>> _pmf = np.power(1 - p, k - 1) * p
>>> _pmf
7.888609052210118e-31
If we take k as a relatively large number the value of the probability
mass function can become very low. In such cases taking the log of the
pmf would be more suitable as the log function can change the values
to a scale that is more appropriate to work with.
>>> _log_pmf = xlog1py(k - 1, -p) + np.log(p)
>>> _log_pmf
-69.31471805599453
We can confirm that we get a value close to the original pmf value by
taking the exponential of the log pmf.
>>> _orig_pmf = np.exp(_log_pmf)
>>> np.isclose(_pmf, _orig_pmf)
True
""")
add_newdoc("y0",
r"""
y0(x, out=None)
Bessel function of the second kind of order 0.
Parameters
----------
x : array_like
Argument (float).
out : ndarray, optional
Optional output array for the function results
Returns
-------
Y : scalar or ndarray
Value of the Bessel function of the second kind of order 0 at `x`.
Notes
-----
The domain is divided into the intervals [0, 5] and (5, infinity). In the
first interval a rational approximation :math:`R(x)` is employed to
compute,
.. math::
Y_0(x) = R(x) + \frac{2 \log(x) J_0(x)}{\pi},
where :math:`J_0` is the Bessel function of the first kind of order 0.
In the second interval, the Hankel asymptotic expansion is employed with
two rational functions of degree 6/6 and 7/7.
This function is a wrapper for the Cephes [1]_ routine `y0`.
    See Also
--------
j0: Bessel function of the first kind of order 0
    yv: Bessel function of the second kind
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
Examples
--------
Calculate the function at one point:
>>> from scipy.special import y0
>>> y0(1.)
0.08825696421567697
Calculate at several points:
>>> import numpy as np
>>> y0(np.array([0.5, 2., 3.]))
array([-0.44451873, 0.51037567, 0.37685001])
Plot the function from 0 to 10.
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> x = np.linspace(0., 10., 1000)
>>> y = y0(x)
>>> ax.plot(x, y)
>>> plt.show()
""")
add_newdoc("y1",
"""
y1(x, out=None)
Bessel function of the second kind of order 1.
Parameters
----------
x : array_like
Argument (float).
out : ndarray, optional
Optional output array for the function results
Returns
-------
Y : scalar or ndarray
Value of the Bessel function of the second kind of order 1 at `x`.
Notes
-----
The domain is divided into the intervals [0, 8] and (8, infinity). In the
first interval a 25 term Chebyshev expansion is used, and computing
:math:`J_1` (the Bessel function of the first kind) is required. In the
second, the asymptotic trigonometric representation is employed using two
rational functions of degree 5/5.
This function is a wrapper for the Cephes [1]_ routine `y1`.
    See Also
--------
j1: Bessel function of the first kind of order 1
yn: Bessel function of the second kind
yv: Bessel function of the second kind
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
Examples
--------
Calculate the function at one point:
>>> from scipy.special import y1
>>> y1(1.)
-0.7812128213002888
Calculate at several points:
>>> import numpy as np
>>> y1(np.array([0.5, 2., 3.]))
array([-1.47147239, -0.10703243, 0.32467442])
Plot the function from 0 to 10.
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> x = np.linspace(0., 10., 1000)
>>> y = y1(x)
>>> ax.plot(x, y)
>>> plt.show()
""")
add_newdoc("yn",
r"""
yn(n, x, out=None)
Bessel function of the second kind of integer order and real argument.
Parameters
----------
n : array_like
Order (integer).
x : array_like
Argument (float).
out : ndarray, optional
Optional output array for the function results
Returns
-------
Y : scalar or ndarray
Value of the Bessel function, :math:`Y_n(x)`.
Notes
-----
Wrapper for the Cephes [1]_ routine `yn`.
The function is evaluated by forward recurrence on `n`, starting with
values computed by the Cephes routines `y0` and `y1`. If `n = 0` or 1,
the routine for `y0` or `y1` is called directly.
    See Also
--------
yv : For real order and real or complex argument.
y0: faster implementation of this function for order 0
y1: faster implementation of this function for order 1
References
----------
.. [1] Cephes Mathematical Functions Library,
http://www.netlib.org/cephes/
Examples
--------
Evaluate the function of order 0 at one point.
>>> from scipy.special import yn
>>> yn(0, 1.)
0.08825696421567697
Evaluate the function at one point for different orders.
>>> yn(0, 1.), yn(1, 1.), yn(2, 1.)
(0.08825696421567697, -0.7812128213002888, -1.6506826068162546)
The evaluation for different orders can be carried out in one call by
providing a list or NumPy array as argument for the `v` parameter:
>>> yn([0, 1, 2], 1.)
array([ 0.08825696, -0.78121282, -1.65068261])
Evaluate the function at several points for order 0 by providing an
array for `z`.
>>> import numpy as np
>>> points = np.array([0.5, 3., 8.])
>>> yn(0, points)
array([-0.44451873, 0.37685001, 0.22352149])
If `z` is an array, the order parameter `v` must be broadcastable to
the correct shape if different orders shall be computed in one call.
To calculate the orders 0 and 1 for an 1D array:
>>> orders = np.array([[0], [1]])
>>> orders.shape
(2, 1)
>>> yn(orders, points)
array([[-0.44451873, 0.37685001, 0.22352149],
[-1.47147239, 0.32467442, -0.15806046]])
Plot the functions of order 0 to 3 from 0 to 10.
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> x = np.linspace(0., 10., 1000)
>>> for i in range(4):
... ax.plot(x, yn(i, x), label=f'$Y_{i!r}$')
>>> ax.set_ylim(-3, 1)
>>> ax.legend()
>>> plt.show()
""")
add_newdoc("yv",
r"""
yv(v, z, out=None)
Bessel function of the second kind of real order and complex argument.
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
out : ndarray, optional
Optional output array for the function results
Returns
-------
Y : scalar or ndarray
Value of the Bessel function of the second kind, :math:`Y_v(x)`.
Notes
-----
For positive `v` values, the computation is carried out using the
AMOS [1]_ `zbesy` routine, which exploits the connection to the Hankel
Bessel functions :math:`H_v^{(1)}` and :math:`H_v^{(2)}`,
.. math:: Y_v(z) = \frac{1}{2\imath} (H_v^{(1)} - H_v^{(2)}).
For negative `v` values the formula,
.. math:: Y_{-v}(z) = Y_v(z) \cos(\pi v) + J_v(z) \sin(\pi v)
is used, where :math:`J_v(z)` is the Bessel function of the first kind,
computed using the AMOS routine `zbesj`. Note that the second term is
exactly zero for integer `v`; to improve accuracy the second term is
explicitly omitted for `v` values such that `v = floor(v)`.
    See Also
--------
yve : :math:`Y_v` with leading exponential behavior stripped off.
y0: faster implementation of this function for order 0
y1: faster implementation of this function for order 1
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
Examples
--------
Evaluate the function of order 0 at one point.
>>> from scipy.special import yv
>>> yv(0, 1.)
0.088256964215677
Evaluate the function at one point for different orders.
>>> yv(0, 1.), yv(1, 1.), yv(1.5, 1.)
(0.088256964215677, -0.7812128213002889, -1.102495575160179)
The evaluation for different orders can be carried out in one call by
providing a list or NumPy array as argument for the `v` parameter:
>>> yv([0, 1, 1.5], 1.)
array([ 0.08825696, -0.78121282, -1.10249558])
Evaluate the function at several points for order 0 by providing an
array for `z`.
>>> import numpy as np
>>> points = np.array([0.5, 3., 8.])
>>> yv(0, points)
array([-0.44451873, 0.37685001, 0.22352149])
If `z` is an array, the order parameter `v` must be broadcastable to
the correct shape if different orders shall be computed in one call.
To calculate the orders 0 and 1 for an 1D array:
>>> orders = np.array([[0], [1]])
>>> orders.shape
(2, 1)
>>> yv(orders, points)
array([[-0.44451873, 0.37685001, 0.22352149],
[-1.47147239, 0.32467442, -0.15806046]])
Plot the functions of order 0 to 3 from 0 to 10.
>>> import matplotlib.pyplot as plt
>>> fig, ax = plt.subplots()
>>> x = np.linspace(0., 10., 1000)
>>> for i in range(4):
... ax.plot(x, yv(i, x), label=f'$Y_{i!r}$')
>>> ax.set_ylim(-3, 1)
>>> ax.legend()
>>> plt.show()
""")
add_newdoc("yve",
r"""
yve(v, z, out=None)
Exponentially scaled Bessel function of the second kind of real order.
Returns the exponentially scaled Bessel function of the second
kind of real order `v` at complex `z`::
yve(v, z) = yv(v, z) * exp(-abs(z.imag))
Parameters
----------
v : array_like
Order (float).
z : array_like
Argument (float or complex).
out : ndarray, optional
Optional output array for the function results
Returns
-------
Y : scalar or ndarray
Value of the exponentially scaled Bessel function.
See Also
--------
yv: Unscaled Bessel function of the second kind of real order.
Notes
-----
For positive `v` values, the computation is carried out using the
AMOS [1]_ `zbesy` routine, which exploits the connection to the Hankel
Bessel functions :math:`H_v^{(1)}` and :math:`H_v^{(2)}`,
.. math:: Y_v(z) = \frac{1}{2\imath} (H_v^{(1)} - H_v^{(2)}).
For negative `v` values the formula,
.. math:: Y_{-v}(z) = Y_v(z) \cos(\pi v) + J_v(z) \sin(\pi v)
is used, where :math:`J_v(z)` is the Bessel function of the first kind,
computed using the AMOS routine `zbesj`. Note that the second term is
exactly zero for integer `v`; to improve accuracy the second term is
explicitly omitted for `v` values such that `v = floor(v)`.
Exponentially scaled Bessel functions are useful for large `z`:
for these, the unscaled Bessel functions can easily under-or overflow.
References
----------
.. [1] Donald E. Amos, "AMOS, A Portable Package for Bessel Functions
of a Complex Argument and Nonnegative Order",
http://netlib.org/amos/
Examples
--------
Compare the output of `yv` and `yve` for large complex arguments for `z`
by computing their values for order ``v=1`` at ``z=1000j``. We see that
`yv` returns nan but `yve` returns a finite number:
>>> import numpy as np
>>> from scipy.special import yv, yve
>>> v = 1
>>> z = 1000j
>>> yv(v, z), yve(v, z)
((nan+nanj), (-0.012610930256928629+7.721967686709076e-19j))
For real arguments for `z`, `yve` returns the same as `yv` up to
floating point errors.
>>> v, z = 1, 1000
>>> yv(v, z), yve(v, z)
(-0.02478433129235178, -0.02478433129235179)
The function can be evaluated for several orders at the same time by
providing a list or NumPy array for `v`:
>>> yve([1, 2, 3], 1j)
array([-0.20791042+0.14096627j, 0.38053618-0.04993878j,
0.00815531-1.66311097j])
In the same way, the function can be evaluated at several points in one
call by providing a list or NumPy array for `z`:
>>> yve(1, np.array([1j, 2j, 3j]))
array([-0.20791042+0.14096627j, -0.21526929+0.01205044j,
-0.19682671+0.00127278j])
It is also possible to evaluate several orders at several points
at the same time by providing arrays for `v` and `z` with
broadcasting compatible shapes. Compute `yve` for two different orders
`v` and three points `z` resulting in a 2x3 array.
>>> v = np.array([[1], [2]])
>>> z = np.array([3j, 4j, 5j])
>>> v.shape, z.shape
((2, 1), (3,))
>>> yve(v, z)
array([[-1.96826713e-01+1.27277544e-03j, -1.78750840e-01+1.45558819e-04j,
-1.63972267e-01+1.73494110e-05j],
[1.94960056e-03-1.11782545e-01j, 2.02902325e-04-1.17626501e-01j,
2.27727687e-05-1.17951906e-01j]])
""")
add_newdoc("_zeta",
"""
_zeta(x, q)
Internal function, Hurwitz zeta.
""")
add_newdoc("zetac",
"""
zetac(x, out=None)
Riemann zeta function minus 1.
This function is defined as
.. math:: \\zeta(x) = \\sum_{k=2}^{\\infty} 1 / k^x,
where ``x > 1``. For ``x < 1`` the analytic continuation is
computed. For more information on the Riemann zeta function, see
[dlmf]_.
Parameters
----------
x : array_like of float
Values at which to compute zeta(x) - 1 (must be real).
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
Values of zeta(x) - 1.
See Also
--------
zeta
Examples
--------
>>> import numpy as np
>>> from scipy.special import zetac, zeta
Some special values:
>>> zetac(2), np.pi**2/6 - 1
(0.64493406684822641, 0.6449340668482264)
>>> zetac(-1), -1.0/12 - 1
(-1.0833333333333333, -1.0833333333333333)
Compare ``zetac(x)`` to ``zeta(x) - 1`` for large `x`:
>>> zetac(60), zeta(60) - 1
(8.673617380119933e-19, 0.0)
References
----------
.. [dlmf] NIST Digital Library of Mathematical Functions
https://dlmf.nist.gov/25
""")
add_newdoc("_riemann_zeta",
"""
Internal function, use `zeta` instead.
""")
add_newdoc("_struve_asymp_large_z",
"""
_struve_asymp_large_z(v, z, is_h)
Internal function for testing `struve` & `modstruve`
Evaluates using asymptotic expansion
Returns
-------
v, err
""")
add_newdoc("_struve_power_series",
"""
_struve_power_series(v, z, is_h)
Internal function for testing `struve` & `modstruve`
Evaluates using power series
Returns
-------
v, err
""")
add_newdoc("_struve_bessel_series",
"""
_struve_bessel_series(v, z, is_h)
Internal function for testing `struve` & `modstruve`
Evaluates using Bessel function series
Returns
-------
v, err
""")
add_newdoc("_spherical_jn",
"""
Internal function, use `spherical_jn` instead.
""")
add_newdoc("_spherical_jn_d",
"""
Internal function, use `spherical_jn` instead.
""")
add_newdoc("_spherical_yn",
"""
Internal function, use `spherical_yn` instead.
""")
add_newdoc("_spherical_yn_d",
"""
Internal function, use `spherical_yn` instead.
""")
add_newdoc("_spherical_in",
"""
Internal function, use `spherical_in` instead.
""")
add_newdoc("_spherical_in_d",
"""
Internal function, use `spherical_in` instead.
""")
add_newdoc("_spherical_kn",
"""
Internal function, use `spherical_kn` instead.
""")
add_newdoc("_spherical_kn_d",
"""
Internal function, use `spherical_kn` instead.
""")
add_newdoc("loggamma",
r"""
loggamma(z, out=None)
Principal branch of the logarithm of the gamma function.
Defined to be :math:`\log(\Gamma(x))` for :math:`x > 0` and
extended to the complex plane by analytic continuation. The
function has a single branch cut on the negative real axis.
.. versionadded:: 0.18.0
Parameters
----------
z : array_like
Values in the complex plane at which to compute ``loggamma``
out : ndarray, optional
Output array for computed values of ``loggamma``
Returns
-------
loggamma : scalar or ndarray
Values of ``loggamma`` at z.
Notes
-----
It is not generally true that :math:`\log\Gamma(z) =
\log(\Gamma(z))`, though the real parts of the functions do
agree. The benefit of not defining `loggamma` as
:math:`\log(\Gamma(z))` is that the latter function has a
complicated branch cut structure whereas `loggamma` is analytic
except for on the negative real axis.
The identities
.. math::
\exp(\log\Gamma(z)) &= \Gamma(z) \\
\log\Gamma(z + 1) &= \log(z) + \log\Gamma(z)
make `loggamma` useful for working in complex logspace.
On the real line `loggamma` is related to `gammaln` via
``exp(loggamma(x + 0j)) = gammasgn(x)*exp(gammaln(x))``, up to
rounding error.
The implementation here is based on [hare1997]_.
See also
--------
gammaln : logarithm of the absolute value of the gamma function
gammasgn : sign of the gamma function
References
----------
.. [hare1997] D.E.G. Hare,
*Computing the Principal Branch of log-Gamma*,
Journal of Algorithms, Volume 25, Issue 2, November 1997, pages 221-236.
""")
add_newdoc("_sinpi",
"""
Internal function, do not use.
""")
add_newdoc("_cospi",
"""
Internal function, do not use.
""")
add_newdoc("owens_t",
"""
owens_t(h, a, out=None)
Owen's T Function.
The function T(h, a) gives the probability of the event
(X > h and 0 < Y < a * X) where X and Y are independent
standard normal random variables.
Parameters
----------
h: array_like
Input value.
a: array_like
Input value.
out : ndarray, optional
Optional output array for the function results
Returns
-------
t: scalar or ndarray
Probability of the event (X > h and 0 < Y < a * X),
where X and Y are independent standard normal random variables.
Examples
--------
>>> from scipy import special
>>> a = 3.5
>>> h = 0.78
>>> special.owens_t(h, a)
0.10877216734852274
References
----------
.. [1] M. Patefield and D. Tandy, "Fast and accurate calculation of
Owen's T Function", Statistical Software vol. 5, pp. 1-25, 2000.
""")
add_newdoc("_factorial",
"""
Internal function, do not use.
""")
add_newdoc("wright_bessel",
r"""
wright_bessel(a, b, x, out=None)
Wright's generalized Bessel function.
Wright's generalized Bessel function is an entire function and defined as
.. math:: \Phi(a, b; x) = \sum_{k=0}^\infty \frac{x^k}{k! \Gamma(a k + b)}
See also [1].
Parameters
----------
a : array_like of float
a >= 0
b : array_like of float
b >= 0
x : array_like of float
x >= 0
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
Value of the Wright's generalized Bessel function
Notes
-----
Due to the compexity of the function with its three parameters, only
non-negative arguments are implemented.
Examples
--------
>>> from scipy.special import wright_bessel
>>> a, b, x = 1.5, 1.1, 2.5
>>> wright_bessel(a, b-1, x)
4.5314465939443025
Now, let us verify the relation
.. math:: \Phi(a, b-1; x) = a x \Phi(a, b+a; x) + (b-1) \Phi(a, b; x)
>>> a * x * wright_bessel(a, b+a, x) + (b-1) * wright_bessel(a, b, x)
4.5314465939443025
References
----------
.. [1] Digital Library of Mathematical Functions, 10.46.
https://dlmf.nist.gov/10.46.E1
""")
add_newdoc("ndtri_exp",
r"""
ndtri_exp(y, out=None)
Inverse of `log_ndtr` vs x. Allows for greater precision than
`ndtri` composed with `numpy.exp` for very small values of y and for
y close to 0.
Parameters
----------
y : array_like of float
Function argument
out : ndarray, optional
Optional output array for the function results
Returns
-------
scalar or ndarray
Inverse of the log CDF of the standard normal distribution, evaluated
at y.
Examples
--------
>>> import numpy as np
>>> import scipy.special as sc
`ndtri_exp` agrees with the naive implementation when the latter does
not suffer from underflow.
>>> sc.ndtri_exp(-1)
-0.33747496376420244
>>> sc.ndtri(np.exp(-1))
-0.33747496376420244
For extreme values of y, the naive approach fails
>>> sc.ndtri(np.exp(-800))
-inf
>>> sc.ndtri(np.exp(-1e-20))
inf
whereas `ndtri_exp` is still able to compute the result to high precision.
>>> sc.ndtri_exp(-800)
-39.88469483825668
>>> sc.ndtri_exp(-1e-20)
9.262340089798409
See Also
--------
log_ndtr, ndtri, ndtr
""")
| 391,328
| 26.350363
| 194
|
py
|
scipy
|
scipy-main/scipy/special/sf_error.py
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.special` namespace for importing the functions
# included below.
import warnings
from . import _sf_error
# Names this deprecated shim still exposes.  F822 ("undefined name in
# __all__") is suppressed because the names are resolved lazily through the
# module-level `__getattr__` rather than defined here.
__all__ = [ # noqa: F822
    'SpecialFunctionWarning',
    'SpecialFunctionError'
]
def __dir__():
    """Limit ``dir()`` on this deprecated shim to its re-exported names."""
    return __all__
def __getattr__(name):
    """Lazily resolve deprecated attribute access against ``_sf_error``.

    Known names (those in ``__all__``) are fetched from the private
    ``scipy.special._sf_error`` module after emitting a
    ``DeprecationWarning``; anything else raises ``AttributeError``.
    """
    if name in __all__:
        warnings.warn(f"Please use `{name}` from the `scipy.special` namespace, "
                      "the `scipy.special.sf_error` namespace is deprecated.",
                      category=DeprecationWarning, stacklevel=2)
        return getattr(_sf_error, name)
    raise AttributeError(
        "scipy.special.sf_error is deprecated and has no attribute "
        f"{name}. Try looking in scipy.special instead.")
| 792
| 26.344828
| 77
|
py
|
scipy
|
scipy-main/scipy/special/_spherical_bessel.py
|
from ._ufuncs import (_spherical_jn, _spherical_yn, _spherical_in,
_spherical_kn, _spherical_jn_d, _spherical_yn_d,
_spherical_in_d, _spherical_kn_d)
def spherical_jn(n, z, derivative=False):
    r"""Spherical Bessel function of the first kind or its derivative.

    Defined as [1]_,

    .. math:: j_n(z) = \sqrt{\frac{\pi}{2z}} J_{n + 1/2}(z),

    where :math:`J_n` is the Bessel function of the first kind.

    Parameters
    ----------
    n : int, array_like
        Order of the Bessel function (n >= 0).
    z : complex or float, array_like
        Argument of the Bessel function.
    derivative : bool, optional
        If True, return the value of the derivative instead of the
        function itself.

    Returns
    -------
    jn : ndarray
        Function values (or derivative values when ``derivative=True``).

    Notes
    -----
    For real arguments greater than the order, the computation uses the
    ascending recurrence [2]_; for small real or complex arguments it
    falls back on the defining relation to the cylindrical Bessel
    function of the first kind.  The derivative follows from the
    relations [3]_,

    .. math::
        j_n'(z) = j_{n-1}(z) - \frac{n + 1}{z} j_n(z), \qquad
        j_0'(z) = -j_1(z).

    .. versionadded:: 0.18.0

    References
    ----------
    .. [1] https://dlmf.nist.gov/10.47.E3
    .. [2] https://dlmf.nist.gov/10.51.E1
    .. [3] https://dlmf.nist.gov/10.51.E2
    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
        Handbook of Mathematical Functions with Formulas,
        Graphs, and Mathematical Tables. New York: Dover, 1972.
    """
    # Thin dispatcher: all numerics live in the compiled kernels from
    # `._ufuncs`; here we only pick the value or the derivative routine.
    kernel = _spherical_jn_d if derivative else _spherical_jn
    return kernel(n, z)
def spherical_yn(n, z, derivative=False):
    r"""Spherical Bessel function of the second kind or its derivative.

    Defined as [1]_,

    .. math:: y_n(z) = \sqrt{\frac{\pi}{2z}} Y_{n + 1/2}(z),

    where :math:`Y_n` is the Bessel function of the second kind.

    Parameters
    ----------
    n : int, array_like
        Order of the Bessel function (n >= 0).
    z : complex or float, array_like
        Argument of the Bessel function.
    derivative : bool, optional
        If True, return the value of the derivative instead of the
        function itself.

    Returns
    -------
    yn : ndarray
        Function values (or derivative values when ``derivative=True``).

    Notes
    -----
    For real arguments the computation uses the ascending recurrence
    [2]_; for complex arguments the defining relation to the cylindrical
    Bessel function of the second kind is used instead.  The derivative
    follows from the relations [3]_,

    .. math::
        y_n' = y_{n-1} - \frac{n + 1}{z} y_n, \qquad
        y_0' = -y_1.

    .. versionadded:: 0.18.0

    References
    ----------
    .. [1] https://dlmf.nist.gov/10.47.E4
    .. [2] https://dlmf.nist.gov/10.51.E1
    .. [3] https://dlmf.nist.gov/10.51.E2
    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
        Handbook of Mathematical Functions with Formulas,
        Graphs, and Mathematical Tables. New York: Dover, 1972.
    """
    # Thin dispatcher around the compiled kernels from `._ufuncs`.
    return _spherical_yn_d(n, z) if derivative else _spherical_yn(n, z)
def spherical_in(n, z, derivative=False):
    r"""Modified spherical Bessel function of the first kind or its derivative.

    Defined as [1]_,

    .. math:: i_n(z) = \sqrt{\frac{\pi}{2z}} I_{n + 1/2}(z),

    where :math:`I_n` is the modified Bessel function of the first kind.

    Parameters
    ----------
    n : int, array_like
        Order of the Bessel function (n >= 0).
    z : complex or float, array_like
        Argument of the Bessel function.
    derivative : bool, optional
        If True, return the value of the derivative instead of the
        function itself.

    Returns
    -------
    in : ndarray
        Function values (or derivative values when ``derivative=True``).

    Notes
    -----
    The value is computed from the defining relation to the modified
    cylindrical Bessel function of the first kind.  The derivative
    follows from the relations [2]_,

    .. math::
        i_n' = i_{n-1} - \frac{n + 1}{z} i_n, \qquad
        i_1' = i_0.

    .. versionadded:: 0.18.0

    References
    ----------
    .. [1] https://dlmf.nist.gov/10.47.E7
    .. [2] https://dlmf.nist.gov/10.51.E5
    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
        Handbook of Mathematical Functions with Formulas,
        Graphs, and Mathematical Tables. New York: Dover, 1972.
    """
    # Thin dispatcher: select the derivative kernel or the plain kernel,
    # both of which are compiled routines imported from `._ufuncs`.
    kernel = _spherical_in_d if derivative else _spherical_in
    return kernel(n, z)
def spherical_kn(n, z, derivative=False):
    r"""Modified spherical Bessel function of the second kind or its derivative.

    Defined as [1]_,

    .. math:: k_n(z) = \sqrt{\frac{\pi}{2z}} K_{n + 1/2}(z),

    where :math:`K_n` is the modified Bessel function of the second kind.

    Parameters
    ----------
    n : int, array_like
        Order of the Bessel function (n >= 0).
    z : complex or float, array_like
        Argument of the Bessel function.
    derivative : bool, optional
        If True, return the value of the derivative instead of the
        function itself.

    Returns
    -------
    kn : ndarray
        Function values (or derivative values when ``derivative=True``).

    Notes
    -----
    The value is computed from the defining relation to the modified
    cylindrical Bessel function of the second kind.  The derivative
    follows from the relations [2]_,

    .. math::
        k_n' = -k_{n-1} - \frac{n + 1}{z} k_n, \qquad
        k_0' = -k_1.

    .. versionadded:: 0.18.0

    References
    ----------
    .. [1] https://dlmf.nist.gov/10.47.E9
    .. [2] https://dlmf.nist.gov/10.51.E5
    .. [AS] Milton Abramowitz and Irene A. Stegun, eds.
        Handbook of Mathematical Functions with Formulas,
        Graphs, and Mathematical Tables. New York: Dover, 1972.
    """
    # Thin dispatcher around the compiled kernels from `._ufuncs`.
    return _spherical_kn_d(n, z) if derivative else _spherical_kn(n, z)
| 10,217
| 28.194286
| 80
|
py
|
scipy
|
scipy-main/scipy/special/__init__.py
|
"""
========================================
Special functions (:mod:`scipy.special`)
========================================
.. currentmodule:: scipy.special
Almost all of the functions below accept NumPy arrays as input
arguments as well as single numbers. This means they follow
broadcasting and automatic array-looping rules. Technically,
they are `NumPy universal functions
<https://numpy.org/doc/stable/user/basics.ufuncs.html#ufuncs-basics>`_.
Functions which do not accept NumPy arrays are marked by a warning
in the section description.
.. seealso::
`scipy.special.cython_special` -- Typed Cython versions of special functions
Error handling
==============
Errors are handled by returning NaNs or other appropriate values.
Some of the special function routines can emit warnings or raise
exceptions when an error occurs. By default this is disabled; to
query and control the current error handling state the following
functions are provided.
.. autosummary::
:toctree: generated/
geterr -- Get the current way of handling special-function errors.
seterr -- Set how special-function errors are handled.
errstate -- Context manager for special-function error handling.
SpecialFunctionWarning -- Warning that can be emitted by special functions.
SpecialFunctionError -- Exception that can be raised by special functions.
Available functions
===================
Airy functions
--------------
.. autosummary::
:toctree: generated/
airy -- Airy functions and their derivatives.
airye -- Exponentially scaled Airy functions and their derivatives.
ai_zeros -- Compute `nt` zeros and values of the Airy function Ai and its derivative.
bi_zeros -- Compute `nt` zeros and values of the Airy function Bi and its derivative.
itairy -- Integrals of Airy functions
Elliptic functions and integrals
--------------------------------
.. autosummary::
:toctree: generated/
ellipj -- Jacobian elliptic functions.
ellipk -- Complete elliptic integral of the first kind.
ellipkm1 -- Complete elliptic integral of the first kind around `m` = 1.
ellipkinc -- Incomplete elliptic integral of the first kind.
ellipe -- Complete elliptic integral of the second kind.
ellipeinc -- Incomplete elliptic integral of the second kind.
elliprc -- Degenerate symmetric integral RC.
elliprd -- Symmetric elliptic integral of the second kind.
elliprf -- Completely-symmetric elliptic integral of the first kind.
elliprg -- Completely-symmetric elliptic integral of the second kind.
elliprj -- Symmetric elliptic integral of the third kind.
Bessel functions
----------------
.. autosummary::
:toctree: generated/
jv -- Bessel function of the first kind of real order and \
complex argument.
jve -- Exponentially scaled Bessel function of order `v`.
yn -- Bessel function of the second kind of integer order and \
real argument.
yv -- Bessel function of the second kind of real order and \
complex argument.
yve -- Exponentially scaled Bessel function of the second kind \
of real order.
kn -- Modified Bessel function of the second kind of integer \
order `n`
kv -- Modified Bessel function of the second kind of real order \
`v`
kve -- Exponentially scaled modified Bessel function of the \
second kind.
iv -- Modified Bessel function of the first kind of real order.
ive -- Exponentially scaled modified Bessel function of the \
first kind.
hankel1 -- Hankel function of the first kind.
hankel1e -- Exponentially scaled Hankel function of the first kind.
hankel2 -- Hankel function of the second kind.
hankel2e -- Exponentially scaled Hankel function of the second kind.
wright_bessel -- Wright's generalized Bessel function.
The following function does not accept NumPy arrays (it is not a
universal function):
.. autosummary::
:toctree: generated/
lmbda -- Jahnke-Emden Lambda function, Lambdav(x).
Zeros of Bessel functions
^^^^^^^^^^^^^^^^^^^^^^^^^
The following functions do not accept NumPy arrays (they are not
universal functions):
.. autosummary::
:toctree: generated/
jnjnp_zeros -- Compute zeros of integer-order Bessel functions Jn and Jn'.
jnyn_zeros -- Compute nt zeros of Bessel functions Jn(x), Jn'(x), Yn(x), and Yn'(x).
jn_zeros -- Compute zeros of integer-order Bessel function Jn(x).
jnp_zeros -- Compute zeros of integer-order Bessel function derivative Jn'(x).
yn_zeros -- Compute zeros of integer-order Bessel function Yn(x).
ynp_zeros -- Compute zeros of integer-order Bessel function derivative Yn'(x).
y0_zeros -- Compute nt zeros of Bessel function Y0(z), and derivative at each zero.
y1_zeros -- Compute nt zeros of Bessel function Y1(z), and derivative at each zero.
y1p_zeros -- Compute nt zeros of Bessel derivative Y1'(z), and value at each zero.
Faster versions of common Bessel functions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
j0 -- Bessel function of the first kind of order 0.
j1 -- Bessel function of the first kind of order 1.
y0 -- Bessel function of the second kind of order 0.
y1 -- Bessel function of the second kind of order 1.
i0 -- Modified Bessel function of order 0.
i0e -- Exponentially scaled modified Bessel function of order 0.
i1 -- Modified Bessel function of order 1.
i1e -- Exponentially scaled modified Bessel function of order 1.
k0 -- Modified Bessel function of the second kind of order 0, :math:`K_0`.
k0e -- Exponentially scaled modified Bessel function K of order 0
k1 -- Modified Bessel function of the second kind of order 1, :math:`K_1(x)`.
k1e -- Exponentially scaled modified Bessel function K of order 1.
Integrals of Bessel functions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
itj0y0 -- Integrals of Bessel functions of order 0.
it2j0y0 -- Integrals related to Bessel functions of order 0.
iti0k0 -- Integrals of modified Bessel functions of order 0.
it2i0k0 -- Integrals related to modified Bessel functions of order 0.
besselpoly -- Weighted integral of a Bessel function.
Derivatives of Bessel functions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
jvp -- Compute nth derivative of Bessel function Jv(z) with respect to `z`.
yvp -- Compute nth derivative of Bessel function Yv(z) with respect to `z`.
kvp -- Compute nth derivative of real-order modified Bessel function Kv(z)
ivp -- Compute nth derivative of modified Bessel function Iv(z) with respect to `z`.
h1vp -- Compute nth derivative of Hankel function H1v(z) with respect to `z`.
h2vp -- Compute nth derivative of Hankel function H2v(z) with respect to `z`.
Spherical Bessel functions
^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
spherical_jn -- Spherical Bessel function of the first kind or its derivative.
spherical_yn -- Spherical Bessel function of the second kind or its derivative.
spherical_in -- Modified spherical Bessel function of the first kind or its derivative.
spherical_kn -- Modified spherical Bessel function of the second kind or its derivative.
Riccati-Bessel functions
^^^^^^^^^^^^^^^^^^^^^^^^
The following functions do not accept NumPy arrays (they are not
universal functions):
.. autosummary::
:toctree: generated/
riccati_jn -- Compute Ricatti-Bessel function of the first kind and its derivative.
riccati_yn -- Compute Ricatti-Bessel function of the second kind and its derivative.
Struve functions
----------------
.. autosummary::
:toctree: generated/
struve -- Struve function.
modstruve -- Modified Struve function.
itstruve0 -- Integral of the Struve function of order 0.
it2struve0 -- Integral related to the Struve function of order 0.
itmodstruve0 -- Integral of the modified Struve function of order 0.
Raw statistical functions
-------------------------
.. seealso:: :mod:`scipy.stats`: Friendly versions of these functions.
Binomial distribution
^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
bdtr -- Binomial distribution cumulative distribution function.
bdtrc -- Binomial distribution survival function.
bdtri -- Inverse function to `bdtr` with respect to `p`.
bdtrik -- Inverse function to `bdtr` with respect to `k`.
bdtrin -- Inverse function to `bdtr` with respect to `n`.
Beta distribution
^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
btdtr -- Cumulative distribution function of the beta distribution.
btdtri -- The `p`-th quantile of the beta distribution.
btdtria -- Inverse of `btdtr` with respect to `a`.
   btdtrib -- Inverse of `btdtr` with respect to `b`.
F distribution
^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
fdtr -- F cumulative distribution function.
fdtrc -- F survival function.
fdtri -- The `p`-th quantile of the F-distribution.
fdtridfd -- Inverse to `fdtr` vs dfd.
Gamma distribution
^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
gdtr -- Gamma distribution cumulative distribution function.
gdtrc -- Gamma distribution survival function.
gdtria -- Inverse of `gdtr` vs a.
gdtrib -- Inverse of `gdtr` vs b.
gdtrix -- Inverse of `gdtr` vs x.
Negative binomial distribution
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
nbdtr -- Negative binomial cumulative distribution function.
nbdtrc -- Negative binomial survival function.
nbdtri -- Inverse of `nbdtr` vs `p`.
nbdtrik -- Inverse of `nbdtr` vs `k`.
nbdtrin -- Inverse of `nbdtr` vs `n`.
Noncentral F distribution
^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
ncfdtr -- Cumulative distribution function of the non-central F distribution.
ncfdtridfd -- Calculate degrees of freedom (denominator) for the noncentral F-distribution.
ncfdtridfn -- Calculate degrees of freedom (numerator) for the noncentral F-distribution.
ncfdtri -- Inverse cumulative distribution function of the non-central F distribution.
ncfdtrinc -- Calculate non-centrality parameter for non-central F distribution.
Noncentral t distribution
^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
nctdtr -- Cumulative distribution function of the non-central `t` distribution.
nctdtridf -- Calculate degrees of freedom for non-central t distribution.
nctdtrit -- Inverse cumulative distribution function of the non-central t distribution.
nctdtrinc -- Calculate non-centrality parameter for non-central t distribution.
Normal distribution
^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
nrdtrimn -- Calculate mean of normal distribution given other params.
nrdtrisd -- Calculate standard deviation of normal distribution given other params.
ndtr -- Normal cumulative distribution function.
log_ndtr -- Logarithm of normal cumulative distribution function.
ndtri -- Inverse of `ndtr` vs x.
ndtri_exp -- Inverse of `log_ndtr` vs x.
Poisson distribution
^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
pdtr -- Poisson cumulative distribution function.
pdtrc -- Poisson survival function.
pdtri -- Inverse to `pdtr` vs m.
pdtrik -- Inverse to `pdtr` vs k.
Student t distribution
^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
stdtr -- Student t distribution cumulative distribution function.
stdtridf -- Inverse of `stdtr` vs df.
stdtrit -- Inverse of `stdtr` vs `t`.
Chi square distribution
^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
chdtr -- Chi square cumulative distribution function.
chdtrc -- Chi square survival function.
chdtri -- Inverse to `chdtrc`.
chdtriv -- Inverse to `chdtr` vs `v`.
Non-central chi square distribution
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
chndtr -- Non-central chi square cumulative distribution function.
chndtridf -- Inverse to `chndtr` vs `df`.
chndtrinc -- Inverse to `chndtr` vs `nc`.
chndtrix -- Inverse to `chndtr` vs `x`.
Kolmogorov distribution
^^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
smirnov -- Kolmogorov-Smirnov complementary cumulative distribution function.
smirnovi -- Inverse to `smirnov`.
kolmogorov -- Complementary cumulative distribution function of Kolmogorov distribution.
kolmogi -- Inverse function to `kolmogorov`.
Box-Cox transformation
^^^^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
boxcox -- Compute the Box-Cox transformation.
boxcox1p -- Compute the Box-Cox transformation of 1 + `x`.
inv_boxcox -- Compute the inverse of the Box-Cox transformation.
inv_boxcox1p -- Compute the inverse of the Box-Cox transformation.
Sigmoidal functions
^^^^^^^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
logit -- Logit ufunc for ndarrays.
expit -- Logistic sigmoid function.
log_expit -- Logarithm of the logistic sigmoid function.
Miscellaneous
^^^^^^^^^^^^^
.. autosummary::
:toctree: generated/
tklmbda -- Tukey-Lambda cumulative distribution function.
owens_t -- Owen's T Function.
Information Theory functions
----------------------------
.. autosummary::
:toctree: generated/
entr -- Elementwise function for computing entropy.
rel_entr -- Elementwise function for computing relative entropy.
kl_div -- Elementwise function for computing Kullback-Leibler divergence.
huber -- Huber loss function.
pseudo_huber -- Pseudo-Huber loss function.
Gamma and related functions
---------------------------
.. autosummary::
:toctree: generated/
gamma -- Gamma function.
gammaln -- Logarithm of the absolute value of the Gamma function for real inputs.
loggamma -- Principal branch of the logarithm of the Gamma function.
gammasgn -- Sign of the gamma function.
gammainc -- Regularized lower incomplete gamma function.
gammaincinv -- Inverse to `gammainc`.
gammaincc -- Regularized upper incomplete gamma function.
gammainccinv -- Inverse to `gammaincc`.
beta -- Beta function.
betaln -- Natural logarithm of absolute value of beta function.
betainc -- Incomplete beta integral.
betaincinv -- Inverse function to beta integral.
psi -- The digamma function.
rgamma -- Gamma function inverted.
polygamma -- Polygamma function n.
multigammaln -- Returns the log of multivariate gamma, also sometimes called the generalized gamma.
digamma -- psi(x[, out]).
poch -- Rising factorial (z)_m.
Error function and Fresnel integrals
------------------------------------
.. autosummary::
:toctree: generated/
erf -- Returns the error function of complex argument.
erfc -- Complementary error function, ``1 - erf(x)``.
erfcx -- Scaled complementary error function, ``exp(x**2) * erfc(x)``.
erfi -- Imaginary error function, ``-i erf(i z)``.
erfinv -- Inverse function for erf.
erfcinv -- Inverse function for erfc.
wofz -- Faddeeva function.
dawsn -- Dawson's integral.
fresnel -- Fresnel sin and cos integrals.
fresnel_zeros -- Compute nt complex zeros of sine and cosine Fresnel integrals S(z) and C(z).
modfresnelp -- Modified Fresnel positive integrals.
modfresnelm -- Modified Fresnel negative integrals.
voigt_profile -- Voigt profile.
The following functions do not accept NumPy arrays (they are not
universal functions):
.. autosummary::
:toctree: generated/
erf_zeros -- Compute nt complex zeros of error function erf(z).
fresnelc_zeros -- Compute nt complex zeros of cosine Fresnel integral C(z).
fresnels_zeros -- Compute nt complex zeros of sine Fresnel integral S(z).
Legendre functions
------------------
.. autosummary::
:toctree: generated/
lpmv -- Associated Legendre function of integer order and real degree.
sph_harm -- Compute spherical harmonics.
The following functions do not accept NumPy arrays (they are not
universal functions):
.. autosummary::
:toctree: generated/
clpmn -- Associated Legendre function of the first kind for complex arguments.
lpn -- Legendre function of the first kind.
lqn -- Legendre function of the second kind.
lpmn -- Sequence of associated Legendre functions of the first kind.
lqmn -- Sequence of associated Legendre functions of the second kind.
Ellipsoidal harmonics
---------------------
.. autosummary::
:toctree: generated/
ellip_harm -- Ellipsoidal harmonic functions E^p_n(l).
ellip_harm_2 -- Ellipsoidal harmonic functions F^p_n(l).
ellip_normal -- Ellipsoidal harmonic normalization constants gamma^p_n.
Orthogonal polynomials
----------------------
The following functions evaluate values of orthogonal polynomials:
.. autosummary::
:toctree: generated/
assoc_laguerre -- Compute the generalized (associated) Laguerre polynomial of degree n and order k.
eval_legendre -- Evaluate Legendre polynomial at a point.
eval_chebyt -- Evaluate Chebyshev polynomial of the first kind at a point.
eval_chebyu -- Evaluate Chebyshev polynomial of the second kind at a point.
eval_chebyc -- Evaluate Chebyshev polynomial of the first kind on [-2, 2] at a point.
eval_chebys -- Evaluate Chebyshev polynomial of the second kind on [-2, 2] at a point.
eval_jacobi -- Evaluate Jacobi polynomial at a point.
eval_laguerre -- Evaluate Laguerre polynomial at a point.
eval_genlaguerre -- Evaluate generalized Laguerre polynomial at a point.
eval_hermite -- Evaluate physicist's Hermite polynomial at a point.
eval_hermitenorm -- Evaluate probabilist's (normalized) Hermite polynomial at a point.
eval_gegenbauer -- Evaluate Gegenbauer polynomial at a point.
eval_sh_legendre -- Evaluate shifted Legendre polynomial at a point.
eval_sh_chebyt -- Evaluate shifted Chebyshev polynomial of the first kind at a point.
eval_sh_chebyu -- Evaluate shifted Chebyshev polynomial of the second kind at a point.
eval_sh_jacobi -- Evaluate shifted Jacobi polynomial at a point.
The following functions compute roots and quadrature weights for
orthogonal polynomials:
.. autosummary::
:toctree: generated/
roots_legendre -- Gauss-Legendre quadrature.
roots_chebyt -- Gauss-Chebyshev (first kind) quadrature.
roots_chebyu -- Gauss-Chebyshev (second kind) quadrature.
roots_chebyc -- Gauss-Chebyshev (first kind) quadrature.
roots_chebys -- Gauss-Chebyshev (second kind) quadrature.
roots_jacobi -- Gauss-Jacobi quadrature.
roots_laguerre -- Gauss-Laguerre quadrature.
roots_genlaguerre -- Gauss-generalized Laguerre quadrature.
roots_hermite -- Gauss-Hermite (physicst's) quadrature.
roots_hermitenorm -- Gauss-Hermite (statistician's) quadrature.
roots_gegenbauer -- Gauss-Gegenbauer quadrature.
roots_sh_legendre -- Gauss-Legendre (shifted) quadrature.
roots_sh_chebyt -- Gauss-Chebyshev (first kind, shifted) quadrature.
roots_sh_chebyu -- Gauss-Chebyshev (second kind, shifted) quadrature.
roots_sh_jacobi -- Gauss-Jacobi (shifted) quadrature.
The functions below, in turn, return the polynomial coefficients in
``orthopoly1d`` objects, which function similarly as `numpy.poly1d`.
The ``orthopoly1d`` class also has an attribute ``weights``, which returns
the roots, weights, and total weights for the appropriate form of Gaussian
quadrature. These are returned in an ``n x 3`` array with roots in the first
column, weights in the second column, and total weights in the final column.
Note that ``orthopoly1d`` objects are converted to `~numpy.poly1d` when doing
arithmetic, and lose information of the original orthogonal polynomial.
.. autosummary::
:toctree: generated/
legendre -- Legendre polynomial.
chebyt -- Chebyshev polynomial of the first kind.
chebyu -- Chebyshev polynomial of the second kind.
chebyc -- Chebyshev polynomial of the first kind on :math:`[-2, 2]`.
chebys -- Chebyshev polynomial of the second kind on :math:`[-2, 2]`.
jacobi -- Jacobi polynomial.
laguerre -- Laguerre polynomial.
genlaguerre -- Generalized (associated) Laguerre polynomial.
hermite -- Physicist's Hermite polynomial.
hermitenorm -- Normalized (probabilist's) Hermite polynomial.
gegenbauer -- Gegenbauer (ultraspherical) polynomial.
sh_legendre -- Shifted Legendre polynomial.
sh_chebyt -- Shifted Chebyshev polynomial of the first kind.
sh_chebyu -- Shifted Chebyshev polynomial of the second kind.
sh_jacobi -- Shifted Jacobi polynomial.
.. warning::
Computing values of high-order polynomials (around ``order > 20``) using
polynomial coefficients is numerically unstable. To evaluate polynomial
values, the ``eval_*`` functions should be used instead.
Hypergeometric functions
------------------------
.. autosummary::
:toctree: generated/
hyp2f1 -- Gauss hypergeometric function 2F1(a, b; c; z).
hyp1f1 -- Confluent hypergeometric function 1F1(a, b; x).
hyperu -- Confluent hypergeometric function U(a, b, x) of the second kind.
hyp0f1 -- Confluent hypergeometric limit function 0F1.
Parabolic cylinder functions
----------------------------
.. autosummary::
:toctree: generated/
pbdv -- Parabolic cylinder function D.
pbvv -- Parabolic cylinder function V.
pbwa -- Parabolic cylinder function W.
The following functions do not accept NumPy arrays (they are not
universal functions):
.. autosummary::
:toctree: generated/
pbdv_seq -- Parabolic cylinder functions Dv(x) and derivatives.
pbvv_seq -- Parabolic cylinder functions Vv(x) and derivatives.
pbdn_seq -- Parabolic cylinder functions Dn(z) and derivatives.
Mathieu and related functions
-----------------------------
.. autosummary::
:toctree: generated/
mathieu_a -- Characteristic value of even Mathieu functions.
mathieu_b -- Characteristic value of odd Mathieu functions.
The following functions do not accept NumPy arrays (they are not
universal functions):
.. autosummary::
:toctree: generated/
mathieu_even_coef -- Fourier coefficients for even Mathieu and modified Mathieu functions.
mathieu_odd_coef -- Fourier coefficients for odd Mathieu and modified Mathieu functions.
The following return both function and first derivative:
.. autosummary::
:toctree: generated/
mathieu_cem -- Even Mathieu function and its derivative.
mathieu_sem -- Odd Mathieu function and its derivative.
mathieu_modcem1 -- Even modified Mathieu function of the first kind and its derivative.
mathieu_modcem2 -- Even modified Mathieu function of the second kind and its derivative.
mathieu_modsem1 -- Odd modified Mathieu function of the first kind and its derivative.
mathieu_modsem2 -- Odd modified Mathieu function of the second kind and its derivative.
Spheroidal wave functions
-------------------------
.. autosummary::
:toctree: generated/
pro_ang1 -- Prolate spheroidal angular function of the first kind and its derivative.
pro_rad1 -- Prolate spheroidal radial function of the first kind and its derivative.
pro_rad2 -- Prolate spheroidal radial function of the second kind and its derivative.
obl_ang1 -- Oblate spheroidal angular function of the first kind and its derivative.
obl_rad1 -- Oblate spheroidal radial function of the first kind and its derivative.
obl_rad2 -- Oblate spheroidal radial function of the second kind and its derivative.
pro_cv -- Characteristic value of prolate spheroidal function.
obl_cv -- Characteristic value of oblate spheroidal function.
pro_cv_seq -- Characteristic values for prolate spheroidal wave functions.
obl_cv_seq -- Characteristic values for oblate spheroidal wave functions.
The following functions require pre-computed characteristic value:
.. autosummary::
:toctree: generated/
pro_ang1_cv -- Prolate spheroidal angular function pro_ang1 for precomputed characteristic value.
pro_rad1_cv -- Prolate spheroidal radial function pro_rad1 for precomputed characteristic value.
pro_rad2_cv -- Prolate spheroidal radial function pro_rad2 for precomputed characteristic value.
obl_ang1_cv -- Oblate spheroidal angular function obl_ang1 for precomputed characteristic value.
obl_rad1_cv -- Oblate spheroidal radial function obl_rad1 for precomputed characteristic value.
obl_rad2_cv -- Oblate spheroidal radial function obl_rad2 for precomputed characteristic value.
Kelvin functions
----------------
.. autosummary::
:toctree: generated/
kelvin -- Kelvin functions as complex numbers.
kelvin_zeros -- Compute nt zeros of all Kelvin functions.
ber -- Kelvin function ber.
bei -- Kelvin function bei.
berp -- Derivative of the Kelvin function `ber`.
beip -- Derivative of the Kelvin function `bei`.
ker -- Kelvin function ker.
kei -- Kelvin function kei.
kerp -- Derivative of the Kelvin function ker.
keip -- Derivative of the Kelvin function kei.
The following functions do not accept NumPy arrays (they are not
universal functions):
.. autosummary::
:toctree: generated/
ber_zeros -- Compute nt zeros of the Kelvin function ber(x).
bei_zeros -- Compute nt zeros of the Kelvin function bei(x).
berp_zeros -- Compute nt zeros of the Kelvin function ber'(x).
beip_zeros -- Compute nt zeros of the Kelvin function bei'(x).
ker_zeros -- Compute nt zeros of the Kelvin function ker(x).
kei_zeros -- Compute nt zeros of the Kelvin function kei(x).
kerp_zeros -- Compute nt zeros of the Kelvin function ker'(x).
keip_zeros -- Compute nt zeros of the Kelvin function kei'(x).
Combinatorics
-------------
.. autosummary::
:toctree: generated/
comb -- The number of combinations of N things taken k at a time.
perm -- Permutations of N things taken k at a time, i.e., k-permutations of N.
Lambert W and related functions
-------------------------------
.. autosummary::
:toctree: generated/
lambertw -- Lambert W function.
wrightomega -- Wright Omega function.
Other special functions
-----------------------
.. autosummary::
:toctree: generated/
agm -- Arithmetic, Geometric Mean.
bernoulli -- Bernoulli numbers B0..Bn (inclusive).
binom -- Binomial coefficient
diric -- Periodic sinc function, also called the Dirichlet function.
euler -- Euler numbers E0..En (inclusive).
expn -- Exponential integral E_n.
exp1 -- Exponential integral E_1 of complex argument z.
expi -- Exponential integral Ei.
factorial -- The factorial of a number or array of numbers.
factorial2 -- Double factorial.
factorialk -- Multifactorial of n of order k, n(!!...!).
shichi -- Hyperbolic sine and cosine integrals.
sici -- Sine and cosine integrals.
softmax -- Softmax function.
log_softmax -- Logarithm of softmax function.
spence -- Spence's function, also known as the dilogarithm.
zeta -- Riemann zeta function.
zetac -- Riemann zeta function minus 1.
Convenience functions
---------------------
.. autosummary::
:toctree: generated/
cbrt -- Cube root of `x`.
exp10 -- 10**x.
exp2 -- 2**x.
radian -- Convert from degrees to radians.
cosdg -- Cosine of the angle `x` given in degrees.
sindg -- Sine of angle given in degrees.
tandg -- Tangent of angle x given in degrees.
cotdg -- Cotangent of the angle `x` given in degrees.
log1p -- Calculates log(1+x) for use when `x` is near zero.
expm1 -- ``exp(x) - 1`` for use when `x` is near zero.
cosm1 -- ``cos(x) - 1`` for use when `x` is near zero.
powm1 -- ``x**y - 1`` for use when `y` is near zero or `x` is near 1.
round -- Round to nearest integer.
xlogy -- Compute ``x*log(y)`` so that the result is 0 if ``x = 0``.
xlog1py -- Compute ``x*log1p(y)`` so that the result is 0 if ``x = 0``.
logsumexp -- Compute the log of the sum of exponentials of input elements.
exprel -- Relative error exponential, (exp(x)-1)/x, for use when `x` is near zero.
sinc -- Return the sinc function.
"""
from ._sf_error import SpecialFunctionWarning, SpecialFunctionError
from . import _ufuncs
from ._ufuncs import *
from . import _basic
from ._basic import *
from ._logsumexp import logsumexp, softmax, log_softmax
from . import _orthogonal
from ._orthogonal import *
from ._spfun_stats import multigammaln
from ._ellip_harm import (
ellip_harm,
ellip_harm_2,
ellip_normal
)
from ._lambertw import lambertw
from ._spherical_bessel import (
spherical_jn,
spherical_yn,
spherical_in,
spherical_kn
)
# Deprecated namespaces, to be removed in v2.0.0
from . import add_newdocs, basic, orthogonal, specfun, sf_error, spfun_stats
__all__ = _ufuncs.__all__ + _basic.__all__ + _orthogonal.__all__ + [
'SpecialFunctionWarning',
'SpecialFunctionError',
'logsumexp',
'softmax',
'log_softmax',
'multigammaln',
'ellip_harm',
'ellip_harm_2',
'ellip_normal',
'lambertw',
'spherical_jn',
'spherical_yn',
'spherical_in',
'spherical_kn',
]
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
| 30,387
| 36.058537
| 104
|
py
|
scipy
|
scipy-main/scipy/special/_logsumexp.py
|
import numpy as np
from scipy._lib._util import _asarray_validated
__all__ = ["logsumexp", "softmax", "log_softmax"]
def logsumexp(a, axis=None, b=None, keepdims=False, return_sign=False):
    """Compute the log of the sum of exponentials of input elements.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : None or int or tuple of ints, optional
        Axis or axes over which the sum is taken.  By default `axis` is
        None and all elements are summed.
    b : array-like, optional
        Scaling factor for ``exp(a)``; must be broadcastable to `a`.
        Negative values are allowed and implement subtraction.
    keepdims : bool, optional
        If True, the reduced axes are kept with size one so that the
        result broadcasts correctly against the original array.
    return_sign : bool, optional
        If True, the result is a pair carrying sign information; if
        False, negative results come back as NaN.  Default is False.

    Returns
    -------
    res : ndarray
        ``np.log(np.sum(np.exp(a)))`` (or ``np.log(np.sum(b*np.exp(a)))``
        when `b` is given) computed in a numerically stable way.
    sgn : ndarray
        Only if `return_sign` is True: +1, 0, or -1, matching the sign
        of the summed quantity.

    See Also
    --------
    numpy.logaddexp, numpy.logaddexp2

    Notes
    -----
    NumPy's `logaddexp` handles exactly two arguments; ``logaddexp.reduce``
    is similar to this function but may be less stable.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.special import logsumexp
    >>> a = np.arange(10)
    >>> logsumexp(a)
    9.4586297444267107
    >>> np.log(np.sum(np.exp(a)))
    9.4586297444267107

    With a sign flag:

    >>> logsumexp([1, 2], b=[1, -1], return_sign=True)
    (1.5413248546129181, -1.0)
    """
    a = _asarray_validated(a, check_finite=False)
    if b is not None:
        a, b = np.broadcast_arrays(a, b)
        if np.any(b == 0):
            # A zero weight must contribute nothing: exp(-inf) == 0.
            a = a + 0.  # promote to at least float so -inf can be stored
            a[b == 0] = -np.inf

    # Shift by the per-axis maximum so exp() cannot overflow.
    a_max = np.amax(a, axis=axis, keepdims=True)

    # Neutralize non-finite maxima (all -inf slice, or nan) so the shift
    # below stays well defined.
    if a_max.ndim > 0:
        a_max[~np.isfinite(a_max)] = 0
    elif not np.isfinite(a_max):
        a_max = 0

    shifted = np.exp(a - a_max) if b is None else np.asarray(b) * np.exp(a - a_max)

    with np.errstate(divide='ignore'):  # log(0) -> -inf is acceptable here
        s = np.sum(shifted, axis=axis, keepdims=keepdims)
        if return_sign:
            sgn = np.sign(s)
            s *= sgn  # make s non-negative; keeps exact zero at zero
        out = np.log(s)

    if not keepdims:
        a_max = np.squeeze(a_max, axis=axis)
    out += a_max

    if return_sign:
        return out, sgn
    return out
def softmax(x, axis=None):
    r"""Compute the softmax function.

    Each element is replaced by its exponential divided by the sum of
    the exponentials along `axis`.  For a one-dimensional array::

        softmax(x) = np.exp(x)/sum(np.exp(x))

    Parameters
    ----------
    x : array_like
        Input array.
    axis : int or tuple of ints, optional
        Axis along which values are normalized.  By default (None) the
        whole array is used.

    Returns
    -------
    s : ndarray
        Array of the same shape as `x`; sums to 1 along `axis`.

    Notes
    -----
    The formula for a vector :math:`x` is

    .. math:: \sigma(x)_j = \frac{e^{x_j}}{\sum_k e^{x_k}}

    `softmax` is the gradient of `logsumexp`.  The input is shifted by
    its maximum before exponentiation, which is mathematically a no-op
    but prevents overflow; see [1]_.

    References
    ----------
    .. [1] P. Blanchard, D.J. Higham, N.J. Higham, "Accurately computing the
       log-sum-exp and softmax functions", IMA Journal of Numerical Analysis,
       Vol.41(4), :doi:`10.1093/imanum/draa038`.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.special import softmax
    >>> softmax(np.zeros(4))
    array([0.25, 0.25, 0.25, 0.25])
    """
    arr = _asarray_validated(x, check_finite=False)
    # Shift by the maximum: exp() then cannot overflow, and the ratio
    # below is unchanged.
    shifted = arr - np.amax(arr, axis=axis, keepdims=True)
    numer = np.exp(shifted)
    return numer / np.sum(numer, axis=axis, keepdims=True)
def log_softmax(x, axis=None):
    r"""Compute the logarithm of the softmax function.

    Equivalent in principle to ``log(softmax(x))`` but computed so that
    it stays accurate even when `softmax` saturates.

    Parameters
    ----------
    x : array_like
        Input array.
    axis : int or tuple of ints, optional
        Axis along which the softmax is normalized.  By default (None)
        the whole array is used.

    Returns
    -------
    s : ndarray or scalar
        Same shape as `x`.  Exponential of the result sums to 1 along
        `axis`.  A scalar input yields a scalar.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.special import log_softmax
    >>> log_softmax(np.array([1000.0, 1.0]))
    array([   0., -999.])

    whereas ``np.log(softmax(x))`` would return ``-inf`` for the second
    element because `softmax` underflows there.
    """
    arr = _asarray_validated(x, check_finite=False)
    x_max = np.amax(arr, axis=axis, keepdims=True)

    # Neutralize non-finite maxima so the shift below stays well defined.
    if x_max.ndim > 0:
        x_max[~np.isfinite(x_max)] = 0
    elif not np.isfinite(x_max):
        x_max = 0

    shifted = arr - x_max
    with np.errstate(divide='ignore'):  # log(0) -> -inf is acceptable
        log_norm = np.log(np.sum(np.exp(shifted), axis=axis, keepdims=True))
    return shifted - log_norm
| 8,523
| 27.508361
| 79
|
py
|
scipy
|
scipy-main/scipy/special/basic.py
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.special` namespace for importing the functions
# included below.
import warnings
from . import _basic
from ._ufuncs import (mathieu_a, mathieu_b, iv, jv, gamma,
psi, hankel1, hankel2, yv, kv)
__all__ = [ # noqa: F822
'ai_zeros',
'assoc_laguerre',
'bei_zeros',
'beip_zeros',
'ber_zeros',
'bernoulli',
'berp_zeros',
'bi_zeros',
'clpmn',
'comb',
'digamma',
'diric',
'erf_zeros',
'euler',
'factorial',
'factorial2',
'factorialk',
'fresnel_zeros',
'fresnelc_zeros',
'fresnels_zeros',
'gamma',
'h1vp',
'h2vp',
'hankel1',
'hankel2',
'iv',
'ivp',
'jn_zeros',
'jnjnp_zeros',
'jnp_zeros',
'jnyn_zeros',
'jv',
'jvp',
'kei_zeros',
'keip_zeros',
'kelvin_zeros',
'ker_zeros',
'kerp_zeros',
'kv',
'kvp',
'lmbda',
'lpmn',
'lpn',
'lqmn',
'lqn',
'mathieu_a',
'mathieu_b',
'mathieu_even_coef',
'mathieu_odd_coef',
'obl_cv_seq',
'pbdn_seq',
'pbdv_seq',
'pbvv_seq',
'perm',
'polygamma',
'pro_cv_seq',
'psi',
'riccati_jn',
'riccati_yn',
'sinc',
'y0_zeros',
'y1_zeros',
'y1p_zeros',
'yn_zeros',
'ynp_zeros',
'yv',
'yvp',
'zeta'
]
def __dir__():
    # Restrict introspection/tab-completion of this deprecated shim module
    # to the names it still forwards.
    return __all__
def __getattr__(name):
    # Deprecation shim: forward look-ups of public names to
    # scipy.special._basic with a DeprecationWarning, and reject anything
    # that is not in the public list.
    if name in __all__:
        warnings.warn(f"Please use `{name}` from the `scipy.special` namespace, "
                      "the `scipy.special.basic` namespace is deprecated.",
                      category=DeprecationWarning, stacklevel=2)
        return getattr(_basic, name)
    raise AttributeError(
        "scipy.special.basic is deprecated and has no attribute "
        f"{name}. Try looking in scipy.special instead.")
| 1,896
| 18.357143
| 77
|
py
|
scipy
|
scipy-main/scipy/special/tests/test_logit.py
|
import numpy as np
from numpy.testing import (assert_equal, assert_almost_equal,
assert_allclose)
from scipy.special import logit, expit, log_expit
class TestLogit:
    """Tests for logit, the inverse sigmoid log(p / (1 - p))."""

    def check_logit_out(self, dtype, expected):
        # Evaluate logit on a grid over [0, 1] and compare against the
        # precomputed values; the output dtype must match the input dtype.
        grid = np.linspace(0, 1, 10).astype(dtype)
        # The endpoints map to -inf/+inf, so silence the divide warning.
        with np.errstate(divide='ignore'):
            result = logit(grid)
        assert_almost_equal(result, expected)
        assert_equal(result.dtype, np.dtype(dtype))

    def test_float32(self):
        expected = np.array([-np.inf, -2.07944155,
                             -1.25276291, -0.69314718,
                             -0.22314353, 0.22314365,
                             0.6931473, 1.25276303,
                             2.07944155, np.inf], dtype=np.float32)
        self.check_logit_out('f4', expected)

    def test_float64(self):
        expected = np.array([-np.inf, -2.07944154,
                             -1.25276297, -0.69314718,
                             -0.22314355, 0.22314355,
                             0.69314718, 1.25276297,
                             2.07944154, np.inf])
        self.check_logit_out('f8', expected)

    def test_nan(self):
        # Arguments outside [0, 1] make the log arguments negative -> NaN.
        with np.errstate(invalid='ignore'):
            result = logit(np.array([-3., -2., 2., 3.]))
        assert_equal(np.array([np.nan] * 4), result)
class TestExpit:
    """Tests for expit, the logistic sigmoid 1 / (1 + exp(-x))."""

    def check_expit_out(self, dtype, expected):
        # Evaluate expit on a grid over [-4, 4] and compare against the
        # precomputed values; the output dtype must match the input dtype.
        grid = np.linspace(-4, 4, 10).astype(dtype)
        result = expit(grid)
        assert_almost_equal(result, expected)
        assert_equal(result.dtype, np.dtype(dtype))

    def test_float32(self):
        expected = np.array([0.01798621, 0.04265125,
                             0.09777259, 0.20860852,
                             0.39068246, 0.60931754,
                             0.79139149, 0.9022274,
                             0.95734876, 0.98201376], dtype=np.float32)
        self.check_expit_out('f4', expected)

    def test_float64(self):
        expected = np.array([0.01798621, 0.04265125,
                             0.0977726, 0.20860853,
                             0.39068246, 0.60931754,
                             0.79139147, 0.9022274,
                             0.95734875, 0.98201379])
        self.check_expit_out('f8', expected)

    def test_large(self):
        # Around each dtype's exp() overflow threshold, expit must saturate
        # cleanly to 1 (and to 0 for the negated argument), preserving dtype.
        for dtype in (np.float32, np.float64, np.longdouble):
            for threshold in (88, 89, 709, 710, 11356, 11357):
                big = np.array(threshold, dtype=dtype)
                assert_allclose(expit(big), 1.0, atol=1e-20)
                assert_allclose(expit(-big), 0.0, atol=1e-20)
                assert_equal(expit(big).dtype, dtype)
                assert_equal(expit(-big).dtype, dtype)
class TestLogExpit:
    """Accuracy checks for log_expit, i.e. log(1 / (1 + exp(-x)))."""

    def test_large_negative(self):
        # For very negative x, log_expit(x) equals x to double precision.
        arg = np.array([-10000.0, -750.0, -500.0, -35.0])
        assert_equal(log_expit(arg), arg)

    def test_large_positive(self):
        # For large positive x the result underflows to (minus) zero.
        # assert_equal does not check the sign of zeros, which is fine:
        # the magnitude is the essential property here, not the sign bit
        # (log_expit(1000) returning 0.0 instead of -0.0 would also be OK).
        arg = np.array([750.0, 1000.0, 10000.0])
        assert_equal(log_expit(arg), np.array([-0.0, -0.0, -0.0]))

    def test_basic_float64(self):
        arg = np.array([-32, -20, -10, -3, -1, -0.1, -1e-9,
                        0, 1e-9, 0.1, 1, 10, 100, 500, 710, 725, 735])
        # Reference values computed with mpmath at dps=100 via
        #     -mpmath.log1p(mpmath.exp(-x))
        expected = [-32.000000000000014, -20.000000002061153,
                    -10.000045398899218, -3.048587351573742,
                    -1.3132616875182228, -0.7443966600735709,
                    -0.6931471810599453, -0.6931471805599453,
                    -0.6931471800599454, -0.6443966600735709,
                    -0.3132616875182228, -4.539889921686465e-05,
                    -3.720075976020836e-44, -7.124576406741286e-218,
                    -4.47628622567513e-309, -1.36930634e-315,
                    -6.217e-320]
        # When tested locally only x=1 differed from mpmath, by 1 ULP;
        # rtol=1e-15 leaves a little slack.
        assert_allclose(log_expit(arg), expected, rtol=1e-15)

    def test_basic_float32(self):
        arg = np.array([-32, -20, -10, -3, -1, -0.1, -1e-9,
                        0, 1e-9, 0.1, 1, 10, 100], dtype=np.float32)
        # Reference values computed with mpmath at dps=100 via
        #     -mpmath.log1p(mpmath.exp(-x))
        # and rounded to float32.
        expected = np.array([-32.0, -20.0, -10.000046, -3.0485873,
                             -1.3132616, -0.7443967, -0.6931472,
                             -0.6931472, -0.6931472, -0.64439666,
                             -0.3132617, -4.5398898e-05, -3.8e-44],
                            dtype=np.float32)
        assert_allclose(log_expit(arg), expected, rtol=5e-7)
| 5,540
| 36.952055
| 75
|
py
|
scipy
|
scipy-main/scipy/special/tests/test_precompute_gammainc.py
|
import pytest
from scipy.special._testutils import MissingModule, check_version
from scipy.special._mptestutils import (
Arg, IntArg, mp_assert_allclose, assert_mpmath_equal)
from scipy.special._precompute.gammainc_asy import (
compute_g, compute_alpha, compute_d)
from scipy.special._precompute.gammainc_data import gammainc, gammaincc
try:
import sympy
except ImportError:
sympy = MissingModule('sympy')
try:
import mpmath as mp
except ImportError:
mp = MissingModule('mpmath')
@check_version(mp, '0.19')
def test_g():
    # Test data for the g_k. See DLMF 5.11.4.
    # (The g_k are the coefficients of the asymptotic expansion of the
    # gamma function; compute_g comes from special._precompute.gammainc_asy.)
    with mp.workdps(30):
        g = [mp.mpf(1), mp.mpf(1)/12, mp.mpf(1)/288,
             -mp.mpf(139)/51840, -mp.mpf(571)/2488320,
             mp.mpf(163879)/209018880, mp.mpf(5246819)/75246796800]
        mp_assert_allclose(compute_g(7), g)
@pytest.mark.slow
@check_version(mp, '0.19')
@check_version(sympy, '0.7')
@pytest.mark.xfail_on_32bit("rtol only 2e-11, see gh-6938")
def test_alpha():
    # Test data for the alpha_k. See DLMF 8.12.14.
    # (Coefficients from the incomplete-gamma asymptotic expansion module,
    # special._precompute.gammainc_asy.)
    with mp.workdps(30):
        alpha = [mp.mpf(0), mp.mpf(1), mp.mpf(1)/3, mp.mpf(1)/36,
                 -mp.mpf(1)/270, mp.mpf(1)/4320, mp.mpf(1)/17010,
                 -mp.mpf(139)/5443200, mp.mpf(1)/204120]
        mp_assert_allclose(compute_alpha(9), alpha)
@pytest.mark.xslow
@check_version(mp, '0.19')
@check_version(sympy, '0.7')
def test_d():
    # Compare the d_{k, n} to the results in appendix F of [1].
    #
    # Each dataset row is a triple (k, n, d_{k, n}); for every k only the
    # first (n=0) and last (n=12) tabulated coefficients are spot-checked.
    #
    # Sources
    # -------
    # [1] DiDonato and Morris, Computation of the Incomplete Gamma
    #     Function Ratios and their Inverse, ACM Transactions on
    #     Mathematical Software, 1986.
    with mp.workdps(50):
        dataset = [(0, 0, -mp.mpf('0.333333333333333333333333333333')),
                   (0, 12, mp.mpf('0.102618097842403080425739573227e-7')),
                   (1, 0, -mp.mpf('0.185185185185185185185185185185e-2')),
                   (1, 12, mp.mpf('0.119516285997781473243076536700e-7')),
                   (2, 0, mp.mpf('0.413359788359788359788359788360e-2')),
                   (2, 12, -mp.mpf('0.140925299108675210532930244154e-7')),
                   (3, 0, mp.mpf('0.649434156378600823045267489712e-3')),
                   (3, 12, -mp.mpf('0.191111684859736540606728140873e-7')),
                   (4, 0, -mp.mpf('0.861888290916711698604702719929e-3')),
                   (4, 12, mp.mpf('0.288658297427087836297341274604e-7')),
                   (5, 0, -mp.mpf('0.336798553366358150308767592718e-3')),
                   (5, 12, mp.mpf('0.482409670378941807563762631739e-7')),
                   (6, 0, mp.mpf('0.531307936463992223165748542978e-3')),
                   (6, 12, -mp.mpf('0.882860074633048352505085243179e-7')),
                   (7, 0, mp.mpf('0.344367606892377671254279625109e-3')),
                   (7, 12, -mp.mpf('0.175629733590604619378669693914e-6')),
                   (8, 0, -mp.mpf('0.652623918595309418922034919727e-3')),
                   (8, 12, mp.mpf('0.377358774161109793380344937299e-6')),
                   (9, 0, -mp.mpf('0.596761290192746250124390067179e-3')),
                   (9, 12, mp.mpf('0.870823417786464116761231237189e-6'))]
        d = compute_d(10, 13)
        # Pull the computed coefficients at the tabulated (k, n) positions
        # and compare against the published values.
        res = [d[k][n] for k, n, std in dataset]
        std = [x[2] for x in dataset]
        mp_assert_allclose(res, std)
@check_version(mp, '0.19')
def test_gammainc():
    # Quick check that the gammainc in
    # special._precompute.gammainc_data agrees with mpmath's
    # gammainc.
    # NOTE: mpmath's gammainc(a, b=x) integrates over [0, x], i.e. the
    # (regularized) lower incomplete gamma function.
    assert_mpmath_equal(gammainc,
                        lambda a, x: mp.gammainc(a, b=x, regularized=True),
                        [Arg(0, 100, inclusive_a=False), Arg(0, 100)],
                        nan_ok=False, rtol=1e-17, n=50, dps=50)
@pytest.mark.xslow
@check_version(mp, '0.19')
def test_gammaincc():
    # Check that the gammaincc in special._precompute.gammainc_data
    # agrees with mpmath's gammainc.
    # NOTE: mpmath's gammainc(a, a=x) integrates over [x, inf], i.e. the
    # (regularized) upper incomplete gamma function.
    assert_mpmath_equal(lambda a, x: gammaincc(a, x, dps=1000),
                        lambda a, x: mp.gammainc(a, a=x, regularized=True),
                        [Arg(20, 100), Arg(20, 100)],
                        nan_ok=False, rtol=1e-17, n=50, dps=1000)
    # Test the fast integer path
    assert_mpmath_equal(gammaincc,
                        lambda a, x: mp.gammainc(a, a=x, regularized=True),
                        [IntArg(1, 100), Arg(0, 100)],
                        nan_ok=False, rtol=1e-17, n=50, dps=50)
| 4,459
| 39.917431
| 75
|
py
|
scipy
|
scipy-main/scipy/special/tests/test_wright_bessel.py
|
# Reference MPMATH implementation:
#
# import mpmath
# from mpmath import nsum
#
# def Wright_Series_MPMATH(a, b, z, dps=50, method='r+s+e', steps=[1000]):
# """Compute Wright' generalized Bessel function as Series.
#
# This uses mpmath for arbitrary precision.
# """
# with mpmath.workdps(dps):
# res = nsum(lambda k: z**k/mpmath.fac(k) * mpmath.rgamma(a*k+b),
# [0, mpmath.inf],
# tol=dps, method=method, steps=steps
# )
#
# return res
import pytest
import numpy as np
from numpy.testing import assert_equal, assert_allclose
import scipy.special as sc
from scipy.special import rgamma, wright_bessel
@pytest.mark.parametrize('a', [0, 1e-6, 0.1, 0.5, 1, 10])
@pytest.mark.parametrize('b', [0, 1e-6, 0.1, 0.5, 1, 10])
def test_wright_bessel_zero(a, b):
    """Test at x = 0.

    In the defining series sum_k x**k / k! * rgamma(a*k + b), only the
    k = 0 term survives at x = 0, so Phi(a, b, 0) = 1/Gamma(b) for any a.
    """
    assert_equal(wright_bessel(a, b, 0.), rgamma(b))
@pytest.mark.parametrize('b', [0, 1e-6, 0.1, 0.5, 1, 10])
@pytest.mark.parametrize('x', [0, 1e-6, 0.1, 0.5, 1])
def test_wright_bessel_iv(b, x):
    """Cross-check wright_bessel against the modified Bessel function iv.

    iv(v, x) = (x/2)**v * Phi(1, v+1; x**2/4), see https://dlmf.nist.gov/10.46.E2
    """
    if x == 0:
        return
    v = b - 1
    lhs = np.power(x / 2., v) * wright_bessel(1, v + 1, x**2 / 4.)
    # iv(v, x) itself is only accurate to ~1e-12 for some inputs
    # (e.g. v = 1 - 1e-6, x = 1e-6), hence the loose tolerances.
    assert_allclose(lhs, sc.iv(v, x), rtol=1e-11, atol=1e-11)
@pytest.mark.parametrize('a', [0, 1e-6, 0.1, 0.5, 1, 10])
@pytest.mark.parametrize('b', [1, 1 + 1e-3, 2, 5, 10])
@pytest.mark.parametrize('x', [0, 1e-6, 0.1, 0.5, 1, 5, 10, 100])
def test_wright_functional(a, b, x):
    """Check the recurrence Phi(a, b-1, x) = a*x*Phi(a, b+a, x) + (b-1)*Phi(a, b, x).

    Since d/dx Phi(a, b, x) = Phi(a, b-1, x), this is Eq. (22) of
    B. Stankovic, On the Function of E. M. Wright,
    Publ. de l' Institut Mathematique, Beograd,
    Nouvelle S`er. 10 (1970), 113-124.
    """
    lhs = wright_bessel(a, b - 1, x)
    rhs = (a * x * wright_bessel(a, b + a, x)
           + (b - 1) * wright_bessel(a, b, x))
    assert_allclose(lhs, rhs, rtol=1e-8, atol=1e-8)
# grid of rows [a, b, x, value, accuracy] that do not reach 1e-11 accuracy
# see output of:
# cd scipy/scipy/_precompute
# python wright_bessel_data.py
# Columns: a, b, x, reference value Phi(a, b, x), attainable rtol.
# accuracy = nan means wright_bessel returns nan at that point (asserted in
# test_wright_data_grid_less_accurate below).
grid_a_b_x_value_acc = np.array([
    [0.1, 100.0, 709.7827128933841, 8.026353022981087e+34, 2e-8],
    [0.5, 10.0, 709.7827128933841, 2.680788404494657e+48, 9e-8],
    [0.5, 10.0, 1000.0, 2.005901980702872e+64, 1e-8],
    [0.5, 100.0, 1000.0, 3.4112367580445246e-117, 6e-8],
    [1.0, 20.0, 100000.0, 1.7717158630699857e+225, 3e-11],
    [1.0, 100.0, 100000.0, 1.0269334596230763e+22, np.nan],
    [1.0000000000000222, 20.0, 100000.0, 1.7717158630001672e+225, 3e-11],
    [1.0000000000000222, 100.0, 100000.0, 1.0269334595866202e+22, np.nan],
    [1.5, 0.0, 500.0, 15648961196.432373, 3e-11],
    [1.5, 2.220446049250313e-14, 500.0, 15648961196.431465, 3e-11],
    [1.5, 1e-10, 500.0, 15648961192.344728, 3e-11],
    [1.5, 1e-05, 500.0, 15648552437.334162, 3e-11],
    [1.5, 0.1, 500.0, 12049870581.10317, 2e-11],
    [1.5, 20.0, 100000.0, 7.81930438331405e+43, 3e-9],
    [1.5, 100.0, 100000.0, 9.653370857459075e-130, np.nan],
    ])
@pytest.mark.xfail
@pytest.mark.parametrize(
    'a, b, x, phi',
    grid_a_b_x_value_acc[:, :4].tolist())
def test_wright_data_grid_failures(a, b, x, phi):
    """Test cases of test_data that do not reach relative accuracy of 1e-11"""
    # Marked xfail: these are precisely the points recorded in
    # grid_a_b_x_value_acc as not attaining rtol=1e-11.
    assert_allclose(wright_bessel(a, b, x), phi, rtol=1e-11)
@pytest.mark.parametrize(
    'a, b, x, phi, accuracy',
    grid_a_b_x_value_acc.tolist())
def test_wright_data_grid_less_accurate(a, b, x, phi, accuracy):
    """Check the hard grid points at their individually attainable accuracy.

    Same points as test_wright_data_grid_failures, but each is checked
    against the reduced tolerance recorded in the table (or against nan).
    """
    computed = wright_bessel(a, b, x)
    if np.isnan(accuracy):
        assert np.isnan(computed)
    else:
        assert_allclose(computed, phi, rtol=accuracy)
| 4,155
| 34.827586
| 78
|
py
|
scipy
|
scipy-main/scipy/special/tests/test_data.py
|
import os
import numpy as np
from numpy.testing import suppress_warnings
import pytest
from scipy.special import (
lpn, lpmn, lpmv, lqn, lqmn, sph_harm, eval_legendre, eval_hermite,
eval_laguerre, eval_genlaguerre, binom, cbrt, expm1, log1p, zeta,
jn, jv, jvp, yn, yv, yvp, iv, ivp, kn, kv, kvp,
gamma, gammaln, gammainc, gammaincc, gammaincinv, gammainccinv, digamma,
beta, betainc, betaincinv, poch,
ellipe, ellipeinc, ellipk, ellipkm1, ellipkinc,
elliprc, elliprd, elliprf, elliprg, elliprj,
erf, erfc, erfinv, erfcinv, exp1, expi, expn,
bdtrik, btdtr, btdtri, btdtria, btdtrib, chndtr, gdtr, gdtrc, gdtrix, gdtrib,
nbdtrik, pdtrik, owens_t,
mathieu_a, mathieu_b, mathieu_cem, mathieu_sem, mathieu_modcem1,
mathieu_modsem1, mathieu_modcem2, mathieu_modsem2,
ellip_harm, ellip_harm_2, spherical_jn, spherical_yn, wright_bessel
)
from scipy.integrate import IntegrationWarning
from scipy.special._testutils import FuncData
# Reference datasets (compressed .npz archives shipped alongside this file):
# values derived from the Boost C++ special-function tests, from GSL, and
# from locally generated high-precision data.
DATASETS_BOOST = np.load(os.path.join(os.path.dirname(__file__),
                                      "data", "boost.npz"))
DATASETS_GSL = np.load(os.path.join(os.path.dirname(__file__),
                                    "data", "gsl.npz"))
DATASETS_LOCAL = np.load(os.path.join(os.path.dirname(__file__),
                                      "data", "local.npz"))
def data(func, dataname, *a, **kw):
    """Build a FuncData case checking `func` against a Boost dataset."""
    if 'dataname' not in kw:
        kw['dataname'] = dataname
    return FuncData(func, DATASETS_BOOST[dataname], *a, **kw)
def data_gsl(func, dataname, *a, **kw):
    """Build a FuncData case checking `func` against a GSL dataset."""
    if 'dataname' not in kw:
        kw['dataname'] = dataname
    return FuncData(func, DATASETS_GSL[dataname], *a, **kw)
def data_local(func, dataname, *a, **kw):
    """Build a FuncData case checking `func` against a local dataset."""
    if 'dataname' not in kw:
        kw['dataname'] = dataname
    return FuncData(func, DATASETS_LOCAL[dataname], *a, **kw)
def ellipk_(k):
    """Complete elliptic integral K, parameterized by the modulus k (m = k**2)."""
    m = k * k
    return ellipk(m)
def ellipkinc_(f, k):
    """Incomplete elliptic integral F(f, m), parameterized by the modulus k."""
    m = k * k
    return ellipkinc(f, m)
def ellipe_(k):
    """Complete elliptic integral E, parameterized by the modulus k (m = k**2)."""
    m = k * k
    return ellipe(m)
def ellipeinc_(f, k):
    """Incomplete elliptic integral E(f, m), parameterized by the modulus k."""
    m = k * k
    return ellipeinc(f, m)
def zeta_(x):
    """Riemann zeta via the Hurwitz form zeta(x, q) with q = 1."""
    q = 1.
    return zeta(x, q)
def assoc_legendre_p_boost_(nu, mu, x):
    """Associated Legendre P for the Boost data (integer degrees only)."""
    degree = nu.astype(int)
    return lpmv(mu, degree, x)
def legendre_p_via_assoc_(nu, x):
    """Legendre P_nu(x) expressed as the order-0 associated Legendre function."""
    order = 0
    return lpmv(order, nu, x)
def lpn_(n, x):
    """Legendre P_n(x): last entry of the values returned by lpn."""
    values, _derivatives = lpn(n.astype('l'), x)
    return values[-1]
def lqn_(n, x):
    """Legendre Q_n(x): last entry of the values returned by lqn."""
    values, _derivatives = lqn(n.astype('l'), x)
    return values[-1]
def legendre_p_via_lpmn(n, x):
    """Legendre P_n(x) via the order-0 row of the lpmn table."""
    table = lpmn(0, n, x)[0]
    return table[0, -1]
def legendre_q_via_lqmn(n, x):
    """Legendre Q_n(x) via the order-0 row of the lqmn table."""
    table = lqmn(0, n, x)[0]
    return table[0, -1]
def mathieu_ce_rad(m, q, x):
    """Mathieu ce_m with the argument in radians (scipy expects degrees)."""
    deg = x * 180 / np.pi
    return mathieu_cem(m, q, deg)[0]
def mathieu_se_rad(m, q, x):
    """Mathieu se_m with the argument in radians (scipy expects degrees)."""
    deg = x * 180 / np.pi
    return mathieu_sem(m, q, deg)[0]
def mathieu_mc1_scaled(m, q, x):
    """Modified Mathieu Mc1 rescaled by sqrt(pi/2).

    scipy follows the Abramowitz & Stegun normalization; GSL apparently uses
    a different one, and the sqrt(pi/2) factor maps between the two.
    """
    value, _deriv = mathieu_modcem1(m, q, x)
    return value * np.sqrt(np.pi / 2)
def mathieu_ms1_scaled(m, q, x):
    """Modified Mathieu Ms1 rescaled by sqrt(pi/2) (GSL normalization)."""
    value, _deriv = mathieu_modsem1(m, q, x)
    return value * np.sqrt(np.pi / 2)
def mathieu_mc2_scaled(m, q, x):
    """Modified Mathieu Mc2 rescaled by sqrt(pi/2) (GSL normalization)."""
    value, _deriv = mathieu_modcem2(m, q, x)
    return value * np.sqrt(np.pi / 2)
def mathieu_ms2_scaled(m, q, x):
    """Modified Mathieu Ms2 rescaled by sqrt(pi/2) (GSL normalization)."""
    value, _deriv = mathieu_modsem2(m, q, x)
    return value * np.sqrt(np.pi / 2)
def eval_legendre_ld(n, x):
    """eval_legendre with the degree cast to a C long."""
    degree = n.astype('l')
    return eval_legendre(degree, x)
def eval_legendre_dd(n, x):
    """eval_legendre with the degree cast to double (non-integer code path)."""
    degree = n.astype('d')
    return eval_legendre(degree, x)
def eval_hermite_ld(n, x):
    """eval_hermite with the degree cast to a C long."""
    degree = n.astype('l')
    return eval_hermite(degree, x)
def eval_laguerre_ld(n, x):
    """eval_laguerre with the degree cast to a C long."""
    degree = n.astype('l')
    return eval_laguerre(degree, x)
def eval_laguerre_dd(n, x):
    """eval_laguerre with the degree cast to double (non-integer code path)."""
    degree = n.astype('d')
    return eval_laguerre(degree, x)
def eval_genlaguerre_ldd(n, a, x):
    """eval_genlaguerre with the degree cast to a C long."""
    degree = n.astype('l')
    return eval_genlaguerre(degree, a, x)
def eval_genlaguerre_ddd(n, a, x):
    """eval_genlaguerre with the degree cast to double (non-integer code path)."""
    degree = n.astype('d')
    return eval_genlaguerre(degree, a, x)
def bdtrik_comp(y, n, p):
    """bdtrik evaluated on the complemented probability 1 - y."""
    return bdtrik(1 - y, n, p)
def btdtri_comp(a, b, p):
    """btdtri evaluated on the complemented probability 1 - p."""
    return btdtri(a, b, 1 - p)
def btdtria_comp(p, b, x):
    """btdtria evaluated on the complemented probability 1 - p."""
    return btdtria(1 - p, b, x)
def btdtrib_comp(a, p, x):
    """btdtrib evaluated on the complemented probability 1 - p."""
    return btdtrib(a, 1 - p, x)
def gdtr_(p, x):
    """Gamma CDF gdtr with the rate parameter fixed at 1."""
    rate = 1.0
    return gdtr(rate, p, x)
def gdtrc_(p, x):
    """Gamma survival function gdtrc with the rate parameter fixed at 1."""
    rate = 1.0
    return gdtrc(rate, p, x)
def gdtrix_(b, p):
    """Inverse (in x) of the gamma CDF, with the rate parameter fixed at 1."""
    rate = 1.0
    return gdtrix(rate, b, p)
def gdtrix_comp(b, p):
    """gdtrix_ evaluated on the complemented probability 1 - p."""
    rate = 1.0
    return gdtrix(rate, b, 1 - p)
def gdtrib_(p, x):
    """Inverse (in b) of the gamma CDF, with the rate parameter fixed at 1."""
    rate = 1.0
    return gdtrib(rate, p, x)
def gdtrib_comp(p, x):
    """gdtrib_ evaluated on the complemented probability 1 - p."""
    rate = 1.0
    return gdtrib(rate, 1 - p, x)
def nbdtrik_comp(y, n, p):
    """nbdtrik evaluated on the complemented probability 1 - y."""
    return nbdtrik(1 - y, n, p)
def pdtrik_comp(p, m):
    """pdtrik evaluated on the complemented probability 1 - p."""
    return pdtrik(1 - p, m)
def poch_(z, m):
    """Reciprocal of the Pochhammer symbol (z)_m."""
    return 1.0 / poch(z, m)
def poch_minus(z, m):
    """Reciprocal of the Pochhammer symbol with negated increment, 1/(z)_{-m}."""
    return 1.0 / poch(z, -m)
def spherical_jn_(n, x):
    """spherical_jn with the order cast to a C long."""
    order = n.astype('l')
    return spherical_jn(order, x)
def spherical_yn_(n, x):
    """spherical_yn with the order cast to a C long."""
    order = n.astype('l')
    return spherical_yn(order, x)
def sph_harm_(m, n, theta, phi):
    """Spherical harmonic Y_n^m split into its real and imaginary parts."""
    value = sph_harm(m, n, theta, phi)
    return (value.real, value.imag)
def cexpm1(x, y):
    """Real and imaginary parts of expm1 at the complex point x + i*y."""
    w = expm1(x + 1j*y)
    return w.real, w.imag
def clog1p(x, y):
    """Real and imaginary parts of log1p at the complex point x + i*y."""
    w = log1p(x + 1j*y)
    return w.real, w.imag
BOOST_TESTS = [
data(assoc_legendre_p_boost_, 'assoc_legendre_p_ipp-assoc_legendre_p', (0,1,2), 3, rtol=1e-11),
data(legendre_p_via_assoc_, 'legendre_p_ipp-legendre_p', (0,1), 2, rtol=1e-11),
data(legendre_p_via_assoc_, 'legendre_p_large_ipp-legendre_p_large', (0,1), 2, rtol=9.6e-14),
data(legendre_p_via_lpmn, 'legendre_p_ipp-legendre_p', (0,1), 2, rtol=5e-14, vectorized=False),
data(legendre_p_via_lpmn, 'legendre_p_large_ipp-legendre_p_large', (0,1), 2, rtol=9.6e-14, vectorized=False),
data(lpn_, 'legendre_p_ipp-legendre_p', (0,1), 2, rtol=5e-14, vectorized=False),
data(lpn_, 'legendre_p_large_ipp-legendre_p_large', (0,1), 2, rtol=3e-13, vectorized=False),
data(eval_legendre_ld, 'legendre_p_ipp-legendre_p', (0,1), 2, rtol=6e-14),
data(eval_legendre_ld, 'legendre_p_large_ipp-legendre_p_large', (0,1), 2, rtol=2e-13),
data(eval_legendre_dd, 'legendre_p_ipp-legendre_p', (0,1), 2, rtol=2e-14),
data(eval_legendre_dd, 'legendre_p_large_ipp-legendre_p_large', (0,1), 2, rtol=2e-13),
data(lqn_, 'legendre_p_ipp-legendre_p', (0,1), 3, rtol=2e-14, vectorized=False),
data(lqn_, 'legendre_p_large_ipp-legendre_p_large', (0,1), 3, rtol=2e-12, vectorized=False),
data(legendre_q_via_lqmn, 'legendre_p_ipp-legendre_p', (0,1), 3, rtol=2e-14, vectorized=False),
data(legendre_q_via_lqmn, 'legendre_p_large_ipp-legendre_p_large', (0,1), 3, rtol=2e-12, vectorized=False),
data(beta, 'beta_exp_data_ipp-beta_exp_data', (0,1), 2, rtol=1e-13),
data(beta, 'beta_exp_data_ipp-beta_exp_data', (0,1), 2, rtol=1e-13),
data(beta, 'beta_med_data_ipp-beta_med_data', (0,1), 2, rtol=5e-13),
data(betainc, 'ibeta_small_data_ipp-ibeta_small_data', (0,1,2), 5, rtol=6e-15),
data(betainc, 'ibeta_data_ipp-ibeta_data', (0,1,2), 5, rtol=5e-13),
data(betainc, 'ibeta_int_data_ipp-ibeta_int_data', (0,1,2), 5, rtol=2e-14),
data(betainc, 'ibeta_large_data_ipp-ibeta_large_data', (0,1,2), 5, rtol=4e-10),
data(betaincinv, 'ibeta_inv_data_ipp-ibeta_inv_data', (0,1,2), 3, rtol=1e-5),
data(btdtr, 'ibeta_small_data_ipp-ibeta_small_data', (0,1,2), 5, rtol=6e-15),
data(btdtr, 'ibeta_data_ipp-ibeta_data', (0,1,2), 5, rtol=4e-13),
data(btdtr, 'ibeta_int_data_ipp-ibeta_int_data', (0,1,2), 5, rtol=2e-14),
data(btdtr, 'ibeta_large_data_ipp-ibeta_large_data', (0,1,2), 5, rtol=4e-10),
data(btdtri, 'ibeta_inv_data_ipp-ibeta_inv_data', (0,1,2), 3, rtol=1e-5),
data(btdtri_comp, 'ibeta_inv_data_ipp-ibeta_inv_data', (0,1,2), 4, rtol=8e-7),
data(btdtria, 'ibeta_inva_data_ipp-ibeta_inva_data', (2,0,1), 3, rtol=5e-9),
data(btdtria_comp, 'ibeta_inva_data_ipp-ibeta_inva_data', (2,0,1), 4, rtol=5e-9),
data(btdtrib, 'ibeta_inva_data_ipp-ibeta_inva_data', (0,2,1), 5, rtol=5e-9),
data(btdtrib_comp, 'ibeta_inva_data_ipp-ibeta_inva_data', (0,2,1), 6, rtol=5e-9),
data(binom, 'binomial_data_ipp-binomial_data', (0,1), 2, rtol=1e-13),
data(binom, 'binomial_large_data_ipp-binomial_large_data', (0,1), 2, rtol=5e-13),
data(bdtrik, 'binomial_quantile_ipp-binomial_quantile_data', (2,0,1), 3, rtol=5e-9),
data(bdtrik_comp, 'binomial_quantile_ipp-binomial_quantile_data', (2,0,1), 4, rtol=5e-9),
data(nbdtrik, 'negative_binomial_quantile_ipp-negative_binomial_quantile_data', (2,0,1), 3, rtol=4e-9),
data(nbdtrik_comp, 'negative_binomial_quantile_ipp-negative_binomial_quantile_data', (2,0,1), 4, rtol=4e-9),
data(pdtrik, 'poisson_quantile_ipp-poisson_quantile_data', (1,0), 2, rtol=3e-9),
data(pdtrik_comp, 'poisson_quantile_ipp-poisson_quantile_data', (1,0), 3, rtol=4e-9),
data(cbrt, 'cbrt_data_ipp-cbrt_data', 1, 0),
data(digamma, 'digamma_data_ipp-digamma_data', 0, 1),
data(digamma, 'digamma_data_ipp-digamma_data', 0j, 1),
data(digamma, 'digamma_neg_data_ipp-digamma_neg_data', 0, 1, rtol=2e-13),
data(digamma, 'digamma_neg_data_ipp-digamma_neg_data', 0j, 1, rtol=1e-13),
data(digamma, 'digamma_root_data_ipp-digamma_root_data', 0, 1, rtol=1e-15),
data(digamma, 'digamma_root_data_ipp-digamma_root_data', 0j, 1, rtol=1e-15),
data(digamma, 'digamma_small_data_ipp-digamma_small_data', 0, 1, rtol=1e-15),
data(digamma, 'digamma_small_data_ipp-digamma_small_data', 0j, 1, rtol=1e-14),
data(ellipk_, 'ellint_k_data_ipp-ellint_k_data', 0, 1),
data(ellipkinc_, 'ellint_f_data_ipp-ellint_f_data', (0,1), 2, rtol=1e-14),
data(ellipe_, 'ellint_e_data_ipp-ellint_e_data', 0, 1),
data(ellipeinc_, 'ellint_e2_data_ipp-ellint_e2_data', (0,1), 2, rtol=1e-14),
data(erf, 'erf_data_ipp-erf_data', 0, 1),
data(erf, 'erf_data_ipp-erf_data', 0j, 1, rtol=1e-13),
data(erfc, 'erf_data_ipp-erf_data', 0, 2, rtol=6e-15),
data(erf, 'erf_large_data_ipp-erf_large_data', 0, 1),
data(erf, 'erf_large_data_ipp-erf_large_data', 0j, 1),
data(erfc, 'erf_large_data_ipp-erf_large_data', 0, 2, rtol=4e-14),
data(erf, 'erf_small_data_ipp-erf_small_data', 0, 1),
data(erf, 'erf_small_data_ipp-erf_small_data', 0j, 1, rtol=1e-13),
data(erfc, 'erf_small_data_ipp-erf_small_data', 0, 2),
data(erfinv, 'erf_inv_data_ipp-erf_inv_data', 0, 1),
data(erfcinv, 'erfc_inv_data_ipp-erfc_inv_data', 0, 1),
data(erfcinv, 'erfc_inv_big_data_ipp-erfc_inv_big_data', 0, 1, param_filter=(lambda s: s > 0)),
data(exp1, 'expint_1_data_ipp-expint_1_data', 1, 2, rtol=1e-13),
data(exp1, 'expint_1_data_ipp-expint_1_data', 1j, 2, rtol=5e-9),
data(expi, 'expinti_data_ipp-expinti_data', 0, 1, rtol=1e-13),
data(expi, 'expinti_data_double_ipp-expinti_data_double', 0, 1, rtol=1e-13),
data(expi, 'expinti_data_long_ipp-expinti_data_long', 0, 1),
data(expn, 'expint_small_data_ipp-expint_small_data', (0,1), 2),
data(expn, 'expint_data_ipp-expint_data', (0,1), 2, rtol=1e-14),
data(gamma, 'test_gamma_data_ipp-near_0', 0, 1),
data(gamma, 'test_gamma_data_ipp-near_1', 0, 1),
data(gamma, 'test_gamma_data_ipp-near_2', 0, 1),
data(gamma, 'test_gamma_data_ipp-near_m10', 0, 1),
data(gamma, 'test_gamma_data_ipp-near_m55', 0, 1, rtol=7e-12),
data(gamma, 'test_gamma_data_ipp-factorials', 0, 1, rtol=4e-14),
data(gamma, 'test_gamma_data_ipp-near_0', 0j, 1, rtol=2e-9),
data(gamma, 'test_gamma_data_ipp-near_1', 0j, 1, rtol=2e-9),
data(gamma, 'test_gamma_data_ipp-near_2', 0j, 1, rtol=2e-9),
data(gamma, 'test_gamma_data_ipp-near_m10', 0j, 1, rtol=2e-9),
data(gamma, 'test_gamma_data_ipp-near_m55', 0j, 1, rtol=2e-9),
data(gamma, 'test_gamma_data_ipp-factorials', 0j, 1, rtol=2e-13),
data(gammaln, 'test_gamma_data_ipp-near_0', 0, 2, rtol=5e-11),
data(gammaln, 'test_gamma_data_ipp-near_1', 0, 2, rtol=5e-11),
data(gammaln, 'test_gamma_data_ipp-near_2', 0, 2, rtol=2e-10),
data(gammaln, 'test_gamma_data_ipp-near_m10', 0, 2, rtol=5e-11),
data(gammaln, 'test_gamma_data_ipp-near_m55', 0, 2, rtol=5e-11),
data(gammaln, 'test_gamma_data_ipp-factorials', 0, 2),
data(gammainc, 'igamma_small_data_ipp-igamma_small_data', (0,1), 5, rtol=5e-15),
data(gammainc, 'igamma_med_data_ipp-igamma_med_data', (0,1), 5, rtol=2e-13),
data(gammainc, 'igamma_int_data_ipp-igamma_int_data', (0,1), 5, rtol=2e-13),
data(gammainc, 'igamma_big_data_ipp-igamma_big_data', (0,1), 5, rtol=1e-12),
data(gdtr_, 'igamma_small_data_ipp-igamma_small_data', (0,1), 5, rtol=1e-13),
data(gdtr_, 'igamma_med_data_ipp-igamma_med_data', (0,1), 5, rtol=2e-13),
data(gdtr_, 'igamma_int_data_ipp-igamma_int_data', (0,1), 5, rtol=2e-13),
data(gdtr_, 'igamma_big_data_ipp-igamma_big_data', (0,1), 5, rtol=2e-9),
data(gammaincc, 'igamma_small_data_ipp-igamma_small_data', (0,1), 3, rtol=1e-13),
data(gammaincc, 'igamma_med_data_ipp-igamma_med_data', (0,1), 3, rtol=2e-13),
data(gammaincc, 'igamma_int_data_ipp-igamma_int_data', (0,1), 3, rtol=4e-14),
data(gammaincc, 'igamma_big_data_ipp-igamma_big_data', (0,1), 3, rtol=1e-11),
data(gdtrc_, 'igamma_small_data_ipp-igamma_small_data', (0,1), 3, rtol=1e-13),
data(gdtrc_, 'igamma_med_data_ipp-igamma_med_data', (0,1), 3, rtol=2e-13),
data(gdtrc_, 'igamma_int_data_ipp-igamma_int_data', (0,1), 3, rtol=4e-14),
data(gdtrc_, 'igamma_big_data_ipp-igamma_big_data', (0,1), 3, rtol=1e-11),
data(gdtrib_, 'igamma_inva_data_ipp-igamma_inva_data', (1,0), 2, rtol=5e-9),
data(gdtrib_comp, 'igamma_inva_data_ipp-igamma_inva_data', (1,0), 3, rtol=5e-9),
data(poch_, 'tgamma_delta_ratio_data_ipp-tgamma_delta_ratio_data', (0,1), 2, rtol=2e-13),
data(poch_, 'tgamma_delta_ratio_int_ipp-tgamma_delta_ratio_int', (0,1), 2,),
data(poch_, 'tgamma_delta_ratio_int2_ipp-tgamma_delta_ratio_int2', (0,1), 2,),
data(poch_minus, 'tgamma_delta_ratio_data_ipp-tgamma_delta_ratio_data', (0,1), 3, rtol=2e-13),
data(poch_minus, 'tgamma_delta_ratio_int_ipp-tgamma_delta_ratio_int', (0,1), 3),
data(poch_minus, 'tgamma_delta_ratio_int2_ipp-tgamma_delta_ratio_int2', (0,1), 3),
data(eval_hermite_ld, 'hermite_ipp-hermite', (0,1), 2, rtol=2e-14),
data(eval_laguerre_ld, 'laguerre2_ipp-laguerre2', (0,1), 2, rtol=7e-12),
data(eval_laguerre_dd, 'laguerre2_ipp-laguerre2', (0,1), 2, knownfailure='hyp2f1 insufficiently accurate.'),
data(eval_genlaguerre_ldd, 'laguerre3_ipp-laguerre3', (0,1,2), 3, rtol=2e-13),
data(eval_genlaguerre_ddd, 'laguerre3_ipp-laguerre3', (0,1,2), 3, knownfailure='hyp2f1 insufficiently accurate.'),
data(log1p, 'log1p_expm1_data_ipp-log1p_expm1_data', 0, 1),
data(expm1, 'log1p_expm1_data_ipp-log1p_expm1_data', 0, 2),
data(iv, 'bessel_i_data_ipp-bessel_i_data', (0,1), 2, rtol=1e-12),
data(iv, 'bessel_i_data_ipp-bessel_i_data', (0,1j), 2, rtol=2e-10, atol=1e-306),
data(iv, 'bessel_i_int_data_ipp-bessel_i_int_data', (0,1), 2, rtol=1e-9),
data(iv, 'bessel_i_int_data_ipp-bessel_i_int_data', (0,1j), 2, rtol=2e-10),
data(ivp, 'bessel_i_prime_int_data_ipp-bessel_i_prime_int_data', (0,1), 2, rtol=1.2e-13),
data(ivp, 'bessel_i_prime_int_data_ipp-bessel_i_prime_int_data', (0,1j), 2, rtol=1.2e-13, atol=1e-300),
data(jn, 'bessel_j_int_data_ipp-bessel_j_int_data', (0,1), 2, rtol=1e-12),
data(jn, 'bessel_j_int_data_ipp-bessel_j_int_data', (0,1j), 2, rtol=1e-12),
data(jn, 'bessel_j_large_data_ipp-bessel_j_large_data', (0,1), 2, rtol=6e-11),
data(jn, 'bessel_j_large_data_ipp-bessel_j_large_data', (0,1j), 2, rtol=6e-11),
data(jv, 'bessel_j_int_data_ipp-bessel_j_int_data', (0,1), 2, rtol=1e-12),
data(jv, 'bessel_j_int_data_ipp-bessel_j_int_data', (0,1j), 2, rtol=1e-12),
data(jv, 'bessel_j_data_ipp-bessel_j_data', (0,1), 2, rtol=1e-12),
data(jv, 'bessel_j_data_ipp-bessel_j_data', (0,1j), 2, rtol=1e-12),
data(jvp, 'bessel_j_prime_int_data_ipp-bessel_j_prime_int_data', (0,1), 2, rtol=1e-13),
data(jvp, 'bessel_j_prime_int_data_ipp-bessel_j_prime_int_data', (0,1j), 2, rtol=1e-13),
data(jvp, 'bessel_j_prime_large_data_ipp-bessel_j_prime_large_data', (0,1), 2, rtol=1e-11),
data(jvp, 'bessel_j_prime_large_data_ipp-bessel_j_prime_large_data', (0,1j), 2, rtol=1e-11),
data(kn, 'bessel_k_int_data_ipp-bessel_k_int_data', (0,1), 2, rtol=1e-12),
data(kv, 'bessel_k_int_data_ipp-bessel_k_int_data', (0,1), 2, rtol=1e-12),
data(kv, 'bessel_k_int_data_ipp-bessel_k_int_data', (0,1j), 2, rtol=1e-12),
data(kv, 'bessel_k_data_ipp-bessel_k_data', (0,1), 2, rtol=1e-12),
data(kv, 'bessel_k_data_ipp-bessel_k_data', (0,1j), 2, rtol=1e-12),
data(kvp, 'bessel_k_prime_int_data_ipp-bessel_k_prime_int_data', (0,1), 2, rtol=3e-14),
data(kvp, 'bessel_k_prime_int_data_ipp-bessel_k_prime_int_data', (0,1j), 2, rtol=3e-14),
data(kvp, 'bessel_k_prime_data_ipp-bessel_k_prime_data', (0,1), 2, rtol=7e-14),
data(kvp, 'bessel_k_prime_data_ipp-bessel_k_prime_data', (0,1j), 2, rtol=7e-14),
data(yn, 'bessel_y01_data_ipp-bessel_y01_data', (0,1), 2, rtol=1e-12),
data(yn, 'bessel_yn_data_ipp-bessel_yn_data', (0,1), 2, rtol=1e-12),
data(yv, 'bessel_yn_data_ipp-bessel_yn_data', (0,1), 2, rtol=1e-12),
data(yv, 'bessel_yn_data_ipp-bessel_yn_data', (0,1j), 2, rtol=1e-12),
data(yv, 'bessel_yv_data_ipp-bessel_yv_data', (0,1), 2, rtol=1e-10),
data(yv, 'bessel_yv_data_ipp-bessel_yv_data', (0,1j), 2, rtol=1e-10),
data(yvp, 'bessel_yv_prime_data_ipp-bessel_yv_prime_data', (0, 1), 2, rtol=4e-9),
data(yvp, 'bessel_yv_prime_data_ipp-bessel_yv_prime_data', (0, 1j), 2, rtol=4e-9),
data(zeta_, 'zeta_data_ipp-zeta_data', 0, 1, param_filter=(lambda s: s > 1)),
data(zeta_, 'zeta_neg_data_ipp-zeta_neg_data', 0, 1, param_filter=(lambda s: s > 1)),
data(zeta_, 'zeta_1_up_data_ipp-zeta_1_up_data', 0, 1, param_filter=(lambda s: s > 1)),
data(zeta_, 'zeta_1_below_data_ipp-zeta_1_below_data', 0, 1, param_filter=(lambda s: s > 1)),
data(gammaincinv, 'gamma_inv_small_data_ipp-gamma_inv_small_data', (0,1), 2, rtol=1e-11),
data(gammaincinv, 'gamma_inv_data_ipp-gamma_inv_data', (0,1), 2, rtol=1e-14),
data(gammaincinv, 'gamma_inv_big_data_ipp-gamma_inv_big_data', (0,1), 2, rtol=1e-11),
data(gammainccinv, 'gamma_inv_small_data_ipp-gamma_inv_small_data', (0,1), 3, rtol=1e-12),
data(gammainccinv, 'gamma_inv_data_ipp-gamma_inv_data', (0,1), 3, rtol=1e-14),
data(gammainccinv, 'gamma_inv_big_data_ipp-gamma_inv_big_data', (0,1), 3, rtol=1e-14),
data(gdtrix_, 'gamma_inv_small_data_ipp-gamma_inv_small_data', (0,1), 2, rtol=3e-13, knownfailure='gdtrix unflow some points'),
data(gdtrix_, 'gamma_inv_data_ipp-gamma_inv_data', (0,1), 2, rtol=3e-15),
data(gdtrix_, 'gamma_inv_big_data_ipp-gamma_inv_big_data', (0,1), 2),
data(gdtrix_comp, 'gamma_inv_small_data_ipp-gamma_inv_small_data', (0,1), 2, knownfailure='gdtrix bad some points'),
data(gdtrix_comp, 'gamma_inv_data_ipp-gamma_inv_data', (0,1), 3, rtol=6e-15),
data(gdtrix_comp, 'gamma_inv_big_data_ipp-gamma_inv_big_data', (0,1), 3),
data(chndtr, 'nccs_ipp-nccs', (2,0,1), 3, rtol=3e-5),
data(chndtr, 'nccs_big_ipp-nccs_big', (2,0,1), 3, rtol=5e-4, knownfailure='chndtr inaccurate some points'),
data(sph_harm_, 'spherical_harmonic_ipp-spherical_harmonic', (1,0,3,2), (4,5), rtol=5e-11,
param_filter=(lambda p: np.ones(p.shape, '?'),
lambda p: np.ones(p.shape, '?'),
lambda p: np.logical_and(p < 2*np.pi, p >= 0),
lambda p: np.logical_and(p < np.pi, p >= 0))),
data(spherical_jn_, 'sph_bessel_data_ipp-sph_bessel_data', (0,1), 2, rtol=1e-13),
data(spherical_yn_, 'sph_neumann_data_ipp-sph_neumann_data', (0,1), 2, rtol=8e-15),
data(owens_t, 'owens_t_ipp-owens_t', (0, 1), 2, rtol=5e-14),
data(owens_t, 'owens_t_large_data_ipp-owens_t_large_data', (0, 1), 2, rtol=8e-12),
# -- test data exists in boost but is not used in scipy --
# ibeta_derivative_data_ipp/ibeta_derivative_data.txt
# ibeta_derivative_int_data_ipp/ibeta_derivative_int_data.txt
# ibeta_derivative_large_data_ipp/ibeta_derivative_large_data.txt
# ibeta_derivative_small_data_ipp/ibeta_derivative_small_data.txt
# bessel_y01_prime_data_ipp/bessel_y01_prime_data.txt
# bessel_yn_prime_data_ipp/bessel_yn_prime_data.txt
# sph_bessel_prime_data_ipp/sph_bessel_prime_data.txt
# sph_neumann_prime_data_ipp/sph_neumann_prime_data.txt
# ellint_d2_data_ipp/ellint_d2_data.txt
# ellint_d_data_ipp/ellint_d_data.txt
# ellint_pi2_data_ipp/ellint_pi2_data.txt
# ellint_pi3_data_ipp/ellint_pi3_data.txt
# ellint_pi3_large_data_ipp/ellint_pi3_large_data.txt
data(elliprc, 'ellint_rc_data_ipp-ellint_rc_data', (0, 1), 2,
rtol=5e-16),
data(elliprd, 'ellint_rd_data_ipp-ellint_rd_data', (0, 1, 2), 3,
rtol=5e-16),
data(elliprd, 'ellint_rd_0xy_ipp-ellint_rd_0xy', (0, 1, 2), 3,
rtol=5e-16),
data(elliprd, 'ellint_rd_0yy_ipp-ellint_rd_0yy', (0, 1, 2), 3,
rtol=5e-16),
data(elliprd, 'ellint_rd_xxx_ipp-ellint_rd_xxx', (0, 1, 2), 3,
rtol=5e-16),
# Some of the following rtol for elliprd may be larger than 5e-16 to
# work around some hard cases in the Boost test where we get slightly
# larger error than the ideal bound when the x (==y) input is close to
# zero.
    # Also the accuracy on 32-bit builds with g++ may suffer from excess
# loss of precision; see GCC bugzilla 323
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=323
data(elliprd, 'ellint_rd_xxz_ipp-ellint_rd_xxz', (0, 1, 2), 3,
rtol=6.5e-16),
data(elliprd, 'ellint_rd_xyy_ipp-ellint_rd_xyy', (0, 1, 2), 3,
rtol=6e-16),
data(elliprf, 'ellint_rf_data_ipp-ellint_rf_data', (0, 1, 2), 3,
rtol=5e-16),
data(elliprf, 'ellint_rf_xxx_ipp-ellint_rf_xxx', (0, 1, 2), 3,
rtol=5e-16),
data(elliprf, 'ellint_rf_xyy_ipp-ellint_rf_xyy', (0, 1, 2), 3,
rtol=5e-16),
data(elliprf, 'ellint_rf_xy0_ipp-ellint_rf_xy0', (0, 1, 2), 3,
rtol=5e-16),
data(elliprf, 'ellint_rf_0yy_ipp-ellint_rf_0yy', (0, 1, 2), 3,
rtol=5e-16),
# The accuracy of R_G is primarily limited by R_D that is used
# internally. It is generally worse than R_D. Notice that we increased
# the rtol for R_G here. The cases with duplicate arguments are
# slightly less likely to be unbalanced (at least two arguments are
# already balanced) so the error bound is slightly better. Again,
# precision with g++ 32-bit is even worse.
data(elliprg, 'ellint_rg_ipp-ellint_rg', (0, 1, 2), 3,
rtol=8.0e-16),
data(elliprg, 'ellint_rg_xxx_ipp-ellint_rg_xxx', (0, 1, 2), 3,
rtol=6e-16),
data(elliprg, 'ellint_rg_xyy_ipp-ellint_rg_xyy', (0, 1, 2), 3,
rtol=7.5e-16),
data(elliprg, 'ellint_rg_xy0_ipp-ellint_rg_xy0', (0, 1, 2), 3,
rtol=5e-16),
data(elliprg, 'ellint_rg_00x_ipp-ellint_rg_00x', (0, 1, 2), 3,
rtol=5e-16),
data(elliprj, 'ellint_rj_data_ipp-ellint_rj_data', (0, 1, 2, 3), 4,
rtol=5e-16, atol=1e-25,
param_filter=(lambda s: s <= 5e-26,)),
# ellint_rc_data_ipp/ellint_rc_data.txt
# ellint_rd_0xy_ipp/ellint_rd_0xy.txt
# ellint_rd_0yy_ipp/ellint_rd_0yy.txt
# ellint_rd_data_ipp/ellint_rd_data.txt
# ellint_rd_xxx_ipp/ellint_rd_xxx.txt
# ellint_rd_xxz_ipp/ellint_rd_xxz.txt
# ellint_rd_xyy_ipp/ellint_rd_xyy.txt
# ellint_rf_0yy_ipp/ellint_rf_0yy.txt
# ellint_rf_data_ipp/ellint_rf_data.txt
# ellint_rf_xxx_ipp/ellint_rf_xxx.txt
# ellint_rf_xy0_ipp/ellint_rf_xy0.txt
# ellint_rf_xyy_ipp/ellint_rf_xyy.txt
# ellint_rg_00x_ipp/ellint_rg_00x.txt
# ellint_rg_ipp/ellint_rg.txt
# ellint_rg_xxx_ipp/ellint_rg_xxx.txt
# ellint_rg_xy0_ipp/ellint_rg_xy0.txt
# ellint_rg_xyy_ipp/ellint_rg_xyy.txt
# ellint_rj_data_ipp/ellint_rj_data.txt
# ellint_rj_e2_ipp/ellint_rj_e2.txt
# ellint_rj_e3_ipp/ellint_rj_e3.txt
# ellint_rj_e4_ipp/ellint_rj_e4.txt
# ellint_rj_zp_ipp/ellint_rj_zp.txt
# jacobi_elliptic_ipp/jacobi_elliptic.txt
# jacobi_elliptic_small_ipp/jacobi_elliptic_small.txt
# jacobi_large_phi_ipp/jacobi_large_phi.txt
# jacobi_near_1_ipp/jacobi_near_1.txt
# jacobi_zeta_big_phi_ipp/jacobi_zeta_big_phi.txt
# jacobi_zeta_data_ipp/jacobi_zeta_data.txt
# heuman_lambda_data_ipp/heuman_lambda_data.txt
# hypergeometric_0F2_ipp/hypergeometric_0F2.txt
# hypergeometric_1F1_big_ipp/hypergeometric_1F1_big.txt
# hypergeometric_1F1_ipp/hypergeometric_1F1.txt
# hypergeometric_1F1_small_random_ipp/hypergeometric_1F1_small_random.txt
# hypergeometric_1F2_ipp/hypergeometric_1F2.txt
# hypergeometric_1f1_large_regularized_ipp/hypergeometric_1f1_large_regularized.txt
# hypergeometric_1f1_log_large_unsolved_ipp/hypergeometric_1f1_log_large_unsolved.txt
# hypergeometric_2F0_half_ipp/hypergeometric_2F0_half.txt
# hypergeometric_2F0_integer_a2_ipp/hypergeometric_2F0_integer_a2.txt
# hypergeometric_2F0_ipp/hypergeometric_2F0.txt
# hypergeometric_2F0_large_z_ipp/hypergeometric_2F0_large_z.txt
# hypergeometric_2F1_ipp/hypergeometric_2F1.txt
# hypergeometric_2F2_ipp/hypergeometric_2F2.txt
# ncbeta_big_ipp/ncbeta_big.txt
# nct_small_delta_ipp/nct_small_delta.txt
# nct_asym_ipp/nct_asym.txt
# ncbeta_ipp/ncbeta.txt
# powm1_data_ipp/powm1_big_data.txt
# powm1_sqrtp1m1_test_hpp/sqrtp1m1_data.txt
# sinc_data_ipp/sinc_data.txt
# test_gamma_data_ipp/gammap1m1_data.txt
# tgamma_ratio_data_ipp/tgamma_ratio_data.txt
# trig_data_ipp/trig_data.txt
# trig_data2_ipp/trig_data2.txt
]
@pytest.mark.parametrize('test', BOOST_TESTS, ids=repr)
def test_boost(test):
    # Run each Boost-derived dataset case through the common harness.
    _test_factory(test)
# Dataset cases generated from the GNU Scientific Library (GSL).
GSL_TESTS = [
        data_gsl(mathieu_a, 'mathieu_ab', (0, 1), 2, rtol=1e-13, atol=1e-13),
        data_gsl(mathieu_b, 'mathieu_ab', (0, 1), 3, rtol=1e-13, atol=1e-13),
        # Also the GSL output has limited accuracy...
        data_gsl(mathieu_ce_rad, 'mathieu_ce_se', (0, 1, 2), 3, rtol=1e-7, atol=1e-13),
        data_gsl(mathieu_se_rad, 'mathieu_ce_se', (0, 1, 2), 4, rtol=1e-7, atol=1e-13),
        data_gsl(mathieu_mc1_scaled, 'mathieu_mc_ms', (0, 1, 2), 3, rtol=1e-7, atol=1e-13),
        data_gsl(mathieu_ms1_scaled, 'mathieu_mc_ms', (0, 1, 2), 4, rtol=1e-7, atol=1e-13),
        data_gsl(mathieu_mc2_scaled, 'mathieu_mc_ms', (0, 1, 2), 5, rtol=1e-7, atol=1e-13),
        data_gsl(mathieu_ms2_scaled, 'mathieu_mc_ms', (0, 1, 2), 6, rtol=1e-7, atol=1e-13),
]
@pytest.mark.parametrize('test', GSL_TESTS, ids=repr)
def test_gsl(test):
    # Run each GSL-derived dataset case through the common harness.
    _test_factory(test)
# Dataset cases from locally generated high-precision reference data.
LOCAL_TESTS = [
    data_local(ellipkinc, 'ellipkinc_neg_m', (0, 1), 2),
    data_local(ellipkm1, 'ellipkm1', 0, 1),
    data_local(ellipeinc, 'ellipeinc_neg_m', (0, 1), 2),
    data_local(clog1p, 'log1p_expm1_complex', (0,1), (2,3), rtol=1e-14),
    data_local(cexpm1, 'log1p_expm1_complex', (0,1), (4,5), rtol=1e-14),
    data_local(gammainc, 'gammainc', (0, 1), 2, rtol=1e-12),
    data_local(gammaincc, 'gammaincc', (0, 1), 2, rtol=1e-11),
    data_local(ellip_harm_2, 'ellip',(0, 1, 2, 3, 4), 6, rtol=1e-10, atol=1e-13),
    data_local(ellip_harm, 'ellip',(0, 1, 2, 3, 4), 5, rtol=1e-10, atol=1e-13),
    data_local(wright_bessel, 'wright_bessel', (0, 1, 2), 3, rtol=1e-11),
]
@pytest.mark.parametrize('test', LOCAL_TESTS, ids=repr)
def test_local(test):
    # Run each locally generated dataset case through the common harness.
    _test_factory(test)
def _test_factory(test, dtype=np.double):
"""Boost test"""
with suppress_warnings() as sup:
sup.filter(IntegrationWarning, "The occurrence of roundoff error is detected")
with np.errstate(all='ignore'):
test.check(dtype=dtype)
| 28,485
| 45.394137
| 135
|
py
|
scipy
|
scipy-main/scipy/special/tests/test_cython_special.py
|
from __future__ import annotations
from typing import Callable
import pytest
from itertools import product
from numpy.testing import assert_allclose, suppress_warnings
from scipy import special
from scipy.special import cython_special
bint_points = [True, False]
int_points = [-10, -1, 1, 10]
real_points = [-10.0, -1.0, 1.0, 10.0]
complex_points = [complex(*tup) for tup in product(real_points, repeat=2)]
CYTHON_SIGNATURE_MAP = {
'b': 'bint',
'f': 'float',
'd': 'double',
'g': 'long double',
'F': 'float complex',
'D': 'double complex',
'G': 'long double complex',
'i': 'int',
'l': 'long'
}
TEST_POINTS = {
'b': bint_points,
'f': real_points,
'd': real_points,
'g': real_points,
'F': complex_points,
'D': complex_points,
'G': complex_points,
'i': int_points,
'l': int_points,
}
PARAMS: list[tuple[Callable, Callable, tuple[str, ...], str | None]] = [
(special.agm, cython_special.agm, ('dd',), None),
(special.airy, cython_special._airy_pywrap, ('d', 'D'), None),
(special.airye, cython_special._airye_pywrap, ('d', 'D'), None),
(special.bdtr, cython_special.bdtr, ('dld', 'ddd'), None),
(special.bdtrc, cython_special.bdtrc, ('dld', 'ddd'), None),
(special.bdtri, cython_special.bdtri, ('dld', 'ddd'), None),
(special.bdtrik, cython_special.bdtrik, ('ddd',), None),
(special.bdtrin, cython_special.bdtrin, ('ddd',), None),
(special.bei, cython_special.bei, ('d',), None),
(special.beip, cython_special.beip, ('d',), None),
(special.ber, cython_special.ber, ('d',), None),
(special.berp, cython_special.berp, ('d',), None),
(special.besselpoly, cython_special.besselpoly, ('ddd',), None),
(special.beta, cython_special.beta, ('dd',), None),
(special.betainc, cython_special.betainc, ('ddd',), None),
(special.betaincinv, cython_special.betaincinv, ('ddd',), None),
(special.betaln, cython_special.betaln, ('dd',), None),
(special.binom, cython_special.binom, ('dd',), None),
(special.boxcox, cython_special.boxcox, ('dd',), None),
(special.boxcox1p, cython_special.boxcox1p, ('dd',), None),
(special.btdtr, cython_special.btdtr, ('ddd',), None),
(special.btdtri, cython_special.btdtri, ('ddd',), None),
(special.btdtria, cython_special.btdtria, ('ddd',), None),
(special.btdtrib, cython_special.btdtrib, ('ddd',), None),
(special.cbrt, cython_special.cbrt, ('d',), None),
(special.chdtr, cython_special.chdtr, ('dd',), None),
(special.chdtrc, cython_special.chdtrc, ('dd',), None),
(special.chdtri, cython_special.chdtri, ('dd',), None),
(special.chdtriv, cython_special.chdtriv, ('dd',), None),
(special.chndtr, cython_special.chndtr, ('ddd',), None),
(special.chndtridf, cython_special.chndtridf, ('ddd',), None),
(special.chndtrinc, cython_special.chndtrinc, ('ddd',), None),
(special.chndtrix, cython_special.chndtrix, ('ddd',), None),
(special.cosdg, cython_special.cosdg, ('d',), None),
(special.cosm1, cython_special.cosm1, ('d',), None),
(special.cotdg, cython_special.cotdg, ('d',), None),
(special.dawsn, cython_special.dawsn, ('d', 'D'), None),
(special.ellipe, cython_special.ellipe, ('d',), None),
(special.ellipeinc, cython_special.ellipeinc, ('dd',), None),
(special.ellipj, cython_special._ellipj_pywrap, ('dd',), None),
(special.ellipkinc, cython_special.ellipkinc, ('dd',), None),
(special.ellipkm1, cython_special.ellipkm1, ('d',), None),
(special.ellipk, cython_special.ellipk, ('d',), None),
(special.elliprc, cython_special.elliprc, ('dd', 'DD'), None),
(special.elliprd, cython_special.elliprd, ('ddd', 'DDD'), None),
(special.elliprf, cython_special.elliprf, ('ddd', 'DDD'), None),
(special.elliprg, cython_special.elliprg, ('ddd', 'DDD'), None),
(special.elliprj, cython_special.elliprj, ('dddd', 'DDDD'), None),
(special.entr, cython_special.entr, ('d',), None),
(special.erf, cython_special.erf, ('d', 'D'), None),
(special.erfc, cython_special.erfc, ('d', 'D'), None),
(special.erfcx, cython_special.erfcx, ('d', 'D'), None),
(special.erfi, cython_special.erfi, ('d', 'D'), None),
(special.erfinv, cython_special.erfinv, ('d',), None),
(special.erfcinv, cython_special.erfcinv, ('d',), None),
(special.eval_chebyc, cython_special.eval_chebyc, ('dd', 'dD', 'ld'), None),
(special.eval_chebys, cython_special.eval_chebys, ('dd', 'dD', 'ld'),
'd and l differ for negative int'),
(special.eval_chebyt, cython_special.eval_chebyt, ('dd', 'dD', 'ld'),
'd and l differ for negative int'),
(special.eval_chebyu, cython_special.eval_chebyu, ('dd', 'dD', 'ld'),
'd and l differ for negative int'),
(special.eval_gegenbauer, cython_special.eval_gegenbauer, ('ddd', 'ddD', 'ldd'),
'd and l differ for negative int'),
(special.eval_genlaguerre, cython_special.eval_genlaguerre, ('ddd', 'ddD', 'ldd'),
'd and l differ for negative int'),
(special.eval_hermite, cython_special.eval_hermite, ('ld',), None),
(special.eval_hermitenorm, cython_special.eval_hermitenorm, ('ld',), None),
(special.eval_jacobi, cython_special.eval_jacobi, ('dddd', 'dddD', 'lddd'),
'd and l differ for negative int'),
(special.eval_laguerre, cython_special.eval_laguerre, ('dd', 'dD', 'ld'),
'd and l differ for negative int'),
(special.eval_legendre, cython_special.eval_legendre, ('dd', 'dD', 'ld'), None),
(special.eval_sh_chebyt, cython_special.eval_sh_chebyt, ('dd', 'dD', 'ld'), None),
(special.eval_sh_chebyu, cython_special.eval_sh_chebyu, ('dd', 'dD', 'ld'),
'd and l differ for negative int'),
(special.eval_sh_jacobi, cython_special.eval_sh_jacobi, ('dddd', 'dddD', 'lddd'),
'd and l differ for negative int'),
(special.eval_sh_legendre, cython_special.eval_sh_legendre, ('dd', 'dD', 'ld'), None),
(special.exp1, cython_special.exp1, ('d', 'D'), None),
(special.exp10, cython_special.exp10, ('d',), None),
(special.exp2, cython_special.exp2, ('d',), None),
(special.expi, cython_special.expi, ('d', 'D'), None),
(special.expit, cython_special.expit, ('f', 'd', 'g'), None),
(special.expm1, cython_special.expm1, ('d', 'D'), None),
(special.expn, cython_special.expn, ('ld', 'dd'), None),
(special.exprel, cython_special.exprel, ('d',), None),
(special.fdtr, cython_special.fdtr, ('ddd',), None),
(special.fdtrc, cython_special.fdtrc, ('ddd',), None),
(special.fdtri, cython_special.fdtri, ('ddd',), None),
(special.fdtridfd, cython_special.fdtridfd, ('ddd',), None),
(special.fresnel, cython_special._fresnel_pywrap, ('d', 'D'), None),
(special.gamma, cython_special.gamma, ('d', 'D'), None),
(special.gammainc, cython_special.gammainc, ('dd',), None),
(special.gammaincc, cython_special.gammaincc, ('dd',), None),
(special.gammainccinv, cython_special.gammainccinv, ('dd',), None),
(special.gammaincinv, cython_special.gammaincinv, ('dd',), None),
(special.gammaln, cython_special.gammaln, ('d',), None),
(special.gammasgn, cython_special.gammasgn, ('d',), None),
(special.gdtr, cython_special.gdtr, ('ddd',), None),
(special.gdtrc, cython_special.gdtrc, ('ddd',), None),
(special.gdtria, cython_special.gdtria, ('ddd',), None),
(special.gdtrib, cython_special.gdtrib, ('ddd',), None),
(special.gdtrix, cython_special.gdtrix, ('ddd',), None),
(special.hankel1, cython_special.hankel1, ('dD',), None),
(special.hankel1e, cython_special.hankel1e, ('dD',), None),
(special.hankel2, cython_special.hankel2, ('dD',), None),
(special.hankel2e, cython_special.hankel2e, ('dD',), None),
(special.huber, cython_special.huber, ('dd',), None),
(special.hyp0f1, cython_special.hyp0f1, ('dd', 'dD'), None),
(special.hyp1f1, cython_special.hyp1f1, ('ddd', 'ddD'), None),
(special.hyp2f1, cython_special.hyp2f1, ('dddd', 'dddD'), None),
(special.hyperu, cython_special.hyperu, ('ddd',), None),
(special.i0, cython_special.i0, ('d',), None),
(special.i0e, cython_special.i0e, ('d',), None),
(special.i1, cython_special.i1, ('d',), None),
(special.i1e, cython_special.i1e, ('d',), None),
(special.inv_boxcox, cython_special.inv_boxcox, ('dd',), None),
(special.inv_boxcox1p, cython_special.inv_boxcox1p, ('dd',), None),
(special.it2i0k0, cython_special._it2i0k0_pywrap, ('d',), None),
(special.it2j0y0, cython_special._it2j0y0_pywrap, ('d',), None),
(special.it2struve0, cython_special.it2struve0, ('d',), None),
(special.itairy, cython_special._itairy_pywrap, ('d',), None),
(special.iti0k0, cython_special._iti0k0_pywrap, ('d',), None),
(special.itj0y0, cython_special._itj0y0_pywrap, ('d',), None),
(special.itmodstruve0, cython_special.itmodstruve0, ('d',), None),
(special.itstruve0, cython_special.itstruve0, ('d',), None),
(special.iv, cython_special.iv, ('dd', 'dD'), None),
(special.ive, cython_special.ive, ('dd', 'dD'), None),
(special.j0, cython_special.j0, ('d',), None),
(special.j1, cython_special.j1, ('d',), None),
(special.jv, cython_special.jv, ('dd', 'dD'), None),
(special.jve, cython_special.jve, ('dd', 'dD'), None),
(special.k0, cython_special.k0, ('d',), None),
(special.k0e, cython_special.k0e, ('d',), None),
(special.k1, cython_special.k1, ('d',), None),
(special.k1e, cython_special.k1e, ('d',), None),
(special.kei, cython_special.kei, ('d',), None),
(special.keip, cython_special.keip, ('d',), None),
(special.kelvin, cython_special._kelvin_pywrap, ('d',), None),
(special.ker, cython_special.ker, ('d',), None),
(special.kerp, cython_special.kerp, ('d',), None),
(special.kl_div, cython_special.kl_div, ('dd',), None),
(special.kn, cython_special.kn, ('ld', 'dd'), None),
(special.kolmogi, cython_special.kolmogi, ('d',), None),
(special.kolmogorov, cython_special.kolmogorov, ('d',), None),
(special.kv, cython_special.kv, ('dd', 'dD'), None),
(special.kve, cython_special.kve, ('dd', 'dD'), None),
(special.log1p, cython_special.log1p, ('d', 'D'), None),
(special.log_expit, cython_special.log_expit, ('f', 'd', 'g'), None),
(special.log_ndtr, cython_special.log_ndtr, ('d', 'D'), None),
(special.ndtri_exp, cython_special.ndtri_exp, ('d',), None),
(special.loggamma, cython_special.loggamma, ('D',), None),
(special.logit, cython_special.logit, ('f', 'd', 'g'), None),
(special.lpmv, cython_special.lpmv, ('ddd',), None),
(special.mathieu_a, cython_special.mathieu_a, ('dd',), None),
(special.mathieu_b, cython_special.mathieu_b, ('dd',), None),
(special.mathieu_cem, cython_special._mathieu_cem_pywrap, ('ddd',), None),
(special.mathieu_modcem1, cython_special._mathieu_modcem1_pywrap, ('ddd',), None),
(special.mathieu_modcem2, cython_special._mathieu_modcem2_pywrap, ('ddd',), None),
(special.mathieu_modsem1, cython_special._mathieu_modsem1_pywrap, ('ddd',), None),
(special.mathieu_modsem2, cython_special._mathieu_modsem2_pywrap, ('ddd',), None),
(special.mathieu_sem, cython_special._mathieu_sem_pywrap, ('ddd',), None),
(special.modfresnelm, cython_special._modfresnelm_pywrap, ('d',), None),
(special.modfresnelp, cython_special._modfresnelp_pywrap, ('d',), None),
(special.modstruve, cython_special.modstruve, ('dd',), None),
(special.nbdtr, cython_special.nbdtr, ('lld', 'ddd'), None),
(special.nbdtrc, cython_special.nbdtrc, ('lld', 'ddd'), None),
(special.nbdtri, cython_special.nbdtri, ('lld', 'ddd'), None),
(special.nbdtrik, cython_special.nbdtrik, ('ddd',), None),
(special.nbdtrin, cython_special.nbdtrin, ('ddd',), None),
(special.ncfdtr, cython_special.ncfdtr, ('dddd',), None),
(special.ncfdtri, cython_special.ncfdtri, ('dddd',), None),
(special.ncfdtridfd, cython_special.ncfdtridfd, ('dddd',), None),
(special.ncfdtridfn, cython_special.ncfdtridfn, ('dddd',), None),
(special.ncfdtrinc, cython_special.ncfdtrinc, ('dddd',), None),
(special.nctdtr, cython_special.nctdtr, ('ddd',), None),
(special.nctdtridf, cython_special.nctdtridf, ('ddd',), None),
(special.nctdtrinc, cython_special.nctdtrinc, ('ddd',), None),
(special.nctdtrit, cython_special.nctdtrit, ('ddd',), None),
(special.ndtr, cython_special.ndtr, ('d', 'D'), None),
(special.ndtri, cython_special.ndtri, ('d',), None),
(special.nrdtrimn, cython_special.nrdtrimn, ('ddd',), None),
(special.nrdtrisd, cython_special.nrdtrisd, ('ddd',), None),
(special.obl_ang1, cython_special._obl_ang1_pywrap, ('dddd',), None),
(special.obl_ang1_cv, cython_special._obl_ang1_cv_pywrap, ('ddddd',), None),
(special.obl_cv, cython_special.obl_cv, ('ddd',), None),
(special.obl_rad1, cython_special._obl_rad1_pywrap, ('dddd',), "see gh-6211"),
(special.obl_rad1_cv, cython_special._obl_rad1_cv_pywrap, ('ddddd',), "see gh-6211"),
(special.obl_rad2, cython_special._obl_rad2_pywrap, ('dddd',), "see gh-6211"),
(special.obl_rad2_cv, cython_special._obl_rad2_cv_pywrap, ('ddddd',), "see gh-6211"),
(special.pbdv, cython_special._pbdv_pywrap, ('dd',), None),
(special.pbvv, cython_special._pbvv_pywrap, ('dd',), None),
(special.pbwa, cython_special._pbwa_pywrap, ('dd',), None),
(special.pdtr, cython_special.pdtr, ('dd', 'dd'), None),
(special.pdtrc, cython_special.pdtrc, ('dd', 'dd'), None),
(special.pdtri, cython_special.pdtri, ('ld', 'dd'), None),
(special.pdtrik, cython_special.pdtrik, ('dd',), None),
(special.poch, cython_special.poch, ('dd',), None),
(special.powm1, cython_special.powm1, ('dd',), None),
(special.pro_ang1, cython_special._pro_ang1_pywrap, ('dddd',), None),
(special.pro_ang1_cv, cython_special._pro_ang1_cv_pywrap, ('ddddd',), None),
(special.pro_cv, cython_special.pro_cv, ('ddd',), None),
(special.pro_rad1, cython_special._pro_rad1_pywrap, ('dddd',), "see gh-6211"),
(special.pro_rad1_cv, cython_special._pro_rad1_cv_pywrap, ('ddddd',), "see gh-6211"),
(special.pro_rad2, cython_special._pro_rad2_pywrap, ('dddd',), "see gh-6211"),
(special.pro_rad2_cv, cython_special._pro_rad2_cv_pywrap, ('ddddd',), "see gh-6211"),
(special.pseudo_huber, cython_special.pseudo_huber, ('dd',), None),
(special.psi, cython_special.psi, ('d', 'D'), None),
(special.radian, cython_special.radian, ('ddd',), None),
(special.rel_entr, cython_special.rel_entr, ('dd',), None),
(special.rgamma, cython_special.rgamma, ('d', 'D'), None),
(special.round, cython_special.round, ('d',), None),
(special.spherical_jn, cython_special.spherical_jn, ('ld', 'ldb', 'lD', 'lDb'), None),
(special.spherical_yn, cython_special.spherical_yn, ('ld', 'ldb', 'lD', 'lDb'), None),
(special.spherical_in, cython_special.spherical_in, ('ld', 'ldb', 'lD', 'lDb'), None),
(special.spherical_kn, cython_special.spherical_kn, ('ld', 'ldb', 'lD', 'lDb'), None),
(special.shichi, cython_special._shichi_pywrap, ('d', 'D'), None),
(special.sici, cython_special._sici_pywrap, ('d', 'D'), None),
(special.sindg, cython_special.sindg, ('d',), None),
(special.smirnov, cython_special.smirnov, ('ld', 'dd'), None),
(special.smirnovi, cython_special.smirnovi, ('ld', 'dd'), None),
(special.spence, cython_special.spence, ('d', 'D'), None),
(special.sph_harm, cython_special.sph_harm, ('lldd', 'dddd'), None),
(special.stdtr, cython_special.stdtr, ('dd',), None),
(special.stdtridf, cython_special.stdtridf, ('dd',), None),
(special.stdtrit, cython_special.stdtrit, ('dd',), None),
(special.struve, cython_special.struve, ('dd',), None),
(special.tandg, cython_special.tandg, ('d',), None),
(special.tklmbda, cython_special.tklmbda, ('dd',), None),
(special.voigt_profile, cython_special.voigt_profile, ('ddd',), None),
(special.wofz, cython_special.wofz, ('D',), None),
(special.wright_bessel, cython_special.wright_bessel, ('ddd',), None),
(special.wrightomega, cython_special.wrightomega, ('D',), None),
(special.xlog1py, cython_special.xlog1py, ('dd', 'DD'), None),
(special.xlogy, cython_special.xlogy, ('dd', 'DD'), None),
(special.y0, cython_special.y0, ('d',), None),
(special.y1, cython_special.y1, ('d',), None),
(special.yn, cython_special.yn, ('ld', 'dd'), None),
(special.yv, cython_special.yv, ('dd', 'dD'), None),
(special.yve, cython_special.yve, ('dd', 'dD'), None),
(special.zetac, cython_special.zetac, ('d',), None),
(special.owens_t, cython_special.owens_t, ('dd',), None)
]
IDS = [x[0].__name__ for x in PARAMS]
def _generate_test_points(typecodes):
axes = tuple(TEST_POINTS[x] for x in typecodes)
pts = list(product(*axes))
return pts
def test_cython_api_completeness():
# Check that everything is tested
for name in dir(cython_special):
func = getattr(cython_special, name)
if callable(func) and not name.startswith('_'):
for _, cyfun, _, _ in PARAMS:
if cyfun is func:
break
else:
raise RuntimeError(f"{name} missing from tests!")
@pytest.mark.parametrize("param", PARAMS, ids=IDS)
def test_cython_api(param):
pyfunc, cyfunc, specializations, knownfailure = param
if knownfailure:
pytest.xfail(reason=knownfailure)
# Check which parameters are expected to be fused types
max_params = max(len(spec) for spec in specializations)
values = [set() for _ in range(max_params)]
for typecodes in specializations:
for j, v in enumerate(typecodes):
values[j].add(v)
seen = set()
is_fused_code = [False] * len(values)
for j, v in enumerate(values):
vv = tuple(sorted(v))
if vv in seen:
continue
is_fused_code[j] = (len(v) > 1)
seen.add(vv)
# Check results
for typecodes in specializations:
# Pick the correct specialized function
signature = [CYTHON_SIGNATURE_MAP[code]
for j, code in enumerate(typecodes)
if is_fused_code[j]]
if signature:
cy_spec_func = cyfunc[tuple(signature)]
else:
signature = None
cy_spec_func = cyfunc
# Test it
pts = _generate_test_points(typecodes)
for pt in pts:
with suppress_warnings() as sup:
sup.filter(DeprecationWarning)
pyval = pyfunc(*pt)
cyval = cy_spec_func(*pt)
assert_allclose(cyval, pyval, err_msg=f"{pt} {typecodes} {signature}")
| 18,656
| 51.852691
| 90
|
py
|
scipy
|
scipy-main/scipy/special/tests/test_digamma.py
|
import numpy as np
from numpy import pi, log, sqrt
from numpy.testing import assert_, assert_equal
from scipy.special._testutils import FuncData
import scipy.special as sc
# Euler-Mascheroni constant
euler = 0.57721566490153286
def test_consistency():
# Make sure the implementation of digamma for real arguments
# agrees with the implementation of digamma for complex arguments.
# It's all poles after -1e16
x = np.r_[-np.logspace(15, -30, 200), np.logspace(-30, 300, 200)]
dataset = np.vstack((x + 0j, sc.digamma(x))).T
FuncData(sc.digamma, dataset, 0, 1, rtol=5e-14, nan_ok=True).check()
def test_special_values():
# Test special values from Gauss's digamma theorem. See
#
# https://en.wikipedia.org/wiki/Digamma_function
dataset = [(1, -euler),
(0.5, -2*log(2) - euler),
(1/3, -pi/(2*sqrt(3)) - 3*log(3)/2 - euler),
(1/4, -pi/2 - 3*log(2) - euler),
(1/6, -pi*sqrt(3)/2 - 2*log(2) - 3*log(3)/2 - euler),
(1/8, -pi/2 - 4*log(2) - (pi + log(2 + sqrt(2)) - log(2 - sqrt(2)))/sqrt(2) - euler)]
dataset = np.asarray(dataset)
FuncData(sc.digamma, dataset, 0, 1, rtol=1e-14).check()
def test_nonfinite():
pts = [0.0, -0.0, np.inf]
std = [-np.inf, np.inf, np.inf]
assert_equal(sc.digamma(pts), std)
assert_(all(np.isnan(sc.digamma([-np.inf, -1]))))
| 1,394
| 31.44186
| 100
|
py
|
scipy
|
scipy-main/scipy/special/tests/test_cdflib.py
|
"""
Test cdflib functions versus mpmath, if available.
The following functions still need tests:
- ncfdtr
- ncfdtri
- ncfdtridfn
- ncfdtridfd
- ncfdtrinc
- nbdtrik
- nbdtrin
- nrdtrimn
- nrdtrisd
- pdtrik
- nctdtr
- nctdtrit
- nctdtridf
- nctdtrinc
"""
import itertools
import numpy as np
from numpy.testing import assert_equal, assert_allclose
import pytest
import scipy.special as sp
from scipy.special._testutils import (
MissingModule, check_version, FuncData)
from scipy.special._mptestutils import (
Arg, IntArg, get_args, mpf2float, assert_mpmath_equal)
try:
import mpmath
except ImportError:
mpmath = MissingModule('mpmath')
class ProbArg:
"""Generate a set of probabilities on [0, 1]."""
def __init__(self):
# Include the endpoints for compatibility with Arg et. al.
self.a = 0
self.b = 1
def values(self, n):
"""Return an array containing approximatively n numbers."""
m = max(1, n//3)
v1 = np.logspace(-30, np.log10(0.3), m)
v2 = np.linspace(0.3, 0.7, m + 1, endpoint=False)[1:]
v3 = 1 - np.logspace(np.log10(0.3), -15, m)
v = np.r_[v1, v2, v3]
return np.unique(v)
class EndpointFilter:
def __init__(self, a, b, rtol, atol):
self.a = a
self.b = b
self.rtol = rtol
self.atol = atol
def __call__(self, x):
mask1 = np.abs(x - self.a) < self.rtol*np.abs(self.a) + self.atol
mask2 = np.abs(x - self.b) < self.rtol*np.abs(self.b) + self.atol
return np.where(mask1 | mask2, False, True)
class _CDFData:
def __init__(self, spfunc, mpfunc, index, argspec, spfunc_first=True,
dps=20, n=5000, rtol=None, atol=None,
endpt_rtol=None, endpt_atol=None):
self.spfunc = spfunc
self.mpfunc = mpfunc
self.index = index
self.argspec = argspec
self.spfunc_first = spfunc_first
self.dps = dps
self.n = n
self.rtol = rtol
self.atol = atol
if not isinstance(argspec, list):
self.endpt_rtol = None
self.endpt_atol = None
elif endpt_rtol is not None or endpt_atol is not None:
if isinstance(endpt_rtol, list):
self.endpt_rtol = endpt_rtol
else:
self.endpt_rtol = [endpt_rtol]*len(self.argspec)
if isinstance(endpt_atol, list):
self.endpt_atol = endpt_atol
else:
self.endpt_atol = [endpt_atol]*len(self.argspec)
else:
self.endpt_rtol = None
self.endpt_atol = None
def idmap(self, *args):
if self.spfunc_first:
res = self.spfunc(*args)
if np.isnan(res):
return np.nan
args = list(args)
args[self.index] = res
with mpmath.workdps(self.dps):
res = self.mpfunc(*tuple(args))
# Imaginary parts are spurious
res = mpf2float(res.real)
else:
with mpmath.workdps(self.dps):
res = self.mpfunc(*args)
res = mpf2float(res.real)
args = list(args)
args[self.index] = res
res = self.spfunc(*tuple(args))
return res
def get_param_filter(self):
if self.endpt_rtol is None and self.endpt_atol is None:
return None
filters = []
for rtol, atol, spec in zip(self.endpt_rtol, self.endpt_atol, self.argspec):
if rtol is None and atol is None:
filters.append(None)
continue
elif rtol is None:
rtol = 0.0
elif atol is None:
atol = 0.0
filters.append(EndpointFilter(spec.a, spec.b, rtol, atol))
return filters
def check(self):
# Generate values for the arguments
args = get_args(self.argspec, self.n)
param_filter = self.get_param_filter()
param_columns = tuple(range(args.shape[1]))
result_columns = args.shape[1]
args = np.hstack((args, args[:,self.index].reshape(args.shape[0], 1)))
FuncData(self.idmap, args,
param_columns=param_columns, result_columns=result_columns,
rtol=self.rtol, atol=self.atol, vectorized=False,
param_filter=param_filter).check()
def _assert_inverts(*a, **kw):
d = _CDFData(*a, **kw)
d.check()
def _binomial_cdf(k, n, p):
k, n, p = mpmath.mpf(k), mpmath.mpf(n), mpmath.mpf(p)
if k <= 0:
return mpmath.mpf(0)
elif k >= n:
return mpmath.mpf(1)
onemp = mpmath.fsub(1, p, exact=True)
return mpmath.betainc(n - k, k + 1, x2=onemp, regularized=True)
def _f_cdf(dfn, dfd, x):
if x < 0:
return mpmath.mpf(0)
dfn, dfd, x = mpmath.mpf(dfn), mpmath.mpf(dfd), mpmath.mpf(x)
ub = dfn*x/(dfn*x + dfd)
res = mpmath.betainc(dfn/2, dfd/2, x2=ub, regularized=True)
return res
def _student_t_cdf(df, t, dps=None):
if dps is None:
dps = mpmath.mp.dps
with mpmath.workdps(dps):
df, t = mpmath.mpf(df), mpmath.mpf(t)
fac = mpmath.hyp2f1(0.5, 0.5*(df + 1), 1.5, -t**2/df)
fac *= t*mpmath.gamma(0.5*(df + 1))
fac /= mpmath.sqrt(mpmath.pi*df)*mpmath.gamma(0.5*df)
return 0.5 + fac
def _noncentral_chi_pdf(t, df, nc):
res = mpmath.besseli(df/2 - 1, mpmath.sqrt(nc*t))
res *= mpmath.exp(-(t + nc)/2)*(t/nc)**(df/4 - 1/2)/2
return res
def _noncentral_chi_cdf(x, df, nc, dps=None):
if dps is None:
dps = mpmath.mp.dps
x, df, nc = mpmath.mpf(x), mpmath.mpf(df), mpmath.mpf(nc)
with mpmath.workdps(dps):
res = mpmath.quad(lambda t: _noncentral_chi_pdf(t, df, nc), [0, x])
return res
def _tukey_lmbda_quantile(p, lmbda):
# For lmbda != 0
return (p**lmbda - (1 - p)**lmbda)/lmbda
@pytest.mark.slow
@check_version(mpmath, '0.19')
class TestCDFlib:
@pytest.mark.xfail(run=False)
def test_bdtrik(self):
_assert_inverts(
sp.bdtrik,
_binomial_cdf,
0, [ProbArg(), IntArg(1, 1000), ProbArg()],
rtol=1e-4)
def test_bdtrin(self):
_assert_inverts(
sp.bdtrin,
_binomial_cdf,
1, [IntArg(1, 1000), ProbArg(), ProbArg()],
rtol=1e-4, endpt_atol=[None, None, 1e-6])
def test_btdtria(self):
_assert_inverts(
sp.btdtria,
lambda a, b, x: mpmath.betainc(a, b, x2=x, regularized=True),
0, [ProbArg(), Arg(0, 1e2, inclusive_a=False),
Arg(0, 1, inclusive_a=False, inclusive_b=False)],
rtol=1e-6)
def test_btdtrib(self):
# Use small values of a or mpmath doesn't converge
_assert_inverts(
sp.btdtrib,
lambda a, b, x: mpmath.betainc(a, b, x2=x, regularized=True),
1, [Arg(0, 1e2, inclusive_a=False), ProbArg(),
Arg(0, 1, inclusive_a=False, inclusive_b=False)],
rtol=1e-7, endpt_atol=[None, 1e-18, 1e-15])
@pytest.mark.xfail(run=False)
def test_fdtridfd(self):
_assert_inverts(
sp.fdtridfd,
_f_cdf,
1, [IntArg(1, 100), ProbArg(), Arg(0, 100, inclusive_a=False)],
rtol=1e-7)
def test_gdtria(self):
_assert_inverts(
sp.gdtria,
lambda a, b, x: mpmath.gammainc(b, b=a*x, regularized=True),
0, [ProbArg(), Arg(0, 1e3, inclusive_a=False),
Arg(0, 1e4, inclusive_a=False)], rtol=1e-7,
endpt_atol=[None, 1e-7, 1e-10])
def test_gdtrib(self):
# Use small values of a and x or mpmath doesn't converge
_assert_inverts(
sp.gdtrib,
lambda a, b, x: mpmath.gammainc(b, b=a*x, regularized=True),
1, [Arg(0, 1e2, inclusive_a=False), ProbArg(),
Arg(0, 1e3, inclusive_a=False)], rtol=1e-5)
def test_gdtrix(self):
_assert_inverts(
sp.gdtrix,
lambda a, b, x: mpmath.gammainc(b, b=a*x, regularized=True),
2, [Arg(0, 1e3, inclusive_a=False), Arg(0, 1e3, inclusive_a=False),
ProbArg()], rtol=1e-7,
endpt_atol=[None, 1e-7, 1e-10])
def test_stdtr(self):
# Ideally the left endpoint for Arg() should be 0.
assert_mpmath_equal(
sp.stdtr,
_student_t_cdf,
[IntArg(1, 100), Arg(1e-10, np.inf)], rtol=1e-7)
@pytest.mark.xfail(run=False)
def test_stdtridf(self):
_assert_inverts(
sp.stdtridf,
_student_t_cdf,
0, [ProbArg(), Arg()], rtol=1e-7)
def test_stdtrit(self):
_assert_inverts(
sp.stdtrit,
_student_t_cdf,
1, [IntArg(1, 100), ProbArg()], rtol=1e-7,
endpt_atol=[None, 1e-10])
def test_chdtriv(self):
_assert_inverts(
sp.chdtriv,
lambda v, x: mpmath.gammainc(v/2, b=x/2, regularized=True),
0, [ProbArg(), IntArg(1, 100)], rtol=1e-4)
@pytest.mark.xfail(run=False)
def test_chndtridf(self):
# Use a larger atol since mpmath is doing numerical integration
_assert_inverts(
sp.chndtridf,
_noncentral_chi_cdf,
1, [Arg(0, 100, inclusive_a=False), ProbArg(),
Arg(0, 100, inclusive_a=False)],
n=1000, rtol=1e-4, atol=1e-15)
@pytest.mark.xfail(run=False)
def test_chndtrinc(self):
# Use a larger atol since mpmath is doing numerical integration
_assert_inverts(
sp.chndtrinc,
_noncentral_chi_cdf,
2, [Arg(0, 100, inclusive_a=False), IntArg(1, 100), ProbArg()],
n=1000, rtol=1e-4, atol=1e-15)
def test_chndtrix(self):
# Use a larger atol since mpmath is doing numerical integration
_assert_inverts(
sp.chndtrix,
_noncentral_chi_cdf,
0, [ProbArg(), IntArg(1, 100), Arg(0, 100, inclusive_a=False)],
n=1000, rtol=1e-4, atol=1e-15,
endpt_atol=[1e-6, None, None])
def test_tklmbda_zero_shape(self):
# When lmbda = 0 the CDF has a simple closed form
one = mpmath.mpf(1)
assert_mpmath_equal(
lambda x: sp.tklmbda(x, 0),
lambda x: one/(mpmath.exp(-x) + one),
[Arg()], rtol=1e-7)
def test_tklmbda_neg_shape(self):
_assert_inverts(
sp.tklmbda,
_tukey_lmbda_quantile,
0, [ProbArg(), Arg(-25, 0, inclusive_b=False)],
spfunc_first=False, rtol=1e-5,
endpt_atol=[1e-9, 1e-5])
@pytest.mark.xfail(run=False)
def test_tklmbda_pos_shape(self):
_assert_inverts(
sp.tklmbda,
_tukey_lmbda_quantile,
0, [ProbArg(), Arg(0, 100, inclusive_a=False)],
spfunc_first=False, rtol=1e-5)
# The values of lmdba are chosen so that 1/lmbda is exact.
@pytest.mark.parametrize('lmbda', [0.5, 1.0, 8.0])
def test_tklmbda_lmbda1(self, lmbda):
bound = 1/lmbda
assert_equal(sp.tklmbda([-bound, bound], lmbda), [0.0, 1.0])
def test_nonfinite():
funcs = [
("btdtria", 3),
("btdtrib", 3),
("bdtrik", 3),
("bdtrin", 3),
("chdtriv", 2),
("chndtr", 3),
("chndtrix", 3),
("chndtridf", 3),
("chndtrinc", 3),
("fdtridfd", 3),
("ncfdtr", 4),
("ncfdtri", 4),
("ncfdtridfn", 4),
("ncfdtridfd", 4),
("ncfdtrinc", 4),
("gdtrix", 3),
("gdtrib", 3),
("gdtria", 3),
("nbdtrik", 3),
("nbdtrin", 3),
("nrdtrimn", 3),
("nrdtrisd", 3),
("pdtrik", 2),
("stdtr", 2),
("stdtrit", 2),
("stdtridf", 2),
("nctdtr", 3),
("nctdtrit", 3),
("nctdtridf", 3),
("nctdtrinc", 3),
("tklmbda", 2),
]
np.random.seed(1)
for func, numargs in funcs:
func = getattr(sp, func)
args_choices = [(float(x), np.nan, np.inf, -np.inf) for x in
np.random.rand(numargs)]
for args in itertools.product(*args_choices):
res = func(*args)
if any(np.isnan(x) for x in args):
# Nan inputs should result to nan output
assert_equal(res, np.nan)
else:
# All other inputs should return something (but not
# raise exceptions or cause hangs)
pass
def test_chndtrix_gh2158():
# test that gh-2158 is resolved; previously this blew up
res = sp.chndtrix(0.999999, 2, np.arange(20.)+1e-6)
# Generated in R
# options(digits=16)
# ncp <- seq(0, 19) + 1e-6
# print(qchisq(0.999999, df = 2, ncp = ncp))
res_exp = [27.63103493142305, 35.25728589950540, 39.97396073236288,
43.88033702110538, 47.35206403482798, 50.54112500166103,
53.52720257322766, 56.35830042867810, 59.06600769498512,
61.67243118946381, 64.19376191277179, 66.64228141346548,
69.02756927200180, 71.35726934749408, 73.63759723904816,
75.87368842650227, 78.06984431185720, 80.22971052389806,
82.35640899964173, 84.45263768373256]
assert_allclose(res, res_exp)
| 13,419
| 30.136891
| 84
|
py
|
scipy
|
scipy-main/scipy/special/tests/test_dd.py
|
# Tests for a few of the "double-double" C functions defined in cephes/dd_*.
import pytest
from numpy.testing import assert_allclose
from scipy.special._test_internal import _dd_exp, _dd_log, _dd_expm1
# Each tuple in test_data contains:
# (dd_func, xhi, xlo, expected_yhi, expected_ylo)
# The expected values were computed with mpmath, e.g.
#
# import mpmath
# mpmath.mp.dps = 100
# xhi = 10.0
# xlo = 0.0
# x = mpmath.mpf(xhi) + mpmath.mpf(xlo)
# y = mpmath.log(x)
# expected_yhi = float(y)
# expected_ylo = float(y - expected_yhi)
#
test_data = [
(_dd_exp, -0.3333333333333333, -1.850371707708594e-17,
0.7165313105737893, -2.0286948382455594e-17),
(_dd_exp, 0.0, 0.0, 1.0, 0.0),
(_dd_exp, 10.0, 0.0, 22026.465794806718, -1.3780134700517372e-12),
(_dd_log, 0.03125, 0.0, -3.4657359027997265, -4.930038229799327e-18),
(_dd_log, 10.0, 0.0, 2.302585092994046, -2.1707562233822494e-16),
(_dd_expm1, -1.25, 0.0, -0.7134952031398099, -4.7031321153650186e-17),
(_dd_expm1, -0.484375, 0.0, -0.3839178722093218, 7.609376052156984e-18),
(_dd_expm1, -0.25, 0.0, -0.22119921692859512, -1.0231869534531498e-17),
(_dd_expm1, -0.0625, 0.0, -0.06058693718652421, -7.077887227488846e-19),
(_dd_expm1, 0.0, 0.0, 0.0, 0.0),
(_dd_expm1, 0.0625, 3.5e-18, 0.06449445891785943, 1.4323095758164254e-18),
(_dd_expm1, 0.25, 0.0, 0.2840254166877415, -2.133257464457841e-17),
(_dd_expm1, 0.498046875, 0.0, 0.645504254608231, -9.198435524984236e-18),
(_dd_expm1, 1.25, 0.0, 2.4903429574618414, -4.604261945372796e-17)
]
@pytest.mark.parametrize('dd_func, xhi, xlo, expected_yhi, expected_ylo',
test_data)
def test_dd(dd_func, xhi, xlo, expected_yhi, expected_ylo):
yhi, ylo = dd_func(xhi, xlo)
assert yhi == expected_yhi, (f"high double ({yhi}) does not equal the "
f"expected value {expected_yhi}")
assert_allclose(ylo, expected_ylo, rtol=5e-15)
| 1,974
| 41.021277
| 78
|
py
|
scipy
|
scipy-main/scipy/special/tests/test_orthogonal.py
|
import numpy as np
from numpy import array, sqrt
from numpy.testing import (assert_array_almost_equal, assert_equal,
assert_almost_equal, assert_allclose)
from pytest import raises as assert_raises
from scipy import integrate
import scipy.special as sc
from scipy.special import gamma
import scipy.special._orthogonal as orth
class TestCheby:
    """Check low-order coefficient arrays of the four Chebyshev families
    (C, S, T, U) against hand-computed values."""

    def test_chebyc(self):
        # Chebyshev polynomials of the first kind on [-2, 2].
        expected = [[2], [1, 0], [1, 0, -2], [1, 0, -3, 0],
                    [1, 0, -4, 0, 2], [1, 0, -5, 0, 5, 0]]
        with np.errstate(all='ignore'):
            computed = [orth.chebyc(n) for n in range(6)]
        for poly, coeffs in zip(computed, expected):
            assert_array_almost_equal(poly.c, coeffs, 13)

    def test_chebys(self):
        # Chebyshev polynomials of the second kind on [-2, 2].
        expected = [[1], [1, 0], [1, 0, -1], [1, 0, -2, 0],
                    [1, 0, -3, 0, 1], [1, 0, -4, 0, 3, 0]]
        for n, coeffs in enumerate(expected):
            assert_array_almost_equal(orth.chebys(n).c, coeffs, 13)

    def test_chebyt(self):
        # Chebyshev polynomials of the first kind on [-1, 1].
        expected = [[1], [1, 0], [2, 0, -1], [4, 0, -3, 0],
                    [8, 0, -8, 0, 1], [16, 0, -20, 0, 5, 0]]
        for n, coeffs in enumerate(expected):
            assert_array_almost_equal(orth.chebyt(n).c, coeffs, 13)

    def test_chebyu(self):
        # Chebyshev polynomials of the second kind on [-1, 1].
        expected = [[1], [2, 0], [4, 0, -1], [8, 0, -4, 0],
                    [16, 0, -12, 0, 1], [32, 0, -32, 0, 6, 0]]
        for n, coeffs in enumerate(expected):
            assert_array_almost_equal(orth.chebyu(n).c, coeffs, 13)
class TestGegenbauer:
    """Compare low-order Gegenbauer coefficients with closed forms in the
    Pochhammer symbol, for a random nonzero parameter."""

    def test_gegenbauer(self):
        # Draw a in (-0.5, 4.5); fall back to -0.2 if it lands on zero.
        a = 5*np.random.random() - 0.5
        if np.any(a == 0):
            a = -0.2
        polys = [orth.gegenbauer(n, a) for n in range(6)]
        assert_array_almost_equal(polys[0].c, array([1]), 13)
        assert_array_almost_equal(polys[1].c, array([2*a, 0]), 13)
        assert_array_almost_equal(polys[2].c, array([2*a*(a+1), 0, -a]), 13)
        assert_array_almost_equal(
            polys[3].c, array([4*sc.poch(a, 3), 0, -6*a*(a+1), 0])/3.0, 11)
        assert_array_almost_equal(
            polys[4].c,
            array([4*sc.poch(a, 4), 0, -12*sc.poch(a, 3), 0, 3*a*(a+1)])/6.0,
            11)
        assert_array_almost_equal(
            polys[5].c,
            array([4*sc.poch(a, 5), 0, -20*sc.poch(a, 4),
                   0, 15*sc.poch(a, 3), 0])/15.0,
            11)
class TestHermite:
    """Checks for physicists' Hermite polynomials and their probabilists'
    (normalized) counterparts."""

    def test_hermite(self):
        # Low orders pass at 13 decimals; orders 4-5 accumulate slightly
        # more rounding error, hence 12 decimals there.
        low = [[1], [2, 0], [4, 0, -2], [8, 0, -12, 0]]
        for n, coeffs in enumerate(low):
            assert_array_almost_equal(orth.hermite(n).c, coeffs, 13)
        assert_array_almost_equal(orth.hermite(4).c, [16, 0, -48, 0, 12], 12)
        assert_array_almost_equal(orth.hermite(5).c,
                                  [32, 0, -160, 0, 120, 0], 12)

    def test_hermitenorm(self):
        # He_n(x) = 2**(-n/2) H_n(x/sqrt(2))
        psub = np.poly1d([1.0/sqrt(2), 0])
        scales = [1.0, sqrt(2), 2.0, 2*sqrt(2), 4.0, 4.0*sqrt(2)]
        for n, scale in enumerate(scales):
            expected = orth.hermite(n)(psub) / scale
            assert_array_almost_equal(orth.hermitenorm(n).c, expected.c, 13)
class TestShLegendre:
    """Shifted Legendre polynomials via the substitution identity."""

    def test_sh_legendre(self):
        # P*_n(x) = P_n(2x-1)
        psub = np.poly1d([2, -1])
        # Orders 4 and 5 carry one decimal less precision.
        for n, decimal in enumerate([13, 13, 13, 13, 12, 12]):
            shifted = orth.sh_legendre(n)
            expected = orth.legendre(n)(psub)
            assert_array_almost_equal(shifted.c, expected.c, decimal)
class TestShChebyt:
    """Shifted Chebyshev (first kind) via the substitution identity."""

    def test_sh_chebyt(self):
        # T*_n(x) = T_n(2x-1)
        psub = np.poly1d([2, -1])
        # Orders 4 and 5 carry one decimal less precision.
        for n, decimal in enumerate([13, 13, 13, 13, 12, 12]):
            shifted = orth.sh_chebyt(n)
            expected = orth.chebyt(n)(psub)
            assert_array_almost_equal(shifted.c, expected.c, decimal)
class TestShChebyu:
    """Shifted Chebyshev (second kind) via the substitution identity."""

    def test_sh_chebyu(self):
        # U*_n(x) = U_n(2x-1)
        psub = np.poly1d([2, -1])
        # Precision degrades at the highest orders (12 then 11 decimals).
        for n, decimal in enumerate([13, 13, 13, 13, 12, 11]):
            shifted = orth.sh_chebyu(n)
            expected = orth.chebyu(n)(psub)
            assert_array_almost_equal(shifted.c, expected.c, decimal)
class TestShJacobi:
    """Shifted Jacobi polynomials via the substitution-and-rescale
    identity, for random admissible (p, q)."""

    def test_sh_jacobi(self):
        # G^(p,q)_n(x) = n! gamma(n+p)/gamma(2*n+p) * P^(p-q,q-1)_n(2*x-1)
        def conv(n, p):
            return gamma(n + 1) * gamma(n + p) / gamma(2 * n + p)

        psub = np.poly1d([2, -1])
        # Random parameters with q in (0, 4) and p > q - 1.
        q = 4 * np.random.random()
        p = q - 1 + 2 * np.random.random()
        for n in range(6):
            shifted = orth.sh_jacobi(n, p, q)
            expected = orth.jacobi(n, p - q, q - 1)(psub) * conv(n, p)
            assert_array_almost_equal(shifted.c, expected.c, 13)
class TestCall:
    def test_call(self):
        """Evaluate every polynomial family's __call__ at one sample point
        and check it agrees with direct poly1d coefficient evaluation."""
        # Build source strings "orth.<family>(n, ...)" for n = 0..4.
        # NOTE: the triple-quoted template is split on whitespace, so its
        # layout is immaterial to behavior.
        poly = []
        for n in range(5):
            poly.extend([x.strip() for x in
                ("""
                orth.jacobi(%(n)d,0.3,0.9)
                orth.sh_jacobi(%(n)d,0.3,0.9)
                orth.genlaguerre(%(n)d,0.3)
                orth.laguerre(%(n)d)
                orth.hermite(%(n)d)
                orth.hermitenorm(%(n)d)
                orth.gegenbauer(%(n)d,0.3)
                orth.chebyt(%(n)d)
                orth.chebyu(%(n)d)
                orth.chebyc(%(n)d)
                orth.chebys(%(n)d)
                orth.sh_chebyt(%(n)d)
                orth.sh_chebyu(%(n)d)
                orth.legendre(%(n)d)
                orth.sh_legendre(%(n)d)
                """ % dict(n=n)).split()
                ])
        with np.errstate(all='ignore'):
            for pstr in poly:
                # eval of the fixed literals above; err_msg identifies the
                # offending family on failure.
                p = eval(pstr)
                assert_almost_equal(p(0.315), np.poly1d(p.coef)(0.315),
                                    err_msg=pstr)
class TestGenlaguerre:
    """Regression checks for generalized Laguerre polynomials."""

    def test_regression(self):
        # L_1^{(1)}(x) = 2 - x; the monic variant rescales the leading
        # coefficient to one, giving x - 2.
        cases = [
            (False, 2., np.poly1d([-1, 2])),
            (True, -2., np.poly1d([1, -2])),
        ]
        for monic, at_zero, expected_poly in cases:
            L = orth.genlaguerre(1, 1, monic=monic)
            assert_equal(L(0), at_zero)
            assert_equal(L, expected_poly)
def verify_gauss_quad(root_func, eval_func, weight_func, a, b, N,
                      rtol=1e-15, atol=5e-14):
    """Verify an N-point Gaussian quadrature rule.

    Checks (adapted from numpy's TestGauss in test_hermite.py):
    orthonormality of the first N polynomials under the discrete inner
    product, that the weights sum to the weight-function mass ``mu``, and
    that the rule integrates a cubic to within adaptive-quadrature error.
    """
    nodes, weights, mu = root_func(N, True)
    degrees = np.arange(N)
    vander = eval_func(degrees[:, np.newaxis], nodes)
    gram = np.dot(vander * weights, vander.T)
    # Normalize so the diagonal is one, then compare to the identity.
    scale = 1 / np.sqrt(gram.diagonal())
    gram = scale[:, np.newaxis] * gram * scale
    assert_allclose(gram, np.eye(N), rtol, atol)

    # check that the integral of 1 is correct
    assert_allclose(weights.sum(), mu, rtol, atol)

    # compare the results of integrating a function with quad.
    def poly(x):
        return x ** 3 - 3 * x ** 2 + x - 2

    resI = integrate.quad(lambda x: poly(x) * weight_func(x), a, b)
    resG = np.vdot(poly(nodes), weights)
    # Loosen the tolerance when quad's own error estimate exceeds 1e-6.
    rtol = 1e-6 if 1e-6 < resI[1] else resI[1] * 10
    assert_allclose(resI[0], resG, rtol=rtol)
def test_roots_jacobi():
    """Gauss-Jacobi quadrature across a wide sweep of (alpha, beta),
    plus Legendre/Gegenbauer special cases, mu-consistency, and
    parameter validation."""
    def rf(a, b):
        return lambda n, mu: sc.roots_jacobi(n, a, b, mu)

    def ef(a, b):
        return lambda n, x: sc.eval_jacobi(n, a, b, x)

    def wf(a, b):
        return lambda x: (1 - x) ** a * (1 + x) ** b

    vgq = verify_gauss_quad
    vgq(rf(-0.5, -0.75), ef(-0.5, -0.75), wf(-0.5, -0.75), -1., 1., 5)
    vgq(rf(-0.5, -0.75), ef(-0.5, -0.75), wf(-0.5, -0.75), -1., 1.,
        25, atol=1e-12)
    vgq(rf(-0.5, -0.75), ef(-0.5, -0.75), wf(-0.5, -0.75), -1., 1.,
        100, atol=1e-11)

    vgq(rf(0.5, -0.5), ef(0.5, -0.5), wf(0.5, -0.5), -1., 1., 5)
    vgq(rf(0.5, -0.5), ef(0.5, -0.5), wf(0.5, -0.5), -1., 1., 25, atol=1.5e-13)
    vgq(rf(0.5, -0.5), ef(0.5, -0.5), wf(0.5, -0.5), -1., 1., 100, atol=2e-12)

    vgq(rf(1, 0.5), ef(1, 0.5), wf(1, 0.5), -1., 1., 5, atol=2e-13)
    vgq(rf(1, 0.5), ef(1, 0.5), wf(1, 0.5), -1., 1., 25, atol=2e-13)
    vgq(rf(1, 0.5), ef(1, 0.5), wf(1, 0.5), -1., 1., 100, atol=1e-12)

    vgq(rf(0.9, 2), ef(0.9, 2), wf(0.9, 2), -1., 1., 5)
    vgq(rf(0.9, 2), ef(0.9, 2), wf(0.9, 2), -1., 1., 25, atol=1e-13)
    vgq(rf(0.9, 2), ef(0.9, 2), wf(0.9, 2), -1., 1., 100, atol=3e-13)

    vgq(rf(18.24, 27.3), ef(18.24, 27.3), wf(18.24, 27.3), -1., 1., 5)
    vgq(rf(18.24, 27.3), ef(18.24, 27.3), wf(18.24, 27.3), -1., 1., 25,
        atol=1.1e-14)
    vgq(rf(18.24, 27.3), ef(18.24, 27.3), wf(18.24, 27.3), -1., 1.,
        100, atol=1e-13)

    vgq(rf(47.1, -0.2), ef(47.1, -0.2), wf(47.1, -0.2), -1., 1., 5, atol=1e-13)
    vgq(rf(47.1, -0.2), ef(47.1, -0.2), wf(47.1, -0.2), -1., 1., 25, atol=2e-13)
    vgq(rf(47.1, -0.2), ef(47.1, -0.2), wf(47.1, -0.2), -1., 1.,
        100, atol=1e-11)

    # Large / extreme parameter regimes; tolerances widen accordingly.
    vgq(rf(1., 658.), ef(1., 658.), wf(1., 658.), -1., 1., 5, atol=2e-13)
    vgq(rf(1., 658.), ef(1., 658.), wf(1., 658.), -1., 1., 25, atol=1e-12)
    vgq(rf(1., 658.), ef(1., 658.), wf(1., 658.), -1., 1., 100, atol=1e-11)
    vgq(rf(1., 658.), ef(1., 658.), wf(1., 658.), -1., 1., 250, atol=1e-11)

    vgq(rf(511., 511.), ef(511., 511.), wf(511., 511.), -1., 1., 5,
        atol=1e-12)
    vgq(rf(511., 511.), ef(511., 511.), wf(511., 511.), -1., 1., 25,
        atol=1e-11)
    vgq(rf(511., 511.), ef(511., 511.), wf(511., 511.), -1., 1., 100,
        atol=1e-10)

    vgq(rf(511., 512.), ef(511., 512.), wf(511., 512.), -1., 1., 5,
        atol=1e-12)
    vgq(rf(511., 512.), ef(511., 512.), wf(511., 512.), -1., 1., 25,
        atol=1e-11)
    vgq(rf(511., 512.), ef(511., 512.), wf(511., 512.), -1., 1., 100,
        atol=1e-10)

    vgq(rf(1000., 500.), ef(1000., 500.), wf(1000., 500.), -1., 1., 5,
        atol=1e-12)
    vgq(rf(1000., 500.), ef(1000., 500.), wf(1000., 500.), -1., 1., 25,
        atol=1e-11)
    vgq(rf(1000., 500.), ef(1000., 500.), wf(1000., 500.), -1., 1., 100,
        atol=1e-10)

    vgq(rf(2.25, 68.9), ef(2.25, 68.9), wf(2.25, 68.9), -1., 1., 5)
    vgq(rf(2.25, 68.9), ef(2.25, 68.9), wf(2.25, 68.9), -1., 1., 25,
        atol=1e-13)
    vgq(rf(2.25, 68.9), ef(2.25, 68.9), wf(2.25, 68.9), -1., 1., 100,
        atol=1e-13)

    # when alpha == beta == 0, P_n^{a,b}(x) == P_n(x)
    xj, wj = sc.roots_jacobi(6, 0.0, 0.0)
    xl, wl = sc.roots_legendre(6)
    assert_allclose(xj, xl, 1e-14, 1e-14)
    assert_allclose(wj, wl, 1e-14, 1e-14)

    # when alpha == beta != 0, P_n^{a,b}(x) == C_n^{alpha+0.5}(x)
    xj, wj = sc.roots_jacobi(6, 4.0, 4.0)
    xc, wc = sc.roots_gegenbauer(6, 4.5)
    assert_allclose(xj, xc, 1e-14, 1e-14)
    assert_allclose(wj, wc, 1e-14, 1e-14)

    # mu=True must return the same nodes/weights plus the weight integral.
    x, w = sc.roots_jacobi(5, 2, 3, False)
    y, v, m = sc.roots_jacobi(5, 2, 3, True)
    assert_allclose(x, y, 1e-14, 1e-14)
    assert_allclose(w, v, 1e-14, 1e-14)

    muI, muI_err = integrate.quad(wf(2,3), -1, 1)
    assert_allclose(m, muI, rtol=muI_err)

    # Invalid degree or parameters must raise.
    assert_raises(ValueError, sc.roots_jacobi, 0, 1, 1)
    assert_raises(ValueError, sc.roots_jacobi, 3.3, 1, 1)
    assert_raises(ValueError, sc.roots_jacobi, 3, -2, 1)
    assert_raises(ValueError, sc.roots_jacobi, 3, 1, -2)
    assert_raises(ValueError, sc.roots_jacobi, 3, -2, -2)
def test_roots_sh_jacobi():
    """Gauss quadrature for shifted Jacobi polynomials G_n^{(p,q)} on
    [0, 1] across several (p, q) pairs, plus mu-consistency and
    parameter validation."""
    def rf(a, b):
        return lambda n, mu: sc.roots_sh_jacobi(n, a, b, mu)

    def ef(a, b):
        return lambda n, x: sc.eval_sh_jacobi(n, a, b, x)

    def wf(a, b):
        return lambda x: (1.0 - x) ** (a - b) * x ** (b - 1.0)

    vgq = verify_gauss_quad
    vgq(rf(-0.5, 0.25), ef(-0.5, 0.25), wf(-0.5, 0.25), 0., 1., 5)
    vgq(rf(-0.5, 0.25), ef(-0.5, 0.25), wf(-0.5, 0.25), 0., 1.,
        25, atol=1e-12)
    vgq(rf(-0.5, 0.25), ef(-0.5, 0.25), wf(-0.5, 0.25), 0., 1.,
        100, atol=1e-11)

    vgq(rf(0.5, 0.5), ef(0.5, 0.5), wf(0.5, 0.5), 0., 1., 5)
    vgq(rf(0.5, 0.5), ef(0.5, 0.5), wf(0.5, 0.5), 0., 1., 25, atol=1e-13)
    vgq(rf(0.5, 0.5), ef(0.5, 0.5), wf(0.5, 0.5), 0., 1., 100, atol=1e-12)

    vgq(rf(1, 0.5), ef(1, 0.5), wf(1, 0.5), 0., 1., 5)
    vgq(rf(1, 0.5), ef(1, 0.5), wf(1, 0.5), 0., 1., 25, atol=1.5e-13)
    vgq(rf(1, 0.5), ef(1, 0.5), wf(1, 0.5), 0., 1., 100, atol=2e-12)

    vgq(rf(2, 0.9), ef(2, 0.9), wf(2, 0.9), 0., 1., 5)
    vgq(rf(2, 0.9), ef(2, 0.9), wf(2, 0.9), 0., 1., 25, atol=1e-13)
    vgq(rf(2, 0.9), ef(2, 0.9), wf(2, 0.9), 0., 1., 100, atol=1e-12)

    vgq(rf(27.3, 18.24), ef(27.3, 18.24), wf(27.3, 18.24), 0., 1., 5)
    vgq(rf(27.3, 18.24), ef(27.3, 18.24), wf(27.3, 18.24), 0., 1., 25)
    vgq(rf(27.3, 18.24), ef(27.3, 18.24), wf(27.3, 18.24), 0., 1.,
        100, atol=1e-13)

    vgq(rf(47.1, 0.2), ef(47.1, 0.2), wf(47.1, 0.2), 0., 1., 5, atol=1e-12)
    vgq(rf(47.1, 0.2), ef(47.1, 0.2), wf(47.1, 0.2), 0., 1., 25, atol=1e-11)
    vgq(rf(47.1, 0.2), ef(47.1, 0.2), wf(47.1, 0.2), 0., 1., 100, atol=1e-10)

    vgq(rf(68.9, 2.25), ef(68.9, 2.25), wf(68.9, 2.25), 0., 1., 5, atol=3.5e-14)
    vgq(rf(68.9, 2.25), ef(68.9, 2.25), wf(68.9, 2.25), 0., 1., 25, atol=2e-13)
    vgq(rf(68.9, 2.25), ef(68.9, 2.25), wf(68.9, 2.25), 0., 1.,
        100, atol=1e-12)

    # mu=True must return the same nodes/weights plus the weight integral.
    x, w = sc.roots_sh_jacobi(5, 3, 2, False)
    y, v, m = sc.roots_sh_jacobi(5, 3, 2, True)
    assert_allclose(x, y, 1e-14, 1e-14)
    assert_allclose(w, v, 1e-14, 1e-14)

    muI, muI_err = integrate.quad(wf(3,2), 0, 1)
    assert_allclose(m, muI, rtol=muI_err)

    assert_raises(ValueError, sc.roots_sh_jacobi, 0, 1, 1)
    assert_raises(ValueError, sc.roots_sh_jacobi, 3.3, 1, 1)
    assert_raises(ValueError, sc.roots_sh_jacobi, 3, 1, 2)    # p - q <= -1
    assert_raises(ValueError, sc.roots_sh_jacobi, 3, 2, -1)   # q <= 0
    assert_raises(ValueError, sc.roots_sh_jacobi, 3, -2, -1)  # both
def test_roots_hermite():
    """Gauss-Hermite quadrature, exercising both the Golub-Welsch path
    (small n) and the asymptotic path (n >= 150)."""
    rootf = sc.roots_hermite
    evalf = sc.eval_hermite
    weightf = orth.hermite(5).weight_func

    verify_gauss_quad(rootf, evalf, weightf, -np.inf, np.inf, 5)
    verify_gauss_quad(rootf, evalf, weightf, -np.inf, np.inf, 25, atol=1e-13)
    verify_gauss_quad(rootf, evalf, weightf, -np.inf, np.inf, 100, atol=1e-12)

    # Golub-Welsch branch
    x, w = sc.roots_hermite(5, False)
    y, v, m = sc.roots_hermite(5, True)
    assert_allclose(x, y, 1e-14, 1e-14)
    assert_allclose(w, v, 1e-14, 1e-14)

    muI, muI_err = integrate.quad(weightf, -np.inf, np.inf)
    assert_allclose(m, muI, rtol=muI_err)

    # Asymptotic branch (switch over at n >= 150)
    x, w = sc.roots_hermite(200, False)
    y, v, m = sc.roots_hermite(200, True)
    assert_allclose(x, y, 1e-14, 1e-14)
    assert_allclose(w, v, 1e-14, 1e-14)
    assert_allclose(sum(v), m, 1e-14, 1e-14)

    # Invalid degrees must raise.
    assert_raises(ValueError, sc.roots_hermite, 0)
    assert_raises(ValueError, sc.roots_hermite, 3.3)
def test_roots_hermite_asy():
    """Nodes from the asymptotic Gauss-Hermite algorithm: the N-th
    Hermite function must vanish at the N nodes and the weights must
    sum to sqrt(pi)."""
    # Recursion for Hermite functions
    def hermite_recursion(n, nodes):
        H = np.zeros((n, nodes.size))
        H[0,:] = np.pi**(-0.25) * np.exp(-0.5*nodes**2)
        if n > 1:
            H[1,:] = sqrt(2.0) * nodes * H[0,:]
            for k in range(2, n):
                H[k,:] = sqrt(2.0/k) * nodes * H[k-1,:] - sqrt((k-1.0)/k) * H[k-2,:]
        return H

    # This tests only the nodes
    def test(N, rtol=1e-15, atol=1e-14):
        x, w = orth._roots_hermite_asy(N)
        H = hermite_recursion(N+1, x)
        assert_allclose(H[-1,:], np.zeros(N), rtol, atol)
        assert_allclose(sum(w), sqrt(np.pi), rtol, atol)

    # Cover both parities around the regime boundaries and very large N.
    test(150, atol=1e-12)
    test(151, atol=1e-12)
    test(300, atol=1e-12)
    test(301, atol=1e-12)
    test(500, atol=1e-12)
    test(501, atol=1e-12)
    test(999, atol=1e-12)
    test(1000, atol=1e-12)
    test(2000, atol=1e-12)
    test(5000, atol=1e-12)
def test_roots_hermitenorm():
    """Gauss quadrature for probabilists' (normalized) Hermite
    polynomials."""
    rootf = sc.roots_hermitenorm
    evalf = sc.eval_hermitenorm
    weightf = orth.hermitenorm(5).weight_func

    verify_gauss_quad(rootf, evalf, weightf, -np.inf, np.inf, 5)
    verify_gauss_quad(rootf, evalf, weightf, -np.inf, np.inf, 25, atol=1e-13)
    verify_gauss_quad(rootf, evalf, weightf, -np.inf, np.inf, 100, atol=1e-12)

    # mu=True must return the same nodes/weights plus the weight integral.
    x, w = sc.roots_hermitenorm(5, False)
    y, v, m = sc.roots_hermitenorm(5, True)
    assert_allclose(x, y, 1e-14, 1e-14)
    assert_allclose(w, v, 1e-14, 1e-14)

    muI, muI_err = integrate.quad(weightf, -np.inf, np.inf)
    assert_allclose(m, muI, rtol=muI_err)

    assert_raises(ValueError, sc.roots_hermitenorm, 0)
    assert_raises(ValueError, sc.roots_hermitenorm, 3.3)
def test_roots_gegenbauer():
    """Gauss-Gegenbauer quadrature over a range of alpha, including
    large-alpha regimes and the alpha == 0 degenerate case."""
    def rootf(a):
        return lambda n, mu: sc.roots_gegenbauer(n, a, mu)

    def evalf(a):
        return lambda n, x: sc.eval_gegenbauer(n, a, x)

    def weightf(a):
        return lambda x: (1 - x ** 2) ** (a - 0.5)

    vgq = verify_gauss_quad
    vgq(rootf(-0.25), evalf(-0.25), weightf(-0.25), -1., 1., 5)
    vgq(rootf(-0.25), evalf(-0.25), weightf(-0.25), -1., 1., 25, atol=1e-12)
    vgq(rootf(-0.25), evalf(-0.25), weightf(-0.25), -1., 1., 100, atol=1e-11)

    vgq(rootf(0.1), evalf(0.1), weightf(0.1), -1., 1., 5)
    vgq(rootf(0.1), evalf(0.1), weightf(0.1), -1., 1., 25, atol=1e-13)
    vgq(rootf(0.1), evalf(0.1), weightf(0.1), -1., 1., 100, atol=1e-12)

    vgq(rootf(1), evalf(1), weightf(1), -1., 1., 5)
    vgq(rootf(1), evalf(1), weightf(1), -1., 1., 25, atol=1e-13)
    vgq(rootf(1), evalf(1), weightf(1), -1., 1., 100, atol=1e-12)

    vgq(rootf(10), evalf(10), weightf(10), -1., 1., 5)
    vgq(rootf(10), evalf(10), weightf(10), -1., 1., 25, atol=1e-13)
    vgq(rootf(10), evalf(10), weightf(10), -1., 1., 100, atol=1e-12)

    vgq(rootf(50), evalf(50), weightf(50), -1., 1., 5, atol=1e-13)
    vgq(rootf(50), evalf(50), weightf(50), -1., 1., 25, atol=1e-12)
    vgq(rootf(50), evalf(50), weightf(50), -1., 1., 100, atol=1e-11)

    # Alpha=170 is where the approximation used in roots_gegenbauer changes
    vgq(rootf(170), evalf(170), weightf(170), -1., 1., 5, atol=1e-13)
    vgq(rootf(170), evalf(170), weightf(170), -1., 1., 25, atol=1e-12)
    vgq(rootf(170), evalf(170), weightf(170), -1., 1., 100, atol=1e-11)
    vgq(rootf(170.5), evalf(170.5), weightf(170.5), -1., 1., 5, atol=1.25e-13)
    vgq(rootf(170.5), evalf(170.5), weightf(170.5), -1., 1., 25, atol=1e-12)
    vgq(rootf(170.5), evalf(170.5), weightf(170.5), -1., 1., 100, atol=1e-11)

    # Test for failures, e.g. overflows, resulting from large alphas
    vgq(rootf(238), evalf(238), weightf(238), -1., 1., 5, atol=1e-13)
    vgq(rootf(238), evalf(238), weightf(238), -1., 1., 25, atol=1e-12)
    vgq(rootf(238), evalf(238), weightf(238), -1., 1., 100, atol=1e-11)
    vgq(rootf(512.5), evalf(512.5), weightf(512.5), -1., 1., 5, atol=1e-12)
    vgq(rootf(512.5), evalf(512.5), weightf(512.5), -1., 1., 25, atol=1e-11)
    vgq(rootf(512.5), evalf(512.5), weightf(512.5), -1., 1., 100, atol=1e-10)

    # this is a special case that the old code supported.
    # when alpha = 0, the gegenbauer polynomial is uniformly 0. but it goes
    # to a scaled down copy of T_n(x) there.
    vgq(rootf(0), sc.eval_chebyt, weightf(0), -1., 1., 5)
    vgq(rootf(0), sc.eval_chebyt, weightf(0), -1., 1., 25)
    vgq(rootf(0), sc.eval_chebyt, weightf(0), -1., 1., 100, atol=1e-12)

    # mu=True must return the same nodes/weights plus the weight integral.
    x, w = sc.roots_gegenbauer(5, 2, False)
    y, v, m = sc.roots_gegenbauer(5, 2, True)
    assert_allclose(x, y, 1e-14, 1e-14)
    assert_allclose(w, v, 1e-14, 1e-14)

    muI, muI_err = integrate.quad(weightf(2), -1, 1)
    assert_allclose(m, muI, rtol=muI_err)

    assert_raises(ValueError, sc.roots_gegenbauer, 0, 2)
    assert_raises(ValueError, sc.roots_gegenbauer, 3.3, 2)
    assert_raises(ValueError, sc.roots_gegenbauer, 3, -.75)
def test_roots_chebyt():
    """Gauss-Chebyshev (first kind) quadrature checks."""
    weightf = orth.chebyt(5).weight_func
    verify_gauss_quad(sc.roots_chebyt, sc.eval_chebyt, weightf, -1., 1., 5)
    verify_gauss_quad(sc.roots_chebyt, sc.eval_chebyt, weightf, -1., 1., 25)
    verify_gauss_quad(sc.roots_chebyt, sc.eval_chebyt, weightf, -1., 1., 100, atol=1e-12)

    # mu=True must return the same nodes/weights plus the weight integral.
    x, w = sc.roots_chebyt(5, False)
    y, v, m = sc.roots_chebyt(5, True)
    assert_allclose(x, y, 1e-14, 1e-14)
    assert_allclose(w, v, 1e-14, 1e-14)

    muI, muI_err = integrate.quad(weightf, -1, 1)
    assert_allclose(m, muI, rtol=muI_err)

    assert_raises(ValueError, sc.roots_chebyt, 0)
    assert_raises(ValueError, sc.roots_chebyt, 3.3)
def test_chebyt_symmetry():
    """Odd-order Gauss-Chebyshev nodes are symmetric about an exact zero."""
    nodes, _ = sc.roots_chebyt(21)
    lower, upper = nodes[:10], nodes[11:]
    # Upper half mirrors the lower half exactly; the middle node is 0.
    assert_equal(upper, -lower[::-1])
    assert_equal(nodes[10], 0)
def test_roots_chebyu():
    """Gauss-Chebyshev (second kind) quadrature checks."""
    weightf = orth.chebyu(5).weight_func
    verify_gauss_quad(sc.roots_chebyu, sc.eval_chebyu, weightf, -1., 1., 5)
    verify_gauss_quad(sc.roots_chebyu, sc.eval_chebyu, weightf, -1., 1., 25)
    verify_gauss_quad(sc.roots_chebyu, sc.eval_chebyu, weightf, -1., 1., 100)

    # mu=True must return the same nodes/weights plus the weight integral.
    x, w = sc.roots_chebyu(5, False)
    y, v, m = sc.roots_chebyu(5, True)
    assert_allclose(x, y, 1e-14, 1e-14)
    assert_allclose(w, v, 1e-14, 1e-14)

    muI, muI_err = integrate.quad(weightf, -1, 1)
    assert_allclose(m, muI, rtol=muI_err)

    assert_raises(ValueError, sc.roots_chebyu, 0)
    assert_raises(ValueError, sc.roots_chebyu, 3.3)
def test_roots_chebyc():
    """Gauss quadrature for Chebyshev C polynomials on [-2, 2]."""
    weightf = orth.chebyc(5).weight_func
    verify_gauss_quad(sc.roots_chebyc, sc.eval_chebyc, weightf, -2., 2., 5)
    verify_gauss_quad(sc.roots_chebyc, sc.eval_chebyc, weightf, -2., 2., 25)
    verify_gauss_quad(sc.roots_chebyc, sc.eval_chebyc, weightf, -2., 2., 100, atol=1e-12)

    # mu=True must return the same nodes/weights plus the weight integral.
    x, w = sc.roots_chebyc(5, False)
    y, v, m = sc.roots_chebyc(5, True)
    assert_allclose(x, y, 1e-14, 1e-14)
    assert_allclose(w, v, 1e-14, 1e-14)

    muI, muI_err = integrate.quad(weightf, -2, 2)
    assert_allclose(m, muI, rtol=muI_err)

    assert_raises(ValueError, sc.roots_chebyc, 0)
    assert_raises(ValueError, sc.roots_chebyc, 3.3)
def test_roots_chebys():
    """Gauss quadrature for Chebyshev S polynomials on [-2, 2]."""
    weightf = orth.chebys(5).weight_func
    verify_gauss_quad(sc.roots_chebys, sc.eval_chebys, weightf, -2., 2., 5)
    verify_gauss_quad(sc.roots_chebys, sc.eval_chebys, weightf, -2., 2., 25)
    verify_gauss_quad(sc.roots_chebys, sc.eval_chebys, weightf, -2., 2., 100)

    # mu=True must return the same nodes/weights plus the weight integral.
    x, w = sc.roots_chebys(5, False)
    y, v, m = sc.roots_chebys(5, True)
    assert_allclose(x, y, 1e-14, 1e-14)
    assert_allclose(w, v, 1e-14, 1e-14)

    muI, muI_err = integrate.quad(weightf, -2, 2)
    assert_allclose(m, muI, rtol=muI_err)

    assert_raises(ValueError, sc.roots_chebys, 0)
    assert_raises(ValueError, sc.roots_chebys, 3.3)
def test_roots_sh_chebyt():
    """Gauss quadrature for shifted Chebyshev (first kind) on [0, 1]."""
    weightf = orth.sh_chebyt(5).weight_func
    verify_gauss_quad(sc.roots_sh_chebyt, sc.eval_sh_chebyt, weightf, 0., 1., 5)
    verify_gauss_quad(sc.roots_sh_chebyt, sc.eval_sh_chebyt, weightf, 0., 1., 25)
    verify_gauss_quad(sc.roots_sh_chebyt, sc.eval_sh_chebyt, weightf, 0., 1.,
                      100, atol=1e-13)

    # mu=True must return the same nodes/weights plus the weight integral.
    x, w = sc.roots_sh_chebyt(5, False)
    y, v, m = sc.roots_sh_chebyt(5, True)
    assert_allclose(x, y, 1e-14, 1e-14)
    assert_allclose(w, v, 1e-14, 1e-14)

    muI, muI_err = integrate.quad(weightf, 0, 1)
    assert_allclose(m, muI, rtol=muI_err)

    assert_raises(ValueError, sc.roots_sh_chebyt, 0)
    assert_raises(ValueError, sc.roots_sh_chebyt, 3.3)
def test_roots_sh_chebyu():
    """Gauss quadrature for shifted Chebyshev (second kind) on [0, 1]."""
    weightf = orth.sh_chebyu(5).weight_func
    verify_gauss_quad(sc.roots_sh_chebyu, sc.eval_sh_chebyu, weightf, 0., 1., 5)
    verify_gauss_quad(sc.roots_sh_chebyu, sc.eval_sh_chebyu, weightf, 0., 1., 25)
    verify_gauss_quad(sc.roots_sh_chebyu, sc.eval_sh_chebyu, weightf, 0., 1.,
                      100, atol=1e-13)

    # mu=True must return the same nodes/weights plus the weight integral.
    x, w = sc.roots_sh_chebyu(5, False)
    y, v, m = sc.roots_sh_chebyu(5, True)
    assert_allclose(x, y, 1e-14, 1e-14)
    assert_allclose(w, v, 1e-14, 1e-14)

    muI, muI_err = integrate.quad(weightf, 0, 1)
    assert_allclose(m, muI, rtol=muI_err)

    assert_raises(ValueError, sc.roots_sh_chebyu, 0)
    assert_raises(ValueError, sc.roots_sh_chebyu, 3.3)
def test_roots_legendre():
    """Gauss-Legendre quadrature checks."""
    weightf = orth.legendre(5).weight_func
    verify_gauss_quad(sc.roots_legendre, sc.eval_legendre, weightf, -1., 1., 5)
    verify_gauss_quad(sc.roots_legendre, sc.eval_legendre, weightf, -1., 1.,
                      25, atol=1e-13)
    verify_gauss_quad(sc.roots_legendre, sc.eval_legendre, weightf, -1., 1.,
                      100, atol=1e-12)

    # mu=True must return the same nodes/weights plus the weight integral.
    x, w = sc.roots_legendre(5, False)
    y, v, m = sc.roots_legendre(5, True)
    assert_allclose(x, y, 1e-14, 1e-14)
    assert_allclose(w, v, 1e-14, 1e-14)

    muI, muI_err = integrate.quad(weightf, -1, 1)
    assert_allclose(m, muI, rtol=muI_err)

    assert_raises(ValueError, sc.roots_legendre, 0)
    assert_raises(ValueError, sc.roots_legendre, 3.3)
def test_roots_sh_legendre():
    """Gauss quadrature for shifted Legendre polynomials on [0, 1]."""
    weightf = orth.sh_legendre(5).weight_func
    verify_gauss_quad(sc.roots_sh_legendre, sc.eval_sh_legendre, weightf, 0., 1., 5)
    verify_gauss_quad(sc.roots_sh_legendre, sc.eval_sh_legendre, weightf, 0., 1.,
                      25, atol=1e-13)
    verify_gauss_quad(sc.roots_sh_legendre, sc.eval_sh_legendre, weightf, 0., 1.,
                      100, atol=1e-12)

    # mu=True must return the same nodes/weights plus the weight integral.
    x, w = sc.roots_sh_legendre(5, False)
    y, v, m = sc.roots_sh_legendre(5, True)
    assert_allclose(x, y, 1e-14, 1e-14)
    assert_allclose(w, v, 1e-14, 1e-14)

    muI, muI_err = integrate.quad(weightf, 0, 1)
    assert_allclose(m, muI, rtol=muI_err)

    assert_raises(ValueError, sc.roots_sh_legendre, 0)
    assert_raises(ValueError, sc.roots_sh_legendre, 3.3)
def test_roots_laguerre():
    """Gauss-Laguerre quadrature on [0, inf)."""
    weightf = orth.laguerre(5).weight_func
    verify_gauss_quad(sc.roots_laguerre, sc.eval_laguerre, weightf, 0., np.inf, 5)
    verify_gauss_quad(sc.roots_laguerre, sc.eval_laguerre, weightf, 0., np.inf,
                      25, atol=1e-13)
    verify_gauss_quad(sc.roots_laguerre, sc.eval_laguerre, weightf, 0., np.inf,
                      100, atol=1e-12)

    # mu=True must return the same nodes/weights plus the weight integral.
    x, w = sc.roots_laguerre(5, False)
    y, v, m = sc.roots_laguerre(5, True)
    assert_allclose(x, y, 1e-14, 1e-14)
    assert_allclose(w, v, 1e-14, 1e-14)

    muI, muI_err = integrate.quad(weightf, 0, np.inf)
    assert_allclose(m, muI, rtol=muI_err)

    assert_raises(ValueError, sc.roots_laguerre, 0)
    assert_raises(ValueError, sc.roots_laguerre, 3.3)
def test_roots_genlaguerre():
    """Gauss quadrature for generalized Laguerre polynomials over
    several alpha values, plus mu-consistency and validation."""
    def rootf(a):
        return lambda n, mu: sc.roots_genlaguerre(n, a, mu)

    def evalf(a):
        return lambda n, x: sc.eval_genlaguerre(n, a, x)

    def weightf(a):
        return lambda x: x ** a * np.exp(-x)

    vgq = verify_gauss_quad
    vgq(rootf(-0.5), evalf(-0.5), weightf(-0.5), 0., np.inf, 5)
    vgq(rootf(-0.5), evalf(-0.5), weightf(-0.5), 0., np.inf, 25, atol=1e-13)
    vgq(rootf(-0.5), evalf(-0.5), weightf(-0.5), 0., np.inf, 100, atol=1e-12)

    vgq(rootf(0.1), evalf(0.1), weightf(0.1), 0., np.inf, 5)
    vgq(rootf(0.1), evalf(0.1), weightf(0.1), 0., np.inf, 25, atol=1e-13)
    vgq(rootf(0.1), evalf(0.1), weightf(0.1), 0., np.inf, 100, atol=1.6e-13)

    vgq(rootf(1), evalf(1), weightf(1), 0., np.inf, 5)
    vgq(rootf(1), evalf(1), weightf(1), 0., np.inf, 25, atol=1e-13)
    vgq(rootf(1), evalf(1), weightf(1), 0., np.inf, 100, atol=1.03e-13)

    vgq(rootf(10), evalf(10), weightf(10), 0., np.inf, 5)
    vgq(rootf(10), evalf(10), weightf(10), 0., np.inf, 25, atol=1e-13)
    vgq(rootf(10), evalf(10), weightf(10), 0., np.inf, 100, atol=1e-12)

    vgq(rootf(50), evalf(50), weightf(50), 0., np.inf, 5)
    vgq(rootf(50), evalf(50), weightf(50), 0., np.inf, 25, atol=1e-13)
    vgq(rootf(50), evalf(50), weightf(50), 0., np.inf, 100, rtol=1e-14, atol=2e-13)

    # mu=True must return the same nodes/weights plus the weight integral.
    x, w = sc.roots_genlaguerre(5, 2, False)
    y, v, m = sc.roots_genlaguerre(5, 2, True)
    assert_allclose(x, y, 1e-14, 1e-14)
    assert_allclose(w, v, 1e-14, 1e-14)

    muI, muI_err = integrate.quad(weightf(2.), 0., np.inf)
    assert_allclose(m, muI, rtol=muI_err)

    assert_raises(ValueError, sc.roots_genlaguerre, 0, 2)
    assert_raises(ValueError, sc.roots_genlaguerre, 3.3, 2)
    assert_raises(ValueError, sc.roots_genlaguerre, 3, -1.1)
def test_gh_6721():
    """Regression test for gh-6721: evaluating a high-degree Chebyshev
    polynomial object must not raise."""
    sc.chebyt(65)(0.2)
| 31,471
| 38.193026
| 89
|
py
|
scipy
|
scipy-main/scipy/special/tests/test_ndtr.py
|
import numpy as np
from numpy.testing import assert_equal, assert_allclose
import scipy.special as sc
def test_ndtr():
    """Standard normal CDF spot checks."""
    # Symmetry point: Phi(0) is exactly one half.
    assert_equal(sc.ndtr(0), 0.5)
    # Reference value for Phi(1).
    assert_allclose(sc.ndtr(1), 0.8413447460685429)
class TestNdtri:
    """Inverse of the standard normal CDF."""

    def test_zero(self):
        # The median probability maps back to zero.
        assert sc.ndtri(0.5) == 0.0

    def test_asymptotes(self):
        # The endpoints of the unit interval map to -/+ infinity.
        assert_equal(sc.ndtri([0.0, 1.0]), [-np.inf, np.inf])

    def test_outside_of_domain(self):
        # Probabilities outside [0, 1] produce nan.
        assert np.isnan(sc.ndtri([-1.5, 1.5])).all()
class TestLogNdtr:
    """log of the standard normal CDF across its implementation regimes.

    The expected values in these tests were computed with mpmath:

        def log_ndtr_mp(x):
            return mpmath.log(mpmath.ncdf(x))
    """

    def test_log_ndtr_moderate_le8(self):
        # Moderate arguments up to x = 8.
        x = np.array([-0.75, -0.25, 0, 0.5, 1.5, 2.5, 3, 4, 5, 7, 8])
        expected = np.array([-1.4844482299196562,
                             -0.9130617648111351,
                             -0.6931471805599453,
                             -0.3689464152886564,
                             -0.06914345561223398,
                             -0.006229025485860002,
                             -0.0013508099647481938,
                             -3.167174337748927e-05,
                             -2.866516129637636e-07,
                             -1.279812543886654e-12,
                             -6.220960574271786e-16])
        y = sc.log_ndtr(x)
        assert_allclose(y, expected, rtol=1e-14)

    def test_log_ndtr_values_8_16(self):
        x = np.array([8.001, 8.06, 8.15, 8.5, 10, 12, 14, 16])
        expected = [-6.170639424817055e-16,
                    -3.814722443652823e-16,
                    -1.819621363526629e-16,
                    -9.479534822203318e-18,
                    -7.619853024160525e-24,
                    -1.776482112077679e-33,
                    -7.7935368191928e-45,
                    -6.388754400538087e-58]
        y = sc.log_ndtr(x)
        assert_allclose(y, expected, rtol=5e-14)

    def test_log_ndtr_values_16_31(self):
        x = np.array([16.15, 20.3, 21.4, 26.2, 30.9])
        expected = [-5.678084565148492e-59,
                    -6.429244467698346e-92,
                    -6.680402412553295e-102,
                    -1.328698078458869e-151,
                    -5.972288641838264e-210]
        y = sc.log_ndtr(x)
        assert_allclose(y, expected, rtol=2e-13)

    def test_log_ndtr_values_gt31(self):
        # Deep tail where the result underflows toward -inf.
        x = np.array([31.6, 32.8, 34.9, 37.1])
        expected = [-1.846036234858162e-219,
                    -2.9440539964066835e-236,
                    -3.71721649450857e-267,
                    -1.4047119663106221e-301]
        y = sc.log_ndtr(x)
        assert_allclose(y, expected, rtol=3e-13)
| 2,680
| 33.371795
| 69
|
py
|
scipy
|
scipy-main/scipy/special/tests/test_nan_inputs.py
|
"""Test how the ufuncs in special handle nan inputs.
"""
from typing import Callable, Dict
import numpy as np
from numpy.testing import assert_array_equal, assert_, suppress_warnings
import pytest
import scipy.special as sc
# KNOWNFAILURES maps a ufunc to the reason it is expected to fail (used
# to mark xfails); POSTPROCESSING maps a ufunc to a callable applied to
# its raw output before the nan check. Both are currently empty.
KNOWNFAILURES: Dict[str, Callable] = {}
POSTPROCESSING: Dict[str, Callable] = {}
def _get_ufuncs():
    """Collect all ufuncs exported by scipy.special, in sorted name order.

    Returns a pair (ufuncs, ufunc_names); any ufunc listed in
    KNOWNFAILURES is wrapped as a pytest xfail param with its reason.
    """
    ufuncs = []
    ufunc_names = []
    for name in sorted(sc.__dict__):
        obj = sc.__dict__[name]
        if not isinstance(obj, np.ufunc):
            continue
        msg = KNOWNFAILURES.get(obj)
        if msg is None:
            ufuncs.append(obj)
            ufunc_names.append(name)
        else:
            fail = pytest.mark.xfail(run=False, reason=msg)
            ufuncs.append(pytest.param(obj, marks=fail))
            ufunc_names.append(name)
    return ufuncs, ufunc_names
# Collected once at import time so pytest can parametrize over them.
UFUNCS, UFUNC_NAMES = _get_ufuncs()
@pytest.mark.parametrize("func", UFUNCS, ids=UFUNC_NAMES)
def test_nan_inputs(func):
    """Every special ufunc should map all-nan real inputs to nan."""
    args = (np.nan,)*func.nin
    with suppress_warnings() as sup:
        # Ignore warnings about unsafe casts from legacy wrappers
        sup.filter(RuntimeWarning,
                   "floating point number truncated to an integer")
        try:
            with suppress_warnings() as sup:
                sup.filter(DeprecationWarning)
                res = func(*args)
        except TypeError:
            # One of the arguments doesn't take real inputs
            return
    # Optionally transform multi-output results before the nan check.
    if func in POSTPROCESSING:
        res = POSTPROCESSING[func](*res)
    msg = f"got {res} instead of nan"
    assert_array_equal(np.isnan(res), True, err_msg=msg)
def test_legacy_cast():
    """Legacy integer-argument ufuncs truncate a nan input with a
    RuntimeWarning and still propagate nan."""
    with suppress_warnings() as sup:
        sup.filter(RuntimeWarning,
                   "floating point number truncated to an integer")
        result = sc.bdtrc(np.nan, 1, 0.5)
        assert_(np.isnan(result))
| 1,837
| 27.276923
| 72
|
py
|
scipy
|
scipy-main/scipy/special/tests/test_basic.py
|
# this program corresponds to special.py
### Means test is not done yet
# E Means test is giving error (E)
# F Means test is failing (F)
# EF Means test is giving error and Failing
#! Means test is segfaulting
# 8 Means test runs forever
### test_besselpoly
### test_mathieu_a
### test_mathieu_even_coef
### test_mathieu_odd_coef
### test_modfresnelp
### test_modfresnelm
# test_pbdv_seq
### test_pbvv_seq
### test_sph_harm
import functools
import itertools
import operator
import platform
import sys
import numpy as np
from numpy import (array, isnan, r_, arange, finfo, pi, sin, cos, tan, exp,
log, zeros, sqrt, asarray, inf, nan_to_num, real, arctan, float_)
import pytest
from pytest import raises as assert_raises
from numpy.testing import (assert_equal, assert_almost_equal,
assert_array_equal, assert_array_almost_equal, assert_approx_equal,
assert_, assert_allclose, assert_array_almost_equal_nulp,
suppress_warnings)
from scipy import special
import scipy.special._ufuncs as cephes
from scipy.special import ellipe, ellipk, ellipkm1
from scipy.special import elliprc, elliprd, elliprf, elliprg, elliprj
from scipy.special import mathieu_odd_coef, mathieu_even_coef
from scipy._lib.deprecation import _NoValue
from scipy.special._basic import _FACTORIALK_LIMITS_64BITS, \
_FACTORIALK_LIMITS_32BITS
from scipy.special._testutils import with_special_errors, \
assert_func_equal, FuncData
import math
class TestCephes:
def test_airy(self):
cephes.airy(0)
def test_airye(self):
cephes.airye(0)
def test_binom(self):
n = np.array([0.264, 4, 5.2, 17])
k = np.array([2, 0.4, 7, 3.3])
nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
).reshape(2, -1).T
rknown = np.array([[-0.097152, 0.9263051596159367, 0.01858423645695389,
-0.007581020651518199],[6, 2.0214389119675666, 0, 2.9827344527963846],
[10.92, 2.22993515861399, -0.00585728, 10.468891352063146],
[136, 3.5252179590758828, 19448, 1024.5526916174495]])
assert_func_equal(cephes.binom, rknown.ravel(), nk, rtol=1e-13)
# Test branches in implementation
np.random.seed(1234)
n = np.r_[np.arange(-7, 30), 1000*np.random.rand(30) - 500]
k = np.arange(0, 102)
nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
).reshape(2, -1).T
assert_func_equal(cephes.binom,
cephes.binom(nk[:,0], nk[:,1] * (1 + 1e-15)),
nk,
atol=1e-10, rtol=1e-10)
def test_binom_2(self):
# Test branches in implementation
np.random.seed(1234)
n = np.r_[np.logspace(1, 300, 20)]
k = np.arange(0, 102)
nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
).reshape(2, -1).T
assert_func_equal(cephes.binom,
cephes.binom(nk[:,0], nk[:,1] * (1 + 1e-15)),
nk,
atol=1e-10, rtol=1e-10)
def test_binom_exact(self):
@np.vectorize
def binom_int(n, k):
n = int(n)
k = int(k)
num = int(1)
den = int(1)
for i in range(1, k+1):
num *= i + n - k
den *= i
return float(num/den)
np.random.seed(1234)
n = np.arange(1, 15)
k = np.arange(0, 15)
nk = np.array(np.broadcast_arrays(n[:,None], k[None,:])
).reshape(2, -1).T
nk = nk[nk[:,0] >= nk[:,1]]
assert_func_equal(cephes.binom,
binom_int(nk[:,0], nk[:,1]),
nk,
atol=0, rtol=0)
    def test_binom_nooverflow_8346(self):
        # Check that binom(n, k) does not overflow prematurely (gh-8346).
        # Each row is (n, k, expected binom(n, k)); the expected values are
        # still representable as finite doubles.
        dataset = [
            (1000, 500, 2.70288240945436551e+299),
            (1002, 501, 1.08007396880791225e+300),
            (1004, 502, 4.31599279169058121e+300),
            (1006, 503, 1.72468101616263781e+301),
            (1008, 504, 6.89188009236419153e+301),
            (1010, 505, 2.75402257948335448e+302),
            (1012, 506, 1.10052048531923757e+303),
            (1014, 507, 4.39774063758732849e+303),
            (1016, 508, 1.75736486108312519e+304),
            (1018, 509, 7.02255427788423734e+304),
            (1020, 510, 2.80626776829962255e+305),
            (1022, 511, 1.12140876377061240e+306),
            (1024, 512, 4.48125455209897109e+306),
            (1026, 513, 1.79075474304149900e+307),
            (1028, 514, 7.15605105487789676e+307)
        ]
        dataset = np.asarray(dataset)
        # Columns 0 and 1 are the arguments, column 2 the expected result.
        FuncData(cephes.binom, dataset, (0, 1), 2, rtol=1e-12).check()
def test_bdtr(self):
assert_equal(cephes.bdtr(1,1,0.5),1.0)
def test_bdtri(self):
assert_equal(cephes.bdtri(1,3,0.5),0.5)
def test_bdtrc(self):
assert_equal(cephes.bdtrc(1,3,0.5),0.5)
def test_bdtrin(self):
assert_equal(cephes.bdtrin(1,0,1),5.0)
def test_bdtrik(self):
cephes.bdtrik(1,3,0.5)
def test_bei(self):
assert_equal(cephes.bei(0),0.0)
def test_beip(self):
assert_equal(cephes.beip(0),0.0)
def test_ber(self):
assert_equal(cephes.ber(0),1.0)
def test_berp(self):
assert_equal(cephes.berp(0),0.0)
def test_besselpoly(self):
assert_equal(cephes.besselpoly(0,0,0),1.0)
def test_beta(self):
assert_equal(cephes.beta(1,1),1.0)
assert_allclose(cephes.beta(-100.3, 1e-200), cephes.gamma(1e-200))
assert_allclose(cephes.beta(0.0342, 171), 24.070498359873497,
rtol=1e-13, atol=0)
def test_betainc(self):
assert_equal(cephes.betainc(1,1,1),1.0)
assert_allclose(cephes.betainc(0.0342, 171, 1e-10), 0.55269916901806648)
def test_betaln(self):
assert_equal(cephes.betaln(1,1),0.0)
assert_allclose(cephes.betaln(-100.3, 1e-200), cephes.gammaln(1e-200))
assert_allclose(cephes.betaln(0.0342, 170), 3.1811881124242447,
rtol=1e-14, atol=0)
def test_betaincinv(self):
assert_equal(cephes.betaincinv(1,1,1),1.0)
assert_allclose(cephes.betaincinv(0.0342, 171, 0.25),
8.4231316935498957e-21, rtol=3e-12, atol=0)
def test_beta_inf(self):
assert_(np.isinf(special.beta(-1, 2)))
def test_btdtr(self):
assert_equal(cephes.btdtr(1,1,1),1.0)
def test_btdtri(self):
assert_equal(cephes.btdtri(1,1,1),1.0)
def test_btdtria(self):
assert_equal(cephes.btdtria(1,1,1),5.0)
def test_btdtrib(self):
assert_equal(cephes.btdtrib(1,1,1),5.0)
def test_cbrt(self):
assert_approx_equal(cephes.cbrt(1),1.0)
def test_chdtr(self):
assert_equal(cephes.chdtr(1,0),0.0)
def test_chdtrc(self):
assert_equal(cephes.chdtrc(1,0),1.0)
def test_chdtri(self):
assert_equal(cephes.chdtri(1,1),0.0)
def test_chdtriv(self):
assert_equal(cephes.chdtriv(0,0),5.0)
def test_chndtr(self):
assert_equal(cephes.chndtr(0,1,0),0.0)
# Each row holds (x, nu, lam, expected_value)
# These values were computed using Wolfram Alpha with
# CDF[NoncentralChiSquareDistribution[nu, lam], x]
values = np.array([
[25.00, 20.0, 400, 4.1210655112396197139e-57],
[25.00, 8.00, 250, 2.3988026526832425878e-29],
[0.001, 8.00, 40., 5.3761806201366039084e-24],
[0.010, 8.00, 40., 5.45396231055999457039e-20],
[20.00, 2.00, 107, 1.39390743555819597802e-9],
[22.50, 2.00, 107, 7.11803307138105870671e-9],
[25.00, 2.00, 107, 3.11041244829864897313e-8],
[3.000, 2.00, 1.0, 0.62064365321954362734],
[350.0, 300., 10., 0.93880128006276407710],
[100.0, 13.5, 10., 0.99999999650104210949],
[700.0, 20.0, 400, 0.99999999925680650105],
[150.0, 13.5, 10., 0.99999999999999983046],
[160.0, 13.5, 10., 0.99999999999999999518], # 1.0
])
cdf = cephes.chndtr(values[:, 0], values[:, 1], values[:, 2])
assert_allclose(cdf, values[:, 3], rtol=1e-12)
assert_almost_equal(cephes.chndtr(np.inf, np.inf, 0), 2.0)
assert_almost_equal(cephes.chndtr(2, 1, np.inf), 0.0)
assert_(np.isnan(cephes.chndtr(np.nan, 1, 2)))
assert_(np.isnan(cephes.chndtr(5, np.nan, 2)))
assert_(np.isnan(cephes.chndtr(5, 1, np.nan)))
def test_chndtridf(self):
assert_equal(cephes.chndtridf(0,0,1),5.0)
def test_chndtrinc(self):
assert_equal(cephes.chndtrinc(0,1,0),5.0)
def test_chndtrix(self):
assert_equal(cephes.chndtrix(0,1,0),0.0)
def test_cosdg(self):
assert_equal(cephes.cosdg(0),1.0)
def test_cosm1(self):
assert_equal(cephes.cosm1(0),0.0)
def test_cotdg(self):
assert_almost_equal(cephes.cotdg(45),1.0)
def test_dawsn(self):
assert_equal(cephes.dawsn(0),0.0)
assert_allclose(cephes.dawsn(1.23), 0.50053727749081767)
    def test_diric(self):
        # Test behavior near multiples of 2*pi.  Regression test for the
        # issue described in gh-4001: diric lost accuracy close to its
        # period.  For odd n the function returns to 1 at x = 2*pi.
        n_odd = [1, 5, 25]
        x = np.array(2*np.pi + 5e-5).astype(np.float32)
        assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=7)
        x = np.array(2*np.pi + 1e-9).astype(np.float64)
        assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=15)
        x = np.array(2*np.pi + 1e-15).astype(np.float64)
        assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=15)
        if hasattr(np, 'float128'):
            # No float128 available in 32-bit numpy
            x = np.array(2*np.pi + 1e-12).astype(np.float128)
            assert_almost_equal(special.diric(x, n_odd), 1.0, decimal=19)
        # For even n the expected value at 2*pi is -1 instead of +1.
        n_even = [2, 4, 24]
        x = np.array(2*np.pi + 1e-9).astype(np.float64)
        assert_almost_equal(special.diric(x, n_even), -1.0, decimal=15)
        # Test at some values not near a multiple of pi; reference values
        # were computed with Octave.
        x = np.arange(0.2*np.pi, 1.0*np.pi, 0.2*np.pi)
        octave_result = [0.872677996249965, 0.539344662916632,
                         0.127322003750035, -0.206011329583298]
        assert_almost_equal(special.diric(x, 3), octave_result, decimal=15)
def test_diric_broadcasting(self):
x = np.arange(5)
n = np.array([1, 3, 7])
assert_(special.diric(x[:, np.newaxis], n).shape == (x.size, n.size))
def test_ellipe(self):
assert_equal(cephes.ellipe(1),1.0)
def test_ellipeinc(self):
assert_equal(cephes.ellipeinc(0,1),0.0)
def test_ellipj(self):
cephes.ellipj(0,1)
def test_ellipk(self):
assert_allclose(ellipk(0), pi/2)
def test_ellipkinc(self):
assert_equal(cephes.ellipkinc(0,0),0.0)
def test_erf(self):
assert_equal(cephes.erf(0), 0.0)
def test_erf_symmetry(self):
x = 5.905732037710919
assert_equal(cephes.erf(x) + cephes.erf(-x), 0.0)
def test_erfc(self):
assert_equal(cephes.erfc(0), 1.0)
def test_exp10(self):
assert_approx_equal(cephes.exp10(2),100.0)
def test_exp2(self):
assert_equal(cephes.exp2(2),4.0)
def test_expm1(self):
assert_equal(cephes.expm1(0),0.0)
assert_equal(cephes.expm1(np.inf), np.inf)
assert_equal(cephes.expm1(-np.inf), -1)
assert_equal(cephes.expm1(np.nan), np.nan)
def test_expm1_complex(self):
expm1 = cephes.expm1
assert_equal(expm1(0 + 0j), 0 + 0j)
assert_equal(expm1(complex(np.inf, 0)), complex(np.inf, 0))
assert_equal(expm1(complex(np.inf, 1)), complex(np.inf, np.inf))
assert_equal(expm1(complex(np.inf, 2)), complex(-np.inf, np.inf))
assert_equal(expm1(complex(np.inf, 4)), complex(-np.inf, -np.inf))
assert_equal(expm1(complex(np.inf, 5)), complex(np.inf, -np.inf))
assert_equal(expm1(complex(1, np.inf)), complex(np.nan, np.nan))
assert_equal(expm1(complex(0, np.inf)), complex(np.nan, np.nan))
assert_equal(expm1(complex(np.inf, np.inf)), complex(np.inf, np.nan))
assert_equal(expm1(complex(-np.inf, np.inf)), complex(-1, 0))
assert_equal(expm1(complex(-np.inf, np.nan)), complex(-1, 0))
assert_equal(expm1(complex(np.inf, np.nan)), complex(np.inf, np.nan))
assert_equal(expm1(complex(0, np.nan)), complex(np.nan, np.nan))
assert_equal(expm1(complex(1, np.nan)), complex(np.nan, np.nan))
assert_equal(expm1(complex(np.nan, 1)), complex(np.nan, np.nan))
assert_equal(expm1(complex(np.nan, np.nan)), complex(np.nan, np.nan))
@pytest.mark.xfail(reason='The real part of expm1(z) bad at these points')
def test_expm1_complex_hard(self):
# The real part of this function is difficult to evaluate when
# z.real = -log(cos(z.imag)).
y = np.array([0.1, 0.2, 0.3, 5, 11, 20])
x = -np.log(np.cos(y))
z = x + 1j*y
# evaluate using mpmath.expm1 with dps=1000
expected = np.array([-5.5507901846769623e-17+0.10033467208545054j,
2.4289354732893695e-18+0.20271003550867248j,
4.5235500262585768e-17+0.30933624960962319j,
7.8234305217489006e-17-3.3805150062465863j,
-1.3685191953697676e-16-225.95084645419513j,
8.7175620481291045e-17+2.2371609442247422j])
found = cephes.expm1(z)
# this passes.
assert_array_almost_equal_nulp(found.imag, expected.imag, 3)
# this fails.
assert_array_almost_equal_nulp(found.real, expected.real, 20)
def test_fdtr(self):
assert_equal(cephes.fdtr(1, 1, 0), 0.0)
# Computed using Wolfram Alpha: CDF[FRatioDistribution[1e-6, 5], 10]
assert_allclose(cephes.fdtr(1e-6, 5, 10), 0.9999940790193488,
rtol=1e-12)
def test_fdtrc(self):
assert_equal(cephes.fdtrc(1, 1, 0), 1.0)
# Computed using Wolfram Alpha:
# 1 - CDF[FRatioDistribution[2, 1/10], 1e10]
assert_allclose(cephes.fdtrc(2, 0.1, 1e10), 0.27223784621293512,
rtol=1e-12)
def test_fdtri(self):
assert_allclose(cephes.fdtri(1, 1, [0.499, 0.501]),
array([0.9937365, 1.00630298]), rtol=1e-6)
# From Wolfram Alpha:
# CDF[FRatioDistribution[1/10, 1], 3] = 0.8756751669632105666874...
p = 0.8756751669632105666874
assert_allclose(cephes.fdtri(0.1, 1, p), 3, rtol=1e-12)
@pytest.mark.xfail(reason='Returns nan on i686.')
def test_fdtri_mysterious_failure(self):
assert_allclose(cephes.fdtri(1, 1, 0.5), 1)
def test_fdtridfd(self):
assert_equal(cephes.fdtridfd(1,0,0),5.0)
def test_fresnel(self):
assert_equal(cephes.fresnel(0),(0.0,0.0))
def test_gamma(self):
assert_equal(cephes.gamma(5),24.0)
def test_gammainccinv(self):
assert_equal(cephes.gammainccinv(5,1),0.0)
def test_gammaln(self):
cephes.gammaln(10)
def test_gammasgn(self):
vals = np.array([-4, -3.5, -2.3, 1, 4.2], np.float64)
assert_array_equal(cephes.gammasgn(vals), np.sign(cephes.rgamma(vals)))
def test_gdtr(self):
assert_equal(cephes.gdtr(1,1,0),0.0)
def test_gdtr_inf(self):
assert_equal(cephes.gdtr(1,1,np.inf),1.0)
def test_gdtrc(self):
assert_equal(cephes.gdtrc(1,1,0),1.0)
def test_gdtria(self):
assert_equal(cephes.gdtria(0,1,1),0.0)
def test_gdtrib(self):
cephes.gdtrib(1,0,1)
# assert_equal(cephes.gdtrib(1,0,1),5.0)
def test_gdtrix(self):
cephes.gdtrix(1,1,.1)
def test_hankel1(self):
cephes.hankel1(1,1)
def test_hankel1e(self):
cephes.hankel1e(1,1)
def test_hankel2(self):
cephes.hankel2(1,1)
def test_hankel2e(self):
cephes.hankel2e(1,1)
def test_hyp1f1(self):
assert_approx_equal(cephes.hyp1f1(1,1,1), exp(1.0))
assert_approx_equal(cephes.hyp1f1(3,4,-6), 0.026056422099537251095)
cephes.hyp1f1(1,1,1)
def test_hyp2f1(self):
assert_equal(cephes.hyp2f1(1,1,1,0),1.0)
def test_i0(self):
assert_equal(cephes.i0(0),1.0)
def test_i0e(self):
assert_equal(cephes.i0e(0),1.0)
def test_i1(self):
assert_equal(cephes.i1(0),0.0)
def test_i1e(self):
assert_equal(cephes.i1e(0),0.0)
def test_it2i0k0(self):
cephes.it2i0k0(1)
def test_it2j0y0(self):
cephes.it2j0y0(1)
def test_it2struve0(self):
cephes.it2struve0(1)
def test_itairy(self):
cephes.itairy(1)
def test_iti0k0(self):
assert_equal(cephes.iti0k0(0),(0.0,0.0))
def test_itj0y0(self):
assert_equal(cephes.itj0y0(0),(0.0,0.0))
def test_itmodstruve0(self):
assert_equal(cephes.itmodstruve0(0),0.0)
def test_itstruve0(self):
assert_equal(cephes.itstruve0(0),0.0)
def test_iv(self):
assert_equal(cephes.iv(1,0),0.0)
def test_ive(self):
assert_equal(cephes.ive(1,0),0.0)
def test_j0(self):
assert_equal(cephes.j0(0),1.0)
def test_j1(self):
assert_equal(cephes.j1(0),0.0)
def test_jn(self):
assert_equal(cephes.jn(0,0),1.0)
def test_jv(self):
assert_equal(cephes.jv(0,0),1.0)
def test_jve(self):
assert_equal(cephes.jve(0,0),1.0)
def test_k0(self):
cephes.k0(2)
def test_k0e(self):
cephes.k0e(2)
def test_k1(self):
cephes.k1(2)
def test_k1e(self):
cephes.k1e(2)
def test_kei(self):
cephes.kei(2)
def test_keip(self):
assert_equal(cephes.keip(0),0.0)
def test_ker(self):
cephes.ker(2)
def test_kerp(self):
cephes.kerp(2)
def test_kelvin(self):
cephes.kelvin(2)
def test_kn(self):
cephes.kn(1,1)
def test_kolmogi(self):
assert_equal(cephes.kolmogi(1),0.0)
assert_(np.isnan(cephes.kolmogi(np.nan)))
def test_kolmogorov(self):
assert_equal(cephes.kolmogorov(0), 1.0)
def test_kolmogp(self):
assert_equal(cephes._kolmogp(0), -0.0)
def test_kolmogc(self):
assert_equal(cephes._kolmogc(0), 0.0)
def test_kolmogci(self):
assert_equal(cephes._kolmogci(0), 0.0)
assert_(np.isnan(cephes._kolmogci(np.nan)))
def test_kv(self):
cephes.kv(1,1)
def test_kve(self):
cephes.kve(1,1)
def test_log1p(self):
log1p = cephes.log1p
assert_equal(log1p(0), 0.0)
assert_equal(log1p(-1), -np.inf)
assert_equal(log1p(-2), np.nan)
assert_equal(log1p(np.inf), np.inf)
def test_log1p_complex(self):
log1p = cephes.log1p
c = complex
assert_equal(log1p(0 + 0j), 0 + 0j)
assert_equal(log1p(c(-1, 0)), c(-np.inf, 0))
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "invalid value encountered in multiply")
assert_allclose(log1p(c(1, np.inf)), c(np.inf, np.pi/2))
assert_equal(log1p(c(1, np.nan)), c(np.nan, np.nan))
assert_allclose(log1p(c(-np.inf, 1)), c(np.inf, np.pi))
assert_equal(log1p(c(np.inf, 1)), c(np.inf, 0))
assert_allclose(log1p(c(-np.inf, np.inf)), c(np.inf, 3*np.pi/4))
assert_allclose(log1p(c(np.inf, np.inf)), c(np.inf, np.pi/4))
assert_equal(log1p(c(np.inf, np.nan)), c(np.inf, np.nan))
assert_equal(log1p(c(-np.inf, np.nan)), c(np.inf, np.nan))
assert_equal(log1p(c(np.nan, np.inf)), c(np.inf, np.nan))
assert_equal(log1p(c(np.nan, 1)), c(np.nan, np.nan))
assert_equal(log1p(c(np.nan, np.nan)), c(np.nan, np.nan))
def test_lpmv(self):
assert_equal(cephes.lpmv(0,0,1),1.0)
def test_mathieu_a(self):
assert_equal(cephes.mathieu_a(1,0),1.0)
def test_mathieu_b(self):
assert_equal(cephes.mathieu_b(1,0),1.0)
def test_mathieu_cem(self):
assert_equal(cephes.mathieu_cem(1,0,0),(1.0,0.0))
# Test AMS 20.2.27
@np.vectorize
def ce_smallq(m, q, z):
z *= np.pi/180
if m == 0:
return 2**(-0.5) * (1 - .5*q*cos(2*z)) # + O(q^2)
elif m == 1:
return cos(z) - q/8 * cos(3*z) # + O(q^2)
elif m == 2:
return cos(2*z) - q*(cos(4*z)/12 - 1/4) # + O(q^2)
else:
return cos(m*z) - q*(cos((m+2)*z)/(4*(m+1)) - cos((m-2)*z)/(4*(m-1))) # + O(q^2)
m = np.arange(0, 100)
q = np.r_[0, np.logspace(-30, -9, 10)]
assert_allclose(cephes.mathieu_cem(m[:,None], q[None,:], 0.123)[0],
ce_smallq(m[:,None], q[None,:], 0.123),
rtol=1e-14, atol=0)
def test_mathieu_sem(self):
assert_equal(cephes.mathieu_sem(1,0,0),(0.0,1.0))
# Test AMS 20.2.27
@np.vectorize
def se_smallq(m, q, z):
z *= np.pi/180
if m == 1:
return sin(z) - q/8 * sin(3*z) # + O(q^2)
elif m == 2:
return sin(2*z) - q*sin(4*z)/12 # + O(q^2)
else:
return sin(m*z) - q*(sin((m+2)*z)/(4*(m+1)) - sin((m-2)*z)/(4*(m-1))) # + O(q^2)
m = np.arange(1, 100)
q = np.r_[0, np.logspace(-30, -9, 10)]
assert_allclose(cephes.mathieu_sem(m[:,None], q[None,:], 0.123)[0],
se_smallq(m[:,None], q[None,:], 0.123),
rtol=1e-14, atol=0)
def test_mathieu_modcem1(self):
assert_equal(cephes.mathieu_modcem1(1,0,0),(0.0,0.0))
def test_mathieu_modcem2(self):
cephes.mathieu_modcem2(1,1,1)
# Test reflection relation AMS 20.6.19
m = np.arange(0, 4)[:,None,None]
q = np.r_[np.logspace(-2, 2, 10)][None,:,None]
z = np.linspace(0, 1, 7)[None,None,:]
y1 = cephes.mathieu_modcem2(m, q, -z)[0]
fr = -cephes.mathieu_modcem2(m, q, 0)[0] / cephes.mathieu_modcem1(m, q, 0)[0]
y2 = -cephes.mathieu_modcem2(m, q, z)[0] - 2*fr*cephes.mathieu_modcem1(m, q, z)[0]
assert_allclose(y1, y2, rtol=1e-10)
def test_mathieu_modsem1(self):
assert_equal(cephes.mathieu_modsem1(1,0,0),(0.0,0.0))
def test_mathieu_modsem2(self):
cephes.mathieu_modsem2(1,1,1)
# Test reflection relation AMS 20.6.20
m = np.arange(1, 4)[:,None,None]
q = np.r_[np.logspace(-2, 2, 10)][None,:,None]
z = np.linspace(0, 1, 7)[None,None,:]
y1 = cephes.mathieu_modsem2(m, q, -z)[0]
fr = cephes.mathieu_modsem2(m, q, 0)[1] / cephes.mathieu_modsem1(m, q, 0)[1]
y2 = cephes.mathieu_modsem2(m, q, z)[0] - 2*fr*cephes.mathieu_modsem1(m, q, z)[0]
assert_allclose(y1, y2, rtol=1e-10)
def test_mathieu_overflow(self):
# Check that these return NaNs instead of causing a SEGV
assert_equal(cephes.mathieu_cem(10000, 0, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_sem(10000, 0, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_cem(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_sem(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_modcem1(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_modsem1(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_modcem2(10000, 1.5, 1.3), (np.nan, np.nan))
assert_equal(cephes.mathieu_modsem2(10000, 1.5, 1.3), (np.nan, np.nan))
def test_mathieu_ticket_1847(self):
# Regression test --- this call had some out-of-bounds access
# and could return nan occasionally
for k in range(60):
v = cephes.mathieu_modsem2(2, 100, -1)
# Values from ACM TOMS 804 (derivate by numerical differentiation)
assert_allclose(v[0], 0.1431742913063671074347, rtol=1e-10)
assert_allclose(v[1], 0.9017807375832909144719, rtol=1e-4)
def test_modfresnelm(self):
cephes.modfresnelm(0)
def test_modfresnelp(self):
cephes.modfresnelp(0)
def test_modstruve(self):
assert_equal(cephes.modstruve(1,0),0.0)
def test_nbdtr(self):
assert_equal(cephes.nbdtr(1,1,1),1.0)
def test_nbdtrc(self):
assert_equal(cephes.nbdtrc(1,1,1),0.0)
def test_nbdtri(self):
assert_equal(cephes.nbdtri(1,1,1),1.0)
def test_nbdtrik(self):
cephes.nbdtrik(1,.4,.5)
def test_nbdtrin(self):
assert_equal(cephes.nbdtrin(1,0,0),5.0)
def test_ncfdtr(self):
assert_equal(cephes.ncfdtr(1,1,1,0),0.0)
def test_ncfdtri(self):
assert_equal(cephes.ncfdtri(1, 1, 1, 0), 0.0)
f = [0.5, 1, 1.5]
p = cephes.ncfdtr(2, 3, 1.5, f)
assert_allclose(cephes.ncfdtri(2, 3, 1.5, p), f)
def test_ncfdtridfd(self):
dfd = [1, 2, 3]
p = cephes.ncfdtr(2, dfd, 0.25, 15)
assert_allclose(cephes.ncfdtridfd(2, p, 0.25, 15), dfd)
def test_ncfdtridfn(self):
dfn = [0.1, 1, 2, 3, 1e4]
p = cephes.ncfdtr(dfn, 2, 0.25, 15)
assert_allclose(cephes.ncfdtridfn(p, 2, 0.25, 15), dfn, rtol=1e-5)
def test_ncfdtrinc(self):
nc = [0.5, 1.5, 2.0]
p = cephes.ncfdtr(2, 3, nc, 15)
assert_allclose(cephes.ncfdtrinc(2, 3, p, 15), nc)
def test_nctdtr(self):
assert_equal(cephes.nctdtr(1,0,0),0.5)
assert_equal(cephes.nctdtr(9, 65536, 45), 0.0)
assert_approx_equal(cephes.nctdtr(np.inf, 1., 1.), 0.5, 5)
assert_(np.isnan(cephes.nctdtr(2., np.inf, 10.)))
assert_approx_equal(cephes.nctdtr(2., 1., np.inf), 1.)
assert_(np.isnan(cephes.nctdtr(np.nan, 1., 1.)))
assert_(np.isnan(cephes.nctdtr(2., np.nan, 1.)))
assert_(np.isnan(cephes.nctdtr(2., 1., np.nan)))
def test_nctdtridf(self):
cephes.nctdtridf(1,0.5,0)
def test_nctdtrinc(self):
cephes.nctdtrinc(1,0,0)
def test_nctdtrit(self):
cephes.nctdtrit(.1,0.2,.5)
def test_nrdtrimn(self):
assert_approx_equal(cephes.nrdtrimn(0.5,1,1),1.0)
def test_nrdtrisd(self):
assert_allclose(cephes.nrdtrisd(0.5,0.5,0.5), 0.0,
atol=0, rtol=0)
def test_obl_ang1(self):
cephes.obl_ang1(1,1,1,0)
def test_obl_ang1_cv(self):
result = cephes.obl_ang1_cv(1,1,1,1,0)
assert_almost_equal(result[0],1.0)
assert_almost_equal(result[1],0.0)
def test_obl_cv(self):
assert_equal(cephes.obl_cv(1,1,0),2.0)
def test_obl_rad1(self):
cephes.obl_rad1(1,1,1,0)
def test_obl_rad1_cv(self):
cephes.obl_rad1_cv(1,1,1,1,0)
def test_obl_rad2(self):
cephes.obl_rad2(1,1,1,0)
def test_obl_rad2_cv(self):
cephes.obl_rad2_cv(1,1,1,1,0)
def test_pbdv(self):
assert_equal(cephes.pbdv(1,0),(0.0,1.0))
def test_pbvv(self):
cephes.pbvv(1,0)
def test_pbwa(self):
cephes.pbwa(1,0)
def test_pdtr(self):
val = cephes.pdtr(0, 1)
assert_almost_equal(val, np.exp(-1))
# Edge case: m = 0.
val = cephes.pdtr([0, 1, 2], 0)
assert_array_equal(val, [1, 1, 1])
def test_pdtrc(self):
val = cephes.pdtrc(0, 1)
assert_almost_equal(val, 1 - np.exp(-1))
# Edge case: m = 0.
val = cephes.pdtrc([0, 1, 2], 0.0)
assert_array_equal(val, [0, 0, 0])
def test_pdtri(self):
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "floating point number truncated to an integer")
cephes.pdtri(0.5,0.5)
def test_pdtrik(self):
k = cephes.pdtrik(0.5, 1)
assert_almost_equal(cephes.gammaincc(k + 1, 1), 0.5)
# Edge case: m = 0 or very small.
k = cephes.pdtrik([[0], [0.25], [0.95]], [0, 1e-20, 1e-6])
assert_array_equal(k, np.zeros((3, 3)))
def test_pro_ang1(self):
cephes.pro_ang1(1,1,1,0)
def test_pro_ang1_cv(self):
assert_array_almost_equal(cephes.pro_ang1_cv(1,1,1,1,0),
array((1.0,0.0)))
def test_pro_cv(self):
assert_equal(cephes.pro_cv(1,1,0),2.0)
def test_pro_rad1(self):
cephes.pro_rad1(1,1,1,0.1)
def test_pro_rad1_cv(self):
cephes.pro_rad1_cv(1,1,1,1,0)
def test_pro_rad2(self):
cephes.pro_rad2(1,1,1,0)
def test_pro_rad2_cv(self):
cephes.pro_rad2_cv(1,1,1,1,0)
def test_psi(self):
cephes.psi(1)
def test_radian(self):
assert_equal(cephes.radian(0,0,0),0)
def test_rgamma(self):
assert_equal(cephes.rgamma(1),1.0)
    def test_round(self):
        # cephes.round rounds halfway cases away from zero (3.5 -> 4,
        # -3.5 -> -4), unlike Python's built-in round(), which rounds
        # halves to the nearest even integer.
        assert_equal(cephes.round(3.4),3.0)
        assert_equal(cephes.round(-3.4),-3.0)
        assert_equal(cephes.round(3.6),4.0)
        assert_equal(cephes.round(-3.6),-4.0)
        assert_equal(cephes.round(3.5),4.0)
        assert_equal(cephes.round(-3.5),-4.0)
def test_shichi(self):
cephes.shichi(1)
def test_sici(self):
cephes.sici(1)
s, c = cephes.sici(np.inf)
assert_almost_equal(s, np.pi * 0.5)
assert_almost_equal(c, 0)
s, c = cephes.sici(-np.inf)
assert_almost_equal(s, -np.pi * 0.5)
assert_(np.isnan(c), "cosine integral(-inf) is not nan")
def test_sindg(self):
assert_equal(cephes.sindg(90),1.0)
def test_smirnov(self):
assert_equal(cephes.smirnov(1,.1),0.9)
assert_(np.isnan(cephes.smirnov(1,np.nan)))
def test_smirnovp(self):
assert_equal(cephes._smirnovp(1, .1), -1)
assert_equal(cephes._smirnovp(2, 0.75), -2*(0.25)**(2-1))
assert_equal(cephes._smirnovp(3, 0.75), -3*(0.25)**(3-1))
assert_(np.isnan(cephes._smirnovp(1, np.nan)))
def test_smirnovc(self):
assert_equal(cephes._smirnovc(1,.1),0.1)
assert_(np.isnan(cephes._smirnovc(1,np.nan)))
x10 = np.linspace(0, 1, 11, endpoint=True)
assert_almost_equal(cephes._smirnovc(3, x10), 1-cephes.smirnov(3, x10))
x4 = np.linspace(0, 1, 5, endpoint=True)
assert_almost_equal(cephes._smirnovc(4, x4), 1-cephes.smirnov(4, x4))
def test_smirnovi(self):
assert_almost_equal(cephes.smirnov(1,cephes.smirnovi(1,0.4)),0.4)
assert_almost_equal(cephes.smirnov(1,cephes.smirnovi(1,0.6)),0.6)
assert_(np.isnan(cephes.smirnovi(1,np.nan)))
def test_smirnovci(self):
assert_almost_equal(cephes._smirnovc(1,cephes._smirnovci(1,0.4)),0.4)
assert_almost_equal(cephes._smirnovc(1,cephes._smirnovci(1,0.6)),0.6)
assert_(np.isnan(cephes._smirnovci(1,np.nan)))
def test_spence(self):
assert_equal(cephes.spence(1),0.0)
def test_stdtr(self):
assert_equal(cephes.stdtr(1,0),0.5)
assert_almost_equal(cephes.stdtr(1,1), 0.75)
assert_almost_equal(cephes.stdtr(1,2), 0.852416382349)
def test_stdtridf(self):
cephes.stdtridf(0.7,1)
def test_stdtrit(self):
cephes.stdtrit(1,0.7)
def test_struve(self):
assert_equal(cephes.struve(0,0),0.0)
def test_tandg(self):
assert_equal(cephes.tandg(45),1.0)
def test_tklmbda(self):
assert_almost_equal(cephes.tklmbda(1,1),1.0)
def test_y0(self):
cephes.y0(1)
def test_y1(self):
cephes.y1(1)
def test_yn(self):
cephes.yn(1,1)
def test_yv(self):
cephes.yv(1,1)
def test_yve(self):
cephes.yve(1,1)
def test_wofz(self):
z = [complex(624.2,-0.26123), complex(-0.4,3.), complex(0.6,2.),
complex(-1.,1.), complex(-1.,-9.), complex(-1.,9.),
complex(-0.0000000234545,1.1234), complex(-3.,5.1),
complex(-53,30.1), complex(0.0,0.12345),
complex(11,1), complex(-22,-2), complex(9,-28),
complex(21,-33), complex(1e5,1e5), complex(1e14,1e14)
]
w = [
complex(-3.78270245518980507452677445620103199303131110e-7,
0.000903861276433172057331093754199933411710053155),
complex(0.1764906227004816847297495349730234591778719532788,
-0.02146550539468457616788719893991501311573031095617),
complex(0.2410250715772692146133539023007113781272362309451,
0.06087579663428089745895459735240964093522265589350),
complex(0.30474420525691259245713884106959496013413834051768,
-0.20821893820283162728743734725471561394145872072738),
complex(7.317131068972378096865595229600561710140617977e34,
8.321873499714402777186848353320412813066170427e34),
complex(0.0615698507236323685519612934241429530190806818395,
-0.00676005783716575013073036218018565206070072304635),
complex(0.3960793007699874918961319170187598400134746631,
-5.593152259116644920546186222529802777409274656e-9),
complex(0.08217199226739447943295069917990417630675021771804,
-0.04701291087643609891018366143118110965272615832184),
complex(0.00457246000350281640952328010227885008541748668738,
-0.00804900791411691821818731763401840373998654987934),
complex(0.8746342859608052666092782112565360755791467973338452,
0.),
complex(0.00468190164965444174367477874864366058339647648741,
0.0510735563901306197993676329845149741675029197050),
complex(-0.0023193175200187620902125853834909543869428763219,
-0.025460054739731556004902057663500272721780776336),
complex(9.11463368405637174660562096516414499772662584e304,
3.97101807145263333769664875189354358563218932e305),
complex(-4.4927207857715598976165541011143706155432296e281,
-2.8019591213423077494444700357168707775769028e281),
complex(2.820947917809305132678577516325951485807107151e-6,
2.820947917668257736791638444590253942253354058e-6),
complex(2.82094791773878143474039725787438662716372268e-15,
2.82094791773878143474039725773333923127678361e-15)
]
assert_func_equal(cephes.wofz, w, z, rtol=1e-13)
class TestAiry:
def test_airy(self):
# This tests the airy function to ensure 8 place accuracy in computation
x = special.airy(.99)
assert_array_almost_equal(x,array([0.13689066,-0.16050153,1.19815925,0.92046818]),8)
x = special.airy(.41)
assert_array_almost_equal(x,array([0.25238916,-.23480512,0.80686202,0.51053919]),8)
x = special.airy(-.36)
assert_array_almost_equal(x,array([0.44508477,-0.23186773,0.44939534,0.48105354]),8)
def test_airye(self):
a = special.airye(0.01)
b = special.airy(0.01)
b1 = [None]*4
for n in range(2):
b1[n] = b[n]*exp(2.0/3.0*0.01*sqrt(0.01))
for n in range(2,4):
b1[n] = b[n]*exp(-abs(real(2.0/3.0*0.01*sqrt(0.01))))
assert_array_almost_equal(a,b1,6)
def test_bi_zeros(self):
bi = special.bi_zeros(2)
bia = (array([-1.17371322, -3.2710930]),
array([-2.29443968, -4.07315509]),
array([-0.45494438, 0.39652284]),
array([0.60195789, -0.76031014]))
assert_array_almost_equal(bi,bia,4)
bi = special.bi_zeros(5)
assert_array_almost_equal(bi[0],array([-1.173713222709127,
-3.271093302836352,
-4.830737841662016,
-6.169852128310251,
-7.376762079367764]),11)
assert_array_almost_equal(bi[1],array([-2.294439682614122,
-4.073155089071828,
-5.512395729663599,
-6.781294445990305,
-7.940178689168587]),10)
assert_array_almost_equal(bi[2],array([-0.454944383639657,
0.396522836094465,
-0.367969161486959,
0.349499116831805,
-0.336026240133662]),11)
assert_array_almost_equal(bi[3],array([0.601957887976239,
-0.760310141492801,
0.836991012619261,
-0.88947990142654,
0.929983638568022]),10)
def test_ai_zeros(self):
ai = special.ai_zeros(1)
assert_array_almost_equal(ai,(array([-2.33810741]),
array([-1.01879297]),
array([0.5357]),
array([0.7012])),4)
def test_ai_zeros_big(self):
z, zp, ai_zpx, aip_zx = special.ai_zeros(50000)
ai_z, aip_z, _, _ = special.airy(z)
ai_zp, aip_zp, _, _ = special.airy(zp)
ai_envelope = 1/abs(z)**(1./4)
aip_envelope = abs(zp)**(1./4)
# Check values
assert_allclose(ai_zpx, ai_zp, rtol=1e-10)
assert_allclose(aip_zx, aip_z, rtol=1e-10)
# Check they are zeros
assert_allclose(ai_z/ai_envelope, 0, atol=1e-10, rtol=0)
assert_allclose(aip_zp/aip_envelope, 0, atol=1e-10, rtol=0)
# Check first zeros, DLMF 9.9.1
assert_allclose(z[:6],
[-2.3381074105, -4.0879494441, -5.5205598281,
-6.7867080901, -7.9441335871, -9.0226508533], rtol=1e-10)
assert_allclose(zp[:6],
[-1.0187929716, -3.2481975822, -4.8200992112,
-6.1633073556, -7.3721772550, -8.4884867340], rtol=1e-10)
def test_bi_zeros_big(self):
z, zp, bi_zpx, bip_zx = special.bi_zeros(50000)
_, _, bi_z, bip_z = special.airy(z)
_, _, bi_zp, bip_zp = special.airy(zp)
bi_envelope = 1/abs(z)**(1./4)
bip_envelope = abs(zp)**(1./4)
# Check values
assert_allclose(bi_zpx, bi_zp, rtol=1e-10)
assert_allclose(bip_zx, bip_z, rtol=1e-10)
# Check they are zeros
assert_allclose(bi_z/bi_envelope, 0, atol=1e-10, rtol=0)
assert_allclose(bip_zp/bip_envelope, 0, atol=1e-10, rtol=0)
# Check first zeros, DLMF 9.9.2
assert_allclose(z[:6],
[-1.1737132227, -3.2710933028, -4.8307378417,
-6.1698521283, -7.3767620794, -8.4919488465], rtol=1e-10)
assert_allclose(zp[:6],
[-2.2944396826, -4.0731550891, -5.5123957297,
-6.7812944460, -7.9401786892, -9.0195833588], rtol=1e-10)
class TestAssocLaguerre:
    def test_assoc_laguerre(self):
        """assoc_laguerre(x, n, k) must agree with evaluating the
        generalized Laguerre polynomial L_n^(k) at x."""
        poly = special.genlaguerre(11, 1)
        for point in (.2, 1):
            direct = special.assoc_laguerre(point, 11, 1)
            assert_array_almost_equal(direct, poly(point), 8)
class TestBesselpoly:
    def test_besselpoly(self):
        # Placeholder: no reference values for besselpoly have been added
        # here yet; the function is only smoke-tested in TestCephes.
        pass
class TestKelvin:
def test_bei(self):
mbei = special.bei(2)
assert_almost_equal(mbei, 0.9722916273066613,5) # this may not be exact
def test_beip(self):
mbeip = special.beip(2)
assert_almost_equal(mbeip,0.91701361338403631,5) # this may not be exact
def test_ber(self):
mber = special.ber(2)
assert_almost_equal(mber,0.75173418271380821,5) # this may not be exact
def test_berp(self):
mberp = special.berp(2)
assert_almost_equal(mberp,-0.49306712470943909,5) # this may not be exact
def test_bei_zeros(self):
# Abramowitz & Stegun, Table 9.12
bi = special.bei_zeros(5)
assert_array_almost_equal(bi,array([5.02622,
9.45541,
13.89349,
18.33398,
22.77544]),4)
def test_beip_zeros(self):
bip = special.beip_zeros(5)
assert_array_almost_equal(bip,array([3.772673304934953,
8.280987849760042,
12.742147523633703,
17.193431752512542,
21.641143941167325]),8)
def test_ber_zeros(self):
ber = special.ber_zeros(5)
assert_array_almost_equal(ber,array([2.84892,
7.23883,
11.67396,
16.11356,
20.55463]),4)
def test_berp_zeros(self):
brp = special.berp_zeros(5)
assert_array_almost_equal(brp,array([6.03871,
10.51364,
14.96844,
19.41758,
23.86430]),4)
def test_kelvin(self):
mkelv = special.kelvin(2)
assert_array_almost_equal(mkelv,(special.ber(2) + special.bei(2)*1j,
special.ker(2) + special.kei(2)*1j,
special.berp(2) + special.beip(2)*1j,
special.kerp(2) + special.keip(2)*1j),8)
def test_kei(self):
mkei = special.kei(2)
assert_almost_equal(mkei,-0.20240006776470432,5)
def test_keip(self):
mkeip = special.keip(2)
assert_almost_equal(mkeip,0.21980790991960536,5)
def test_ker(self):
mker = special.ker(2)
assert_almost_equal(mker,-0.041664513991509472,5)
def test_kerp(self):
mkerp = special.kerp(2)
assert_almost_equal(mkerp,-0.10660096588105264,5)
def test_kei_zeros(self):
kei = special.kei_zeros(5)
assert_array_almost_equal(kei,array([3.91467,
8.34422,
12.78256,
17.22314,
21.66464]),4)
def test_keip_zeros(self):
keip = special.keip_zeros(5)
assert_array_almost_equal(keip,array([4.93181,
9.40405,
13.85827,
18.30717,
22.75379]),4)
# numbers come from 9.9 of A&S pg. 381
def test_kelvin_zeros(self):
tmp = special.kelvin_zeros(5)
berz,beiz,kerz,keiz,berpz,beipz,kerpz,keipz = tmp
assert_array_almost_equal(berz,array([2.84892,
7.23883,
11.67396,
16.11356,
20.55463]),4)
assert_array_almost_equal(beiz,array([5.02622,
9.45541,
13.89349,
18.33398,
22.77544]),4)
assert_array_almost_equal(kerz,array([1.71854,
6.12728,
10.56294,
15.00269,
19.44382]),4)
assert_array_almost_equal(keiz,array([3.91467,
8.34422,
12.78256,
17.22314,
21.66464]),4)
assert_array_almost_equal(berpz,array([6.03871,
10.51364,
14.96844,
19.41758,
23.86430]),4)
assert_array_almost_equal(beipz,array([3.77267,
# table from 1927 had 3.77320
# but this is more accurate
8.28099,
12.74215,
17.19343,
21.64114]),4)
assert_array_almost_equal(kerpz,array([2.66584,
7.17212,
11.63218,
16.08312,
20.53068]),4)
assert_array_almost_equal(keipz,array([4.93181,
9.40405,
13.85827,
18.30717,
22.75379]),4)
def test_ker_zeros(self):
ker = special.ker_zeros(5)
assert_array_almost_equal(ker,array([1.71854,
6.12728,
10.56294,
15.00269,
19.44381]),4)
def test_kerp_zeros(self):
kerp = special.kerp_zeros(5)
assert_array_almost_equal(kerp,array([2.66584,
7.17212,
11.63218,
16.08312,
20.53068]),4)
class TestBernoulli:
    """Checks for the Bernoulli-number generator."""
    def test_bernoulli(self):
        # first six Bernoulli numbers B_0..B_5 (odd ones beyond B_1 vanish)
        expected = [1.0000, -0.5000, 0.1667, 0.0000, -0.0333, 0.0000]
        assert_array_almost_equal(special.bernoulli(5), expected, 4)
class TestBeta:
    """Checks for beta, betaln and the regularized incomplete beta."""
    def test_beta(self):
        # B(a, b) = Gamma(a) * Gamma(b) / Gamma(a + b)
        via_gamma = (special.gamma(2) * special.gamma(4)) / special.gamma(6)
        assert_almost_equal(special.beta(2, 4), via_gamma, 8)
    def test_betaln(self):
        # betaln is the log of the absolute value of beta
        reference = log(abs(special.beta(2, 4)))
        assert_almost_equal(special.betaln(2, 4), reference, 8)
    def test_betainc(self):
        # betainc(1, 1, x) is the identity on [0, 1]
        assert_almost_equal(special.betainc(1, 1, .2), 0.2, 8)
    def test_betaincinv(self):
        # betaincinv inverts betainc in its third argument
        y = special.betaincinv(2, 4, .5)
        assert_almost_equal(special.betainc(2, 4, y), .5, 5)
class TestCombinatorics:
    """Tests for the combinatorial functions ``comb`` and ``perm``."""
    def test_comb(self):
        # array, scalar, and exact (arbitrary-precision integer) variants
        assert_array_almost_equal(special.comb([10, 10], [3, 4]), [120., 210.])
        assert_almost_equal(special.comb(10, 3), 120.)
        assert_equal(special.comb(10, 3, exact=True), 120)
        # repetition=True computes C(N + k - 1, k)
        assert_equal(special.comb(10, 3, exact=True, repetition=True), 220)
        # exact and floating-point paths must agree along a whole row
        assert_allclose([special.comb(20, k, exact=True) for k in range(21)],
                        special.comb(20, list(range(21))), atol=1e-15)
        # the exact path must handle values beyond the native int range
        ii = np.iinfo(int).max + 1
        assert_equal(special.comb(ii, ii-1, exact=True), ii)
        expected = 100891344545564193334812497256
        assert special.comb(100, 50, exact=True) == expected
    @pytest.mark.parametrize("repetition", [True, False])
    @pytest.mark.parametrize("legacy", [True, False, _NoValue])
    @pytest.mark.parametrize("k", [3.5, 3])
    @pytest.mark.parametrize("N", [4.5, 4])
    def test_comb_legacy(self, N, k, legacy, repetition):
        # test is only relevant for exact=True
        if legacy is not _NoValue:
            # any explicit value for ``legacy`` must emit a deprecation warning
            with pytest.warns(
                DeprecationWarning,
                match=r"Using 'legacy' keyword is deprecated"
            ):
                result = special.comb(N, k, exact=True, legacy=legacy,
                                      repetition=repetition)
        else:
            result = special.comb(N, k, exact=True, legacy=legacy,
                                  repetition=repetition)
        if legacy:
            # for exact=True and legacy=True, cast input arguments, else don't
            if repetition:
                # the casting in legacy mode happens AFTER transforming N & k,
                # so rounding can change (e.g. both floats, but sum to int);
                # hence we need to emulate the repetition-transformation here
                N, k = int(N + k - 1), int(k)
                repetition = False
            else:
                N, k = int(N), int(k)
        # expected result is the same as with exact=False
        with suppress_warnings() as sup:
            if legacy is not _NoValue:
                sup.filter(DeprecationWarning)
            expected = special.comb(N, k, legacy=legacy, repetition=repetition)
        assert_equal(result, expected)
    def test_comb_with_np_int64(self):
        # the exact path must accept numpy integer scalars, not just ints
        n = 70
        k = 30
        np_n = np.int64(n)
        np_k = np.int64(k)
        res_np = special.comb(np_n, np_k, exact=True)
        res_py = special.comb(n, k, exact=True)
        assert res_np == res_py
    def test_comb_zeros(self):
        # out-of-domain arguments (k > N, negative N or k) give 0, not errors
        assert_equal(special.comb(2, 3, exact=True), 0)
        assert_equal(special.comb(-1, 3, exact=True), 0)
        assert_equal(special.comb(2, -1, exact=True), 0)
        assert_equal(special.comb(2, -1, exact=False), 0)
        assert_array_almost_equal(special.comb([2, -1, 2, 10], [3, 3, -1, 3]),
                                  [0., 0., 0., 120.])
    def test_perm(self):
        # array, scalar, and exact variants of the permutation count
        assert_array_almost_equal(special.perm([10, 10], [3, 4]), [720., 5040.])
        assert_almost_equal(special.perm(10, 3), 720.)
        assert_equal(special.perm(10, 3, exact=True), 720)
    def test_perm_zeros(self):
        # out-of-domain arguments give 0, mirroring comb's behavior
        assert_equal(special.perm(2, 3, exact=True), 0)
        assert_equal(special.perm(-1, 3, exact=True), 0)
        assert_equal(special.perm(2, -1, exact=True), 0)
        assert_equal(special.perm(2, -1, exact=False), 0)
        assert_array_almost_equal(special.perm([2, -1, 2, 10], [3, 3, -1, 3]),
                                  [0., 0., 0., 720.])
class TestTrigonometric:
    """Tests for cbrt, the degree-argument trig functions and cosm1."""
    def test_cbrt(self):
        assert_approx_equal(special.cbrt(27), 27 ** (1.0 / 3.0))
    def test_cbrtmore(self):
        assert_almost_equal(special.cbrt(27.9), 27.9 ** (1.0 / 3.0), 8)
    def test_cosdg(self):
        assert_almost_equal(special.cosdg(90), cos(pi / 2.0), 8)
    def test_cosdgmore(self):
        assert_almost_equal(special.cosdg(30), cos(pi / 6.0), 8)
    def test_cosm1(self):
        # cosm1(x) = cos(x) - 1
        for x in (0, .3, pi / 10):
            assert_almost_equal(special.cosm1(x), cos(x) - 1, 8)
    def test_cotdg(self):
        assert_almost_equal(special.cotdg(30), tan(pi / 6.0) ** (-1), 8)
    def test_cotdgmore(self):
        assert_almost_equal(special.cotdg(45), tan(pi / 4.0) ** (-1), 8)
    def test_specialpoints(self):
        # cotdg should be exact at multiples of 45 degrees
        cases = [(45, 1.0), (-45, -1.0), (90, 0.0), (-90, 0.0),
                 (135, -1.0), (-135, 1.0), (225, 1.0), (-225, -1.0),
                 (270, 0.0), (-270, 0.0), (315, -1.0), (-315, 1.0),
                 (765, 1.0)]
        for deg, value in cases:
            assert_almost_equal(special.cotdg(deg), value, 14)
    def test_sinc(self):
        # the sinc implementation and more extensive sinc tests are in numpy
        assert_array_equal(special.sinc([0]), 1)
        assert_equal(special.sinc(0.0), 1.0)
    def test_sindg(self):
        assert_equal(special.sindg(90), 1.0)
    def test_sindgmore(self):
        assert_almost_equal(special.sindg(30), sin(pi / 6.0), 8)
        assert_almost_equal(special.sindg(45), sin(pi / 4.0), 8)
class TestTandg:
    """tandg(x) is the tangent of an argument given in degrees."""
    def test_tandg(self):
        assert_almost_equal(special.tandg(30), tan(pi / 6.0), 8)
    def test_tandgmore(self):
        assert_almost_equal(special.tandg(45), tan(pi / 4.0), 8)
        assert_almost_equal(special.tandg(60), tan(pi / 3.0), 8)
    def test_specialpoints(self):
        # tandg should be exact at multiples of 45 degrees
        cases = [(0, 0.0), (45, 1.0), (-45, -1.0), (135, -1.0), (-135, 1.0),
                 (180, 0.0), (-180, 0.0), (225, 1.0), (-225, -1.0),
                 (315, -1.0), (-315, 1.0)]
        for deg, value in cases:
            assert_almost_equal(special.tandg(deg), value, 14)
class TestEllip:
    """Tests for the Jacobi elliptic functions and Legendre integrals.

    Fix: the six ``assert_allclose(..., rtol=1e14)`` calls in
    ``test_ellipkinc_singular`` were typos for ``rtol=1e-14``; with
    rtol=1e14 the comparisons were vacuous (any finite values pass).
    """
    def test_ellipj_nan(self):
        """Regression test for #912."""
        special.ellipj(0.5, np.nan)
    def test_ellipj(self):
        # at m=0, (sn, cn, dn, phi) reduces to (sin(u), cos(u), 1, u)
        el = special.ellipj(0.2, 0)
        rel = [sin(0.2), cos(0.2), 1.0, 0.20]
        assert_array_almost_equal(el, rel, 13)
    def test_ellipk(self):
        elk = special.ellipk(.2)
        assert_almost_equal(elk, 1.659623598610528, 11)
        # limiting/special values of ellipkm1 (argument is 1 - m)
        assert_equal(special.ellipkm1(0.0), np.inf)
        assert_equal(special.ellipkm1(1.0), pi/2)
        assert_equal(special.ellipkm1(np.inf), 0.0)
        assert_equal(special.ellipkm1(np.nan), np.nan)
        assert_equal(special.ellipkm1(-1), np.nan)
        assert_allclose(special.ellipk(-10), 0.7908718902387385)
    def test_ellipkinc(self):
        # complete case: F(pi/2, m) == K(m)
        elkinc = special.ellipkinc(pi/2, .2)
        elk = special.ellipk(0.2)
        assert_almost_equal(elkinc, elk, 15)
        alpha = 20*pi/180
        phi = 45*pi/180
        m = sin(alpha)**2
        elkinc = special.ellipkinc(phi, m)
        assert_almost_equal(elkinc, 0.79398143, 8)
        # From pg. 614 of A & S
        assert_equal(special.ellipkinc(pi/2, 0.0), pi/2)
        assert_equal(special.ellipkinc(pi/2, 1.0), np.inf)
        assert_equal(special.ellipkinc(pi/2, -np.inf), 0.0)
        assert_equal(special.ellipkinc(pi/2, np.nan), np.nan)
        assert_equal(special.ellipkinc(pi/2, 2), np.nan)
        assert_equal(special.ellipkinc(0, 0.5), 0.0)
        assert_equal(special.ellipkinc(np.inf, 0.5), np.inf)
        assert_equal(special.ellipkinc(-np.inf, 0.5), -np.inf)
        assert_equal(special.ellipkinc(np.inf, np.inf), np.nan)
        assert_equal(special.ellipkinc(np.inf, -np.inf), np.nan)
        assert_equal(special.ellipkinc(-np.inf, -np.inf), np.nan)
        assert_equal(special.ellipkinc(-np.inf, np.inf), np.nan)
        assert_equal(special.ellipkinc(np.nan, 0.5), np.nan)
        assert_equal(special.ellipkinc(np.nan, np.nan), np.nan)
        assert_allclose(special.ellipkinc(0.38974112035318718, 1), 0.4, rtol=1e-14)
        assert_allclose(special.ellipkinc(1.5707, -10), 0.79084284661724946)
    def test_ellipkinc_2(self):
        # Regression test for gh-3550
        # ellipkinc(phi, mbad) was NaN and mvals[2:6] were twice the correct value
        mbad = 0.68359375000000011
        phi = 0.9272952180016123
        m = np.nextafter(mbad, 0)
        mvals = []
        for j in range(10):
            mvals.append(m)
            m = np.nextafter(m, 1)
        f = special.ellipkinc(phi, mvals)
        assert_array_almost_equal_nulp(f, np.full_like(f, 1.0259330100195334), 1)
        # this bug also appears at phi + n * pi for at least small n
        f1 = special.ellipkinc(phi + pi, mvals)
        assert_array_almost_equal_nulp(f1, np.full_like(f1, 5.1296650500976675), 2)
    def test_ellipkinc_singular(self):
        # ellipkinc(phi, 1) has closed form and is finite only for phi in (-pi/2, pi/2)
        # (tolerances below were ``rtol=1e14`` typos; fixed to 1e-14)
        xlog = np.logspace(-300, -17, 25)
        xlin = np.linspace(1e-17, 0.1, 25)
        xlin2 = np.linspace(0.1, pi/2, 25, endpoint=False)
        assert_allclose(special.ellipkinc(xlog, 1), np.arcsinh(np.tan(xlog)),
                        rtol=1e-14)
        assert_allclose(special.ellipkinc(xlin, 1), np.arcsinh(np.tan(xlin)),
                        rtol=1e-14)
        assert_allclose(special.ellipkinc(xlin2, 1), np.arcsinh(np.tan(xlin2)),
                        rtol=1e-14)
        assert_equal(special.ellipkinc(np.pi/2, 1), np.inf)
        assert_allclose(special.ellipkinc(-xlog, 1), np.arcsinh(np.tan(-xlog)),
                        rtol=1e-14)
        assert_allclose(special.ellipkinc(-xlin, 1), np.arcsinh(np.tan(-xlin)),
                        rtol=1e-14)
        assert_allclose(special.ellipkinc(-xlin2, 1), np.arcsinh(np.tan(-xlin2)),
                        rtol=1e-14)
        assert_equal(special.ellipkinc(-np.pi/2, 1), np.inf)
    def test_ellipe(self):
        ele = special.ellipe(.2)
        assert_almost_equal(ele, 1.4890350580958529, 8)
        # limiting/special values
        assert_equal(special.ellipe(0.0), pi/2)
        assert_equal(special.ellipe(1.0), 1.0)
        assert_equal(special.ellipe(-np.inf), np.inf)
        assert_equal(special.ellipe(np.nan), np.nan)
        assert_equal(special.ellipe(2), np.nan)
        assert_allclose(special.ellipe(-10), 3.6391380384177689)
    def test_ellipeinc(self):
        # complete case: E(pi/2, m) == E(m)
        eleinc = special.ellipeinc(pi/2, .2)
        ele = special.ellipe(0.2)
        assert_almost_equal(eleinc, ele, 14)
        # pg 617 of A & S
        alpha, phi = 52*pi/180, 35*pi/180
        m = sin(alpha)**2
        eleinc = special.ellipeinc(phi, m)
        assert_almost_equal(eleinc, 0.58823065, 8)
        assert_equal(special.ellipeinc(pi/2, 0.0), pi/2)
        assert_equal(special.ellipeinc(pi/2, 1.0), 1.0)
        assert_equal(special.ellipeinc(pi/2, -np.inf), np.inf)
        assert_equal(special.ellipeinc(pi/2, np.nan), np.nan)
        assert_equal(special.ellipeinc(pi/2, 2), np.nan)
        assert_equal(special.ellipeinc(0, 0.5), 0.0)
        assert_equal(special.ellipeinc(np.inf, 0.5), np.inf)
        assert_equal(special.ellipeinc(-np.inf, 0.5), -np.inf)
        assert_equal(special.ellipeinc(np.inf, -np.inf), np.inf)
        assert_equal(special.ellipeinc(-np.inf, -np.inf), -np.inf)
        assert_equal(special.ellipeinc(np.inf, np.inf), np.nan)
        assert_equal(special.ellipeinc(-np.inf, np.inf), np.nan)
        assert_equal(special.ellipeinc(np.nan, 0.5), np.nan)
        assert_equal(special.ellipeinc(np.nan, np.nan), np.nan)
        assert_allclose(special.ellipeinc(1.5707, -10), 3.6388185585822876)
    def test_ellipeinc_2(self):
        # Regression test for gh-3550
        # ellipeinc(phi, mbad) was NaN and mvals[2:6] were twice the correct value
        mbad = 0.68359375000000011
        phi = 0.9272952180016123
        m = np.nextafter(mbad, 0)
        mvals = []
        for j in range(10):
            mvals.append(m)
            m = np.nextafter(m, 1)
        f = special.ellipeinc(phi, mvals)
        assert_array_almost_equal_nulp(f, np.full_like(f, 0.84442884574781019), 2)
        # this bug also appears at phi + n * pi for at least small n
        f1 = special.ellipeinc(phi + pi, mvals)
        assert_array_almost_equal_nulp(f1, np.full_like(f1, 3.3471442287390509), 4)
class TestEllipCarlson:
    """Test for Carlson elliptic integrals ellipr[cdfgj].
    The special values used in these tests can be found in Sec. 3 of Carlson
    (1994), https://arxiv.org/abs/math/9409227
    """
    def test_elliprc(self):
        # degenerate/limiting values first, then Carlson's reference table
        assert_allclose(elliprc(1, 1), 1)
        assert elliprc(1, inf) == 0.0
        assert isnan(elliprc(1, 0))
        assert elliprc(1, complex(1, inf)) == 0.0
        args = array([[0.0, 0.25],
                      [2.25, 2.0],
                      [0.0, 1.0j],
                      [-1.0j, 1.0j],
                      [0.25, -2.0],
                      [1.0j, -1.0]])
        expected_results = array([np.pi,
                                  np.log(2.0),
                                  1.1107207345396 * (1.0-1.0j),
                                  1.2260849569072-0.34471136988768j,
                                  np.log(2.0) / 3.0,
                                  0.77778596920447+0.19832484993429j])
        for i, arr in enumerate(args):
            assert_allclose(elliprc(*arr), expected_results[i])
    def test_elliprd(self):
        # degenerate/limiting values first, then Carlson's reference table
        assert_allclose(elliprd(1, 1, 1), 1)
        assert_allclose(elliprd(0, 2, 1) / 3.0, 0.59907011736779610371)
        assert elliprd(1, 1, inf) == 0.0
        assert np.isinf(elliprd(1, 1, 0))
        assert np.isinf(elliprd(1, 1, complex(0, 0)))
        assert np.isinf(elliprd(0, 1, complex(0, 0)))
        assert isnan(elliprd(1, 1, -np.finfo(np.double).tiny / 2.0))
        assert isnan(elliprd(1, 1, complex(-1, 0)))
        args = array([[0.0, 2.0, 1.0],
                      [2.0, 3.0, 4.0],
                      [1.0j, -1.0j, 2.0],
                      [0.0, 1.0j, -1.0j],
                      [0.0, -1.0+1.0j, 1.0j],
                      [-2.0-1.0j, -1.0j, -1.0+1.0j]])
        expected_results = array([1.7972103521034,
                                  0.16510527294261,
                                  0.65933854154220,
                                  1.2708196271910+2.7811120159521j,
                                  -1.8577235439239-0.96193450888839j,
                                  1.8249027393704-1.2218475784827j])
        for i, arr in enumerate(args):
            assert_allclose(elliprd(*arr), expected_results[i])
    def test_elliprf(self):
        # degenerate/limiting values first, then Carlson's reference table
        assert_allclose(elliprf(1, 1, 1), 1)
        assert_allclose(elliprf(0, 1, 2), 1.31102877714605990523)
        assert elliprf(1, inf, 1) == 0.0
        assert np.isinf(elliprf(0, 1, 0))
        assert isnan(elliprf(1, 1, -1))
        assert elliprf(complex(inf), 0, 1) == 0.0
        assert isnan(elliprf(1, 1, complex(-inf, 1)))
        args = array([[1.0, 2.0, 0.0],
                      [1.0j, -1.0j, 0.0],
                      [0.5, 1.0, 0.0],
                      [-1.0+1.0j, 1.0j, 0.0],
                      [2.0, 3.0, 4.0],
                      [1.0j, -1.0j, 2.0],
                      [-1.0+1.0j, 1.0j, 1.0-1.0j]])
        expected_results = array([1.3110287771461,
                                  1.8540746773014,
                                  1.8540746773014,
                                  0.79612586584234-1.2138566698365j,
                                  0.58408284167715,
                                  1.0441445654064,
                                  0.93912050218619-0.53296252018635j])
        for i, arr in enumerate(args):
            assert_allclose(elliprf(*arr), expected_results[i])
    def test_elliprg(self):
        # degenerate/limiting values first, then Carlson's reference table
        assert_allclose(elliprg(1, 1, 1), 1)
        assert_allclose(elliprg(0, 0, 1), 0.5)
        assert_allclose(elliprg(0, 0, 0), 0)
        assert np.isinf(elliprg(1, inf, 1))
        assert np.isinf(elliprg(complex(inf), 1, 1))
        args = array([[0.0, 16.0, 16.0],
                      [2.0, 3.0, 4.0],
                      [0.0, 1.0j, -1.0j],
                      [-1.0+1.0j, 1.0j, 0.0],
                      [-1.0j, -1.0+1.0j, 1.0j],
                      [0.0, 0.0796, 4.0]])
        expected_results = array([np.pi,
                                  1.7255030280692,
                                  0.42360654239699,
                                  0.44660591677018+0.70768352357515j,
                                  0.36023392184473+0.40348623401722j,
                                  1.0284758090288])
        for i, arr in enumerate(args):
            assert_allclose(elliprg(*arr), expected_results[i])
    def test_elliprj(self):
        # degenerate/limiting values first, then Carlson's reference table
        assert_allclose(elliprj(1, 1, 1, 1), 1)
        assert elliprj(1, 1, inf, 1) == 0.0
        assert isnan(elliprj(1, 0, 0, 0))
        assert isnan(elliprj(-1, 1, 1, 1))
        assert elliprj(1, 1, 1, inf) == 0.0
        args = array([[0.0, 1.0, 2.0, 3.0],
                      [2.0, 3.0, 4.0, 5.0],
                      [2.0, 3.0, 4.0, -1.0+1.0j],
                      [1.0j, -1.0j, 0.0, 2.0],
                      [-1.0+1.0j, -1.0-1.0j, 1.0, 2.0],
                      [1.0j, -1.0j, 0.0, 1.0-1.0j],
                      [-1.0+1.0j, -1.0-1.0j, 1.0, -3.0+1.0j],
                      [2.0, 3.0, 4.0, -0.5],    # Cauchy principal value
                      [2.0, 3.0, 4.0, -5.0]])   # Cauchy principal value
        expected_results = array([0.77688623778582,
                                  0.14297579667157,
                                  0.13613945827771-0.38207561624427j,
                                  1.6490011662711,
                                  0.94148358841220,
                                  1.8260115229009+1.2290661908643j,
                                  -0.61127970812028-1.0684038390007j,
                                  0.24723819703052,    # Cauchy principal value
                                  -0.12711230042964])  # Cauchy principal value
        for i, arr in enumerate(args):
            assert_allclose(elliprj(*arr), expected_results[i])
    @pytest.mark.xfail(reason="Insufficient accuracy on 32-bit")
    def test_elliprj_hard(self):
        # extreme argument magnitudes that stress the duplication algorithm
        assert_allclose(elliprj(6.483625725195452e-08,
                                1.1649136528196886e-27,
                                3.6767340167168e+13,
                                0.493704617023468),
                        8.63426920644241857617477551054e-6,
                        rtol=5e-15, atol=1e-20)
        assert_allclose(elliprj(14.375105857849121,
                                9.993988969725365e-11,
                                1.72844262269944e-26,
                                5.898871222598245e-06),
                        829774.1424801627252574054378691828,
                        rtol=5e-15, atol=1e-20)
class TestEllipLegendreCarlsonIdentities:
    """Test identities expressing the Legendre elliptic integrals in terms
    of Carlson's symmetric integrals. These identities can be found
    in the DLMF https://dlmf.nist.gov/19.25#i .

    Fix: ``finfo(float_)`` used the ``np.float_`` alias, which was removed
    in NumPy 2.0; ``np.finfo(np.float64)`` is the identical, portable
    spelling.
    """
    def setup_class(self):
        # parameters m on a fine grid inside (-1, 1)
        self.m_n1_1 = np.arange(-1., 1., 0.01)
        # For double, this is -(2**1024)
        self.max_neg = np.finfo(np.float64).min
        # Lots of very negative numbers
        self.very_neg_m = -1. * 2.**arange(-1 +
                                           np.log2(-self.max_neg), 0.,
                                           -1.)
        self.ms_up_to_1 = np.concatenate(([self.max_neg],
                                          self.very_neg_m,
                                          self.m_n1_1))
    def test_k(self):
        """Test identity:
        K(m) = R_F(0, 1-m, 1)
        """
        m = self.ms_up_to_1
        assert_allclose(ellipk(m), elliprf(0., 1.-m, 1.))
    def test_km1(self):
        """Test identity:
        K(m) = R_F(0, 1-m, 1)
        But with the ellipkm1 function
        """
        # For double, this is 2**-1022
        tiny = np.finfo(np.float64).tiny
        # All these small powers of 2, up to 2**-1
        m1 = tiny * 2.**arange(0., -np.log2(tiny))
        assert_allclose(ellipkm1(m1), elliprf(0., m1, 1.))
    def test_e(self):
        """Test identity:
        E(m) = 2*R_G(0, 1-k^2, 1)
        """
        m = self.ms_up_to_1
        assert_allclose(ellipe(m), 2.*elliprg(0., 1.-m, 1.))
class TestErf:
    """Tests for erf and its variants (erfc, erfcx, erfi, dawsn, wofz)."""
    def test_erf(self):
        er = special.erf(.25)
        assert_almost_equal(er, 0.2763263902, 8)
    def test_erf_zeros(self):
        # first five complex zeros of erf in the first quadrant
        erz = special.erf_zeros(5)
        erzr = array([1.45061616+1.88094300j,
                      2.24465928+2.61657514j,
                      2.83974105+3.17562810j,
                      3.33546074+3.64617438j,
                      3.76900557+4.06069723j])
        assert_array_almost_equal(erz, erzr, 4)
    def _check_variant_func(self, func, other_func, rtol, atol=0):
        # Compare ``func`` against an equivalent formula ``other_func`` on
        # heavy-tailed random points; non-finite reference values are masked
        # out.  The seed is fixed so the sample (and the mask) is repeatable.
        np.random.seed(1234)
        n = 10000
        x = np.random.pareto(0.02, n) * (2*np.random.randint(0, 2, n) - 1)
        y = np.random.pareto(0.02, n) * (2*np.random.randint(0, 2, n) - 1)
        z = x + 1j*y
        with np.errstate(all='ignore'):
            w = other_func(z)
            w_real = other_func(x).real
            mask = np.isfinite(w)
            w = w[mask]
            z = z[mask]
            mask = np.isfinite(w_real)
            w_real = w_real[mask]
            x = x[mask]
        # test both real and complex variants
        assert_func_equal(func, w, z, rtol=rtol, atol=atol)
        assert_func_equal(func, w_real, x, rtol=rtol, atol=atol)
    def test_erfc_consistent(self):
        self._check_variant_func(
            cephes.erfc,
            lambda z: 1 - cephes.erf(z),
            rtol=1e-12,
            atol=1e-14  # <- the test function loses precision
        )
    def test_erfcx_consistent(self):
        self._check_variant_func(
            cephes.erfcx,
            lambda z: np.exp(z*z) * cephes.erfc(z),
            rtol=1e-12
        )
    def test_erfi_consistent(self):
        self._check_variant_func(
            cephes.erfi,
            lambda z: -1j * cephes.erf(1j*z),
            rtol=1e-12
        )
    def test_dawsn_consistent(self):
        self._check_variant_func(
            cephes.dawsn,
            lambda z: sqrt(pi)/2 * np.exp(-z*z) * cephes.erfi(z),
            rtol=1e-12
        )
    def test_erf_nan_inf(self):
        vals = [np.nan, -np.inf, np.inf]
        expected = [np.nan, -1, 1]
        assert_allclose(special.erf(vals), expected, rtol=1e-15)
    def test_erfc_nan_inf(self):
        vals = [np.nan, -np.inf, np.inf]
        expected = [np.nan, 2, 0]
        assert_allclose(special.erfc(vals), expected, rtol=1e-15)
    def test_erfcx_nan_inf(self):
        vals = [np.nan, -np.inf, np.inf]
        expected = [np.nan, np.inf, 0]
        assert_allclose(special.erfcx(vals), expected, rtol=1e-15)
    def test_erfi_nan_inf(self):
        vals = [np.nan, -np.inf, np.inf]
        expected = [np.nan, -np.inf, np.inf]
        assert_allclose(special.erfi(vals), expected, rtol=1e-15)
    def test_dawsn_nan_inf(self):
        vals = [np.nan, -np.inf, np.inf]
        expected = [np.nan, -0.0, 0.0]
        assert_allclose(special.dawsn(vals), expected, rtol=1e-15)
    def test_wofz_nan_inf(self):
        vals = [np.nan, -np.inf, np.inf]
        expected = [np.nan + np.nan * 1.j, 0.-0.j, 0.+0.j]
        assert_allclose(special.wofz(vals), expected, rtol=1e-15)
class TestEuler:
    """Check euler(n) against tabulated Euler numbers."""
    def test_euler(self):
        # small orders (also guards against segfaults in the low-order path)
        assert_allclose(special.euler(0), [1], rtol=1e-15)
        assert_allclose(special.euler(1), [1, 0], rtol=1e-15)
        assert_allclose(special.euler(2), [1, 0, -1], rtol=1e-15)
        eu24 = special.euler(24)
        # |E_2k| for k = 0..12 from MathWorld; odd-index numbers vanish
        mathworld = [1, 1, 5, 61, 1385, 50521, 2702765, 199360981,
                     19391512145, 2404879675441,
                     370371188237525, 69348874393137901,
                     15514534163557086905]
        correct = zeros((25,), 'd')
        for k, value in enumerate(mathworld):
            # the non-zero Euler numbers alternate in sign
            correct[2*k] = -float(value) if k % 2 else float(value)
        with np.errstate(all='ignore'):
            err = nan_to_num((eu24 - correct) / correct)
        assert_almost_equal(max(err), 0.0, 14)
class TestExp:
    """Sanity checks for exp2, exp10 and expm1."""
    def test_exp2(self):
        assert_equal(special.exp2(2), 2 ** 2)
    def test_exp2more(self):
        assert_almost_equal(special.exp2(2.5), 2 ** 2.5, 8)
    def test_exp10(self):
        assert_approx_equal(special.exp10(2), 10 ** 2)
    def test_exp10more(self):
        assert_almost_equal(special.exp10(2.5), 10 ** 2.5, 8)
    def test_expm1(self):
        # expm1(x) = exp(x) - 1
        for x in (2, 3, 4):
            assert_almost_equal(special.expm1(x), exp(x) - 1, 8)
    def test_expm1more(self):
        for x in (2, 2.1, 2.2):
            assert_almost_equal(special.expm1(x), exp(x) - 1, 8)
class TestFactorialFunctions:
@pytest.mark.parametrize("exact", [True, False])
def test_factorialx_scalar_return_type(self, exact):
assert np.isscalar(special.factorial(1, exact=exact))
assert np.isscalar(special.factorial2(1, exact=exact))
assert np.isscalar(special.factorialk(1, 3, exact=True))
@pytest.mark.parametrize("n", [-1, -2, -3])
@pytest.mark.parametrize("exact", [True, False])
def test_factorialx_negative(self, exact, n):
assert_equal(special.factorial(n, exact=exact), 0)
assert_equal(special.factorial2(n, exact=exact), 0)
assert_equal(special.factorialk(n, 3, exact=True), 0)
@pytest.mark.parametrize("exact", [True, False])
def test_factorialx_negative_array(self, exact):
assert_func = assert_array_equal if exact else assert_allclose
# Consistent output for n < 0
assert_func(special.factorial([-5, -4, 0, 1], exact=exact),
[0, 0, 1, 1])
assert_func(special.factorial2([-5, -4, 0, 1], exact=exact),
[0, 0, 1, 1])
assert_func(special.factorialk([-5, -4, 0, 1], 3, exact=True),
[0, 0, 1, 1])
    @pytest.mark.parametrize("exact", [True, False])
    @pytest.mark.parametrize("content", [np.nan, None, np.datetime64('nat')],
                             ids=["NaN", "None", "NaT"])
    def test_factorialx_nan(self, content, exact):
        """NaN-like scalars map to NaN; array handling differs per flavor."""
        # scalar
        assert special.factorial(content, exact=exact) is np.nan
        assert special.factorial2(content, exact=exact) is np.nan
        assert special.factorialk(content, 3, exact=True) is np.nan
        # array-like (initializes np.array with default dtype)
        if content is not np.nan:
            # None causes object dtype, which is not supported; as is datetime
            with pytest.raises(ValueError, match="Unsupported datatype.*"):
                special.factorial([content], exact=exact)
        elif exact:
            # cannot use `is np.nan` see https://stackoverflow.com/a/52124109
            with pytest.warns(DeprecationWarning, match="Non-integer array.*"):
                assert np.isnan(special.factorial([content], exact=exact)[0])
        else:
            assert np.isnan(special.factorial([content], exact=exact)[0])
        # factorial{2,k} don't support array case due to dtype constraints
        with pytest.raises(ValueError, match="factorial2 does not support.*"):
            special.factorial2([content], exact=exact)
        with pytest.raises(ValueError, match="factorialk does not support.*"):
            special.factorialk([content], 3, exact=True)
        # array-case also tested in test_factorial{,2,k}_corner_cases
    @pytest.mark.parametrize("levels", range(1, 5))
    @pytest.mark.parametrize("exact", [True, False])
    def test_factorialx_array_shape(self, levels, exact):
        """Deeply nested inputs keep their shape in the result."""
        def _nest_me(x, k=1):
            """
            Double x and nest it k times
            For example:
            >>> _nest_me([3, 4], 2)
            [[[3, 4], [3, 4]], [[3, 4], [3, 4]]]
            """
            if k == 0:
                return x
            else:
                return _nest_me([x, x], k-1)
        def _check(res, nucleus):
            # expected value: the nucleus nested to the same depth as input
            exp = np.array(_nest_me(nucleus, k=levels), dtype=object)
            # test that ndarray shape is maintained
            # need to cast to float due to numpy/numpy#21220
            assert_allclose(res.astype(np.float64), exp.astype(np.float64))
        n = np.array(_nest_me([5, 25], k=levels))
        exp_nucleus = {1: [120, math.factorial(25)],
                       # correctness of factorial2() is tested elsewhere
                       2: [15, special.factorial2(25, exact=True)],
                       3: [10, special.factorialk(25, 3)]}
        _check(special.factorial(n, exact=exact), exp_nucleus[1])
        _check(special.factorial2(n, exact=exact), exp_nucleus[2])
        _check(special.factorialk(n, 3, exact=True), exp_nucleus[3])
@pytest.mark.parametrize("exact", [True, False])
@pytest.mark.parametrize("dim", range(0, 5))
def test_factorialx_array_dimension(self, dim, exact):
n = np.array(5, ndmin=dim)
exp = {1: 120, 2: 15, 3: 10}
assert_allclose(special.factorial(n, exact=exact),
np.array(exp[1], ndmin=dim))
assert_allclose(special.factorial2(n, exact=exact),
np.array(exp[2], ndmin=dim))
assert_allclose(special.factorialk(n, 3, exact=True),
np.array(exp[3], ndmin=dim))
@pytest.mark.parametrize("exact", [True, False])
@pytest.mark.parametrize("level", range(1, 5))
def test_factorialx_array_like(self, level, exact):
def _nest_me(x, k=1):
if k == 0:
return x
else:
return _nest_me([x], k-1)
n = _nest_me([5], k=level-1) # nested list
exp_nucleus = {1: 120, 2: 15, 3: 10}
assert_func = assert_array_equal if exact else assert_allclose
assert_func(special.factorial(n, exact=exact),
np.array(exp_nucleus[1], ndmin=level))
assert_func(special.factorial2(n, exact=exact),
np.array(exp_nucleus[2], ndmin=level))
assert_func(special.factorialk(n, 3, exact=True),
np.array(exp_nucleus[3], ndmin=level))
# note that n=170 is the last integer such that factorial(n) fits float64
@pytest.mark.parametrize('n', range(30, 180, 10))
def test_factorial_accuracy(self, n):
# Compare exact=True vs False, i.e. that the accuracy of the
# approximation is better than the specified tolerance.
rtol = 6e-14 if sys.platform == 'win32' else 1e-15
# need to cast exact result to float due to numpy/numpy#21220
assert_allclose(float(special.factorial(n, exact=True)),
special.factorial(n, exact=False), rtol=rtol)
assert_allclose(special.factorial([n], exact=True).astype(float),
special.factorial([n], exact=False), rtol=rtol)
@pytest.mark.parametrize('n',
list(range(0, 22)) + list(range(30, 180, 10)))
def test_factorial_int_reference(self, n):
# Compare all with math.factorial
correct = math.factorial(n)
assert_array_equal(correct, special.factorial(n, True))
assert_array_equal(correct, special.factorial([n], True)[0])
rtol = 6e-14 if sys.platform == 'win32' else 1e-15
assert_allclose(float(correct), special.factorial(n, False),
rtol=rtol)
assert_allclose(float(correct), special.factorial([n], False)[0],
rtol=rtol)
@pytest.mark.parametrize("exact", [True, False])
def test_factorial_float_reference(self, exact):
def _check(n, expected):
# support for exact=True with scalar floats grandfathered in
assert_allclose(special.factorial(n, exact=exact), expected)
# non-integer types in arrays only allowed with exact=False
assert_allclose(special.factorial([n])[0], expected)
# Reference values from mpmath for gamma(n+1)
_check(0.01, 0.994325851191506032181932988)
_check(1.11, 1.051609009483625091514147465)
_check(5.55, 314.9503192327208241614959052)
_check(11.1, 50983227.84411615655137170553)
_check(33.3, 2.493363339642036352229215273e+37)
_check(55.5, 9.479934358436729043289162027e+73)
_check(77.7, 3.060540559059579022358692625e+114)
_check(99.9, 5.885840419492871504575693337e+157)
# close to maximum for float64
_check(170.6243, 1.79698185749571048960082e+308)
@pytest.mark.parametrize("dtype", [np.int64, np.float64,
np.complex128, object])
@pytest.mark.parametrize("exact", [True, False])
@pytest.mark.parametrize("dim", range(0, 5))
# test empty & non-empty arrays, with nans and mixed
@pytest.mark.parametrize("content",
[[], [1], [1.1], [np.nan], [np.nan, 1]],
ids=["[]", "[1]", "[1.1]", "[NaN]", "[NaN, 1]"])
def test_factorial_array_corner_cases(self, content, dim, exact, dtype):
if dtype == np.int64 and any(np.isnan(x) for x in content):
pytest.skip("impossible combination")
# np.array(x, ndim=0) will not be 0-dim. unless x is too
content = content if (dim > 0 or len(content) != 1) else content[0]
n = np.array(content, ndmin=dim, dtype=dtype)
result = None
if not content:
result = special.factorial(n, exact=exact)
elif not (np.issubdtype(n.dtype, np.integer)
or np.issubdtype(n.dtype, np.floating)):
with pytest.raises(ValueError, match="Unsupported datatype*"):
special.factorial(n, exact=exact)
elif (exact and not np.issubdtype(n.dtype, np.integer) and n.size and
np.allclose(n[~np.isnan(n)], n[~np.isnan(n)].astype(np.int64))):
# using integers but in array with wrong dtype (e.g. due to NaNs)
with pytest.warns(DeprecationWarning, match="Non-integer array.*"):
result = special.factorial(n, exact=exact)
# expected dtype is integer, unless there are NaNs
dtype = np.float_ if np.any(np.isnan(n)) else np.int_
elif exact and not np.issubdtype(n.dtype, np.integer):
with pytest.raises(ValueError, match="factorial with exact=.*"):
special.factorial(n, exact=exact)
else:
# no error
result = special.factorial(n, exact=exact)
# assert_equal does not distinguish scalars and 0-dim arrays of the same value, see
# https://github.com/numpy/numpy/issues/24050
def assert_really_equal(x, y):
assert type(x) == type(y), f"types not equal: {type(x)}, {type(y)}"
assert_equal(x, y)
if result is not None:
# expected result is empty if and only if n is empty,
# and has the same dtype & dimension as n
with suppress_warnings() as sup:
sup.filter(DeprecationWarning)
# keep 0-dim.; otherwise n.ravel().ndim==1, even if n.ndim==0
n_flat = n.ravel() if n.ndim else n
r = special.factorial(n_flat, exact=exact) if n.size else []
expected = np.array(r, ndmin=dim, dtype=dtype)
assert_really_equal(result, expected)
@pytest.mark.parametrize("exact", [True, False])
@pytest.mark.parametrize("n", [1, 1.1, 2 + 2j, np.nan, None],
ids=["1", "1.1", "2+2j", "NaN", "None"])
def test_factorial_scalar_corner_cases(self, n, exact):
if (n is None or n is np.nan or np.issubdtype(type(n), np.integer)
or np.issubdtype(type(n), np.floating)):
# no error
result = special.factorial(n, exact=exact)
exp = np.nan if n is np.nan or n is None else special.factorial(n)
assert_equal(result, exp)
else:
with pytest.raises(ValueError, match="Unsupported datatype*"):
special.factorial(n, exact=exact)
# use odd increment to make sure both odd & even numbers are tested!
@pytest.mark.parametrize('n', range(30, 180, 11))
def test_factorial2_accuracy(self, n):
# Compare exact=True vs False, i.e. that the accuracy of the
# approximation is better than the specified tolerance.
rtol = 2e-14 if sys.platform == 'win32' else 1e-15
# need to cast exact result to float due to numpy/numpy#21220
assert_allclose(float(special.factorial2(n, exact=True)),
special.factorial2(n, exact=False), rtol=rtol)
assert_allclose(special.factorial2([n], exact=True).astype(float),
special.factorial2([n], exact=False), rtol=rtol)
@pytest.mark.parametrize('n',
list(range(0, 22)) + list(range(30, 180, 11)))
def test_factorial2_int_reference(self, n):
# Compare all with correct value
# Cannot use np.product due to overflow
correct = functools.reduce(operator.mul, list(range(n, 0, -2)), 1)
assert_array_equal(correct, special.factorial2(n, True))
assert_array_equal(correct, special.factorial2([n], True)[0])
assert_allclose(float(correct), special.factorial2(n, False))
assert_allclose(float(correct), special.factorial2([n], False)[0])
    @pytest.mark.parametrize("dtype", [np.int64, np.float64,
                                       np.complex128, object])
    @pytest.mark.parametrize("exact", [True, False])
    @pytest.mark.parametrize("dim", range(0, 5))
    # test empty & non-empty arrays, with nans and mixed
    @pytest.mark.parametrize("content", [[], [1], [np.nan], [np.nan, 1]],
                             ids=["[]", "[1]", "[NaN]", "[NaN, 1]"])
    def test_factorial2_array_corner_cases(self, content, dim, exact, dtype):
        """factorial2 arrays: integer dtypes pass, all others must raise."""
        if dtype == np.int64 and any(np.isnan(x) for x in content):
            pytest.skip("impossible combination")
        # np.array(x, ndim=0) will not be 0-dim. unless x is too
        content = content if (dim > 0 or len(content) != 1) else content[0]
        n = np.array(content, ndmin=dim, dtype=dtype)
        if np.issubdtype(n.dtype, np.integer) or (not content):
            # no error; the chosen inputs ([], 0, 1) are factorial2 fixed points
            result = special.factorial2(n, exact=exact)
            # expected result is identical to n for exact=True resp. empty
            # arrays (assert_allclose chokes on object), otherwise up to tol
            func = assert_equal if exact or (not content) else assert_allclose
            func(result, n)
        else:
            with pytest.raises(ValueError, match="factorial2 does not*"):
                special.factorial2(n, 3)
    @pytest.mark.parametrize("exact", [True, False])
    @pytest.mark.parametrize("n", [1, 1.1, 2 + 2j, np.nan, None],
                             ids=["1", "1.1", "2+2j", "NaN", "None"])
    def test_factorial2_scalar_corner_cases(self, n, exact):
        """Check factorial2 on scalar corner cases (float, complex, NaN, None).

        Integers, NaN and None must succeed (NaN/None -> NaN); anything else
        must raise ValueError.
        """
        if n is None or n is np.nan or np.issubdtype(type(n), np.integer):
            # no error
            result = special.factorial2(n, exact=exact)
            # for valid small integers, factorial2 coincides with factorial
            exp = np.nan if n is np.nan or n is None else special.factorial(n)
            assert_equal(result, exp)
        else:
            # non-integer scalars (float, complex) are rejected
            with pytest.raises(ValueError, match="factorial2 does not*"):
                special.factorial2(n, exact=exact)
@pytest.mark.parametrize('k', list(range(1, 5)) + [10, 20])
@pytest.mark.parametrize('n',
list(range(0, 22)) + list(range(22, 100, 11)))
def test_factorialk_int_reference(self, n, k):
# Compare all with correct value
# Would be nice to use np.product here, but that's
# broken on windows, see numpy/numpy#21219
correct = functools.reduce(operator.mul, list(range(n, 0, -k)), 1)
assert_array_equal(correct, special.factorialk(n, k, True))
assert_array_equal(correct, special.factorialk([n], k, True)[0])
# exact=False not yet supported
# assert_allclose(float(correct), special.factorialk(n, k, False))
# assert_allclose(float(correct), special.factorialk([n], k, False)[0])
    @pytest.mark.parametrize("dtype", [np.int64, np.float64,
                                       np.complex128, object])
    @pytest.mark.parametrize("dim", range(0, 5))
    # test empty & non-empty arrays, with nans and mixed
    @pytest.mark.parametrize("content", [[], [1], [np.nan], [np.nan, 1]],
                             ids=["[]", "[1]", "[NaN]", "[NaN, 1]"])
    def test_factorialk_array_corner_cases(self, content, dim, dtype):
        """Check factorialk on empty/NaN/mixed arrays across dtypes and ndims.

        Integer-dtype (and empty) inputs must succeed; any other dtype must
        raise ValueError.
        """
        if dtype == np.int64 and any(np.isnan(x) for x in content):
            pytest.skip("impossible combination")  # NaN cannot live in int64
        # np.array(x, ndim=0) will not be 0-dim. unless x is too
        content = content if (dim > 0 or len(content) != 1) else content[0]
        n = np.array(content, ndmin=dim, dtype=dtype)
        if np.issubdtype(n.dtype, np.integer) or (not content):
            # no error; expected result is identical to n
            assert_equal(special.factorialk(n, 3), n)
        else:
            # non-integer dtypes are rejected for array input
            with pytest.raises(ValueError, match="factorialk does not*"):
                special.factorialk(n, 3)
@pytest.mark.parametrize("exact", [True, False])
@pytest.mark.parametrize("k", range(1, 5))
@pytest.mark.parametrize("n", [1, 1.1, 2 + 2j, np.nan, None],
ids=["1", "1.1", "2+2j", "NaN", "None"])
def test_factorialk_scalar_corner_cases(self, n, k, exact):
if not exact:
with pytest.raises(NotImplementedError):
special.factorialk(n, k=k, exact=exact)
elif n is None or n is np.nan or np.issubdtype(type(n), np.integer):
# no error
result = special.factorial2(n, exact=exact)
nan_cond = n is np.nan or n is None
expected = np.nan if nan_cond else special.factorialk(n, k=k)
assert_equal(result, expected)
else:
with pytest.raises(ValueError, match="factorialk does not*"):
special.factorialk(n, k=k, exact=exact)
@pytest.mark.parametrize("k", [0, 1.1, np.nan, "1"])
def test_factorialk_raises_k(self, k):
with pytest.raises(ValueError, match="k must be a positive integer*"):
special.factorialk(1, k)
    @pytest.mark.parametrize("k", range(1, 12))
    def test_factorialk_dtype(self, k):
        """Check the result dtype escalation int_ -> int64 -> object.

        The thresholds come from the module-level ``_FACTORIALK_LIMITS_32BITS``
        and ``_FACTORIALK_LIMITS_64BITS`` tables, which record the largest n
        whose k-factorial still fits the respective integer type.
        """
        if k in _FACTORIALK_LIMITS_64BITS.keys():
            # at the 32-bit limit the default integer type still suffices;
            # one past it the result needs int64
            n = np.array([_FACTORIALK_LIMITS_32BITS[k]])
            assert_equal(special.factorialk(n, k).dtype, np.int_)
            assert_equal(special.factorialk(n + 1, k).dtype, np.int64)
            # assert maximality of limits for given dtype
            assert special.factorialk(n + 1, k) > np.iinfo(np.int32).max
            # at the 64-bit limit int64 suffices; one past it we fall back
            # to Python ints (object dtype)
            n = np.array([_FACTORIALK_LIMITS_64BITS[k]])
            assert_equal(special.factorialk(n, k).dtype, np.int64)
            assert_equal(special.factorialk(n + 1, k).dtype, object)
            assert special.factorialk(n + 1, k) > np.iinfo(np.int64).max
        else:
            # for k >= 10, we always return object
            assert_equal(special.factorialk(np.array([1]), k).dtype, object)
def test_factorial_mixed_nan_inputs(self):
x = np.array([np.nan, 1, 2, 3, np.nan])
expected = np.array([np.nan, 1, 2, 6, np.nan])
assert_equal(special.factorial(x, exact=False), expected)
with pytest.warns(DeprecationWarning, match=r"Non-integer array.*"):
assert_equal(special.factorial(x, exact=True), expected)
class TestFresnel:
    """Tests for the Fresnel integrals S(z), C(z) and their zeros."""

    @pytest.mark.parametrize("z, s, c", [
        # some positive value
        (.5, 0.064732432859999287, 0.49234422587144644),
        (.5 + .0j, 0.064732432859999287, 0.49234422587144644),
        # negative half annulus
        # https://github.com/scipy/scipy/issues/12309
        # Reference values can be reproduced with
        # https://www.wolframalpha.com/input/?i=FresnelS%5B-2.0+%2B+0.1i%5D
        # https://www.wolframalpha.com/input/?i=FresnelC%5B-2.0+%2B+0.1i%5D
        (
            -2.0 + 0.1j,
            -0.3109538687728942-0.0005870728836383176j,
            -0.4879956866358554+0.10670801832903172j
        ),
        (
            -0.1 - 1.5j,
            -0.03918309471866977+0.7197508454568574j,
            0.09605692502968956-0.43625191013617465j
        ),
        # a different algorithm kicks in for "large" values, i.e., |z| >= 4.5,
        # make sure to test both float and complex values; a different
        # algorithm is used
        (6.0, 0.44696076, 0.49953147),
        (6.0 + 0.0j, 0.44696076, 0.49953147),
        (6.0j, -0.44696076j, 0.49953147j),
        (-6.0 + 0.0j, -0.44696076, -0.49953147),
        (-6.0j, 0.44696076j, -0.49953147j),
        # inf
        (np.inf, 0.5, 0.5),
        (-np.inf, -0.5, -0.5),
    ])
    def test_fresnel_values(self, z, s, c):
        # fresnel returns the pair (S(z), C(z))
        frs = array(special.fresnel(z))
        assert_array_almost_equal(frs, array([s, c]), 8)

    # values from pg 329 Table 7.11 of A & S
    # slightly corrected in 4th decimal place
    def test_fresnel_zeros(self):
        # complex zeros of S and C; check both tabulated values and that
        # evaluating the functions at the returned zeros gives ~0
        szo, czo = special.fresnel_zeros(5)
        assert_array_almost_equal(szo,
                                  array([2.0093+0.2885j,
                                          2.8335+0.2443j,
                                          3.4675+0.2185j,
                                          4.0026+0.2009j,
                                          4.4742+0.1877j]),3)
        assert_array_almost_equal(czo,
                                  array([1.7437+0.3057j,
                                          2.6515+0.2529j,
                                          3.3204+0.2240j,
                                          3.8757+0.2047j,
                                          4.3611+0.1907j]),3)
        vals1 = special.fresnel(szo)[0]
        vals2 = special.fresnel(czo)[1]
        assert_array_almost_equal(vals1,0,14)
        assert_array_almost_equal(vals2,0,14)

    def test_fresnelc_zeros(self):
        # fresnelc_zeros must agree with the C-component of fresnel_zeros
        szo, czo = special.fresnel_zeros(6)
        frc = special.fresnelc_zeros(6)
        assert_array_almost_equal(frc,czo,12)

    def test_fresnels_zeros(self):
        # fresnels_zeros must agree with the S-component of fresnel_zeros
        szo, czo = special.fresnel_zeros(5)
        frs = special.fresnels_zeros(5)
        assert_array_almost_equal(frs,szo,12)
class TestGamma:
    """Tests for gamma, gammaln, rgamma and the incomplete gamma inverses."""

    def test_gamma(self):
        # gamma(5) = 4! = 24
        assert_equal(special.gamma(5), 24.0)

    def test_gammaln(self):
        # gammaln must agree with log(gamma)
        expected = log(special.gamma(3))
        assert_almost_equal(special.gammaln(3), expected, 8)

    def test_gammainccinv(self):
        # at p = 0.5 the inverses of P and Q coincide
        assert_almost_equal(special.gammainccinv(.5, .5),
                            special.gammaincinv(.5, .5), 8)

    @with_special_errors
    def test_gammaincinv(self):
        # round-trip gammaincinv -> gammainc
        inv = special.gammaincinv(.4, .4)
        assert_almost_equal(special.gammainc(.4, inv), 0.4, 1)
        # tiny tail probabilities must also round-trip accurately
        tail = special.gammainc(10, 0.05)
        inv = special.gammaincinv(10, 2.5715803516000736e-20)
        assert_almost_equal(0.05, inv, decimal=10)
        assert_almost_equal(tail, 2.5715803516000736e-20, decimal=10)
        inv = special.gammaincinv(50, 8.20754777388471303050299243573393e-18)
        assert_almost_equal(11.0, inv, decimal=10)

    @with_special_errors
    def test_975(self):
        # Regression test for ticket #975 -- switch point in algorithm.
        # Check behaviour at the switch point itself, the floats immediately
        # next to it, and points a bit further away.
        points = [0.25,
                  np.nextafter(0.25, 0), 0.25 - 1e-12,
                  np.nextafter(0.25, 1), 0.25 + 1e-12]
        for p in points:
            inv = special.gammaincinv(.4, p)
            assert_allclose(special.gammainc(0.4, inv), p, rtol=1e-12)

    def test_rgamma(self):
        # rgamma is the reciprocal of gamma
        assert_almost_equal(special.rgamma(8), 1 / special.gamma(8), 8)

    def test_infinity(self):
        # gamma has poles at non-positive integers; rgamma is zero there
        assert_(np.isinf(special.gamma(-1)))
        assert_equal(special.rgamma(-1), 0)
class TestHankel:
    """Tests for the Hankel functions H1, H2 and their scaled variants."""

    def test_negv1(self):
        # reflection: H1_{-n}(x) = (-1)^n H1_n(x); sign flips for odd n
        assert_almost_equal(special.hankel1(-3, 2), -special.hankel1(3, 2), 14)

    def test_hankel1(self):
        # defining relation H1_v(x) = J_v(x) + i*Y_v(x)
        expected = special.jv(1, .1) + special.yv(1, .1) * 1j
        assert_almost_equal(special.hankel1(1, .1), expected, 8)

    def test_negv1e(self):
        # the same reflection symmetry holds for the scaled variant
        assert_almost_equal(special.hankel1e(-3, 2),
                            -special.hankel1e(3, 2), 14)

    def test_hankel1e(self):
        # hankel1e(v, x) = hankel1(v, x) * exp(-i*x)
        expected = special.hankel1(1, .1) * exp(-.1j)
        assert_almost_equal(special.hankel1e(1, .1), expected, 8)

    def test_negv2(self):
        # reflection: H2_{-n}(x) = (-1)^n H2_n(x)
        assert_almost_equal(special.hankel2(-3, 2), -special.hankel2(3, 2), 14)

    def test_hankel2(self):
        # defining relation H2_v(x) = J_v(x) - i*Y_v(x)
        expected = special.jv(1, .1) - special.yv(1, .1) * 1j
        assert_almost_equal(special.hankel2(1, .1), expected, 8)

    def test_neg2e(self):
        # reflection symmetry for the scaled second Hankel function
        assert_almost_equal(special.hankel2e(-3, 2),
                            -special.hankel2e(3, 2), 14)

    def test_hankl2e(self):
        # self-consistency call retained from the original suite
        assert_almost_equal(special.hankel2e(1, .1),
                            special.hankel2e(1, .1), 8)
class TestHyper:
    """Tests for hypergeometric functions (hyp0f1, hyp1f1, hyp2f1, hyperu)."""

    def test_h1vp(self):
        # derivative of H1: h1vp(v, x) = jvp(v, x) + i*yvp(v, x)
        h1 = special.h1vp(1,.1)
        h1real = (special.jvp(1,.1) + special.yvp(1,.1)*1j)
        assert_almost_equal(h1,h1real,8)

    def test_h2vp(self):
        # derivative of H2: h2vp(v, x) = jvp(v, x) - i*yvp(v, x)
        h2 = special.h2vp(1,.1)
        h2real = (special.jvp(1,.1) - special.yvp(1,.1)*1j)
        assert_almost_equal(h2,h2real,8)

    def test_hyp0f1(self):
        # scalar input
        assert_allclose(special.hyp0f1(2.5, 0.5), 1.21482702689997, rtol=1e-12)
        assert_allclose(special.hyp0f1(2.5, 0), 1.0, rtol=1e-15)
        # float input, expected values match mpmath
        x = special.hyp0f1(3.0, [-1.5, -1, 0, 1, 1.5])
        expected = np.array([0.58493659229143, 0.70566805723127, 1.0,
                             1.37789689539747, 1.60373685288480])
        assert_allclose(x, expected, rtol=1e-12)
        # complex input
        x = special.hyp0f1(3.0, np.array([-1.5, -1, 0, 1, 1.5]) + 0.j)
        assert_allclose(x, expected.astype(complex), rtol=1e-12)
        # test broadcasting
        x1 = [0.5, 1.5, 2.5]
        x2 = [0, 1, 0.5]
        x = special.hyp0f1(x1, x2)
        expected = [1.0, 1.8134302039235093, 1.21482702689997]
        assert_allclose(x, expected, rtol=1e-12)
        x = special.hyp0f1(np.row_stack([x1] * 2), x2)
        assert_allclose(x, np.row_stack([expected] * 2), rtol=1e-12)
        # incompatible shapes must raise
        assert_raises(ValueError, special.hyp0f1,
                      np.row_stack([x1] * 3), [0, 1])

    def test_hyp0f1_gh5764(self):
        # Just checks the point that failed; there's a more systematic
        # test in test_mpmath
        res = special.hyp0f1(0.8, 0.5 + 0.5*1J)
        # The expected value was generated using mpmath
        assert_almost_equal(res, 1.6139719776441115 + 1J*0.80893054061790665)

    def test_hyp1f1(self):
        # spot check of the confluent hypergeometric function
        hyp1 = special.hyp1f1(.1,.1,.3)
        assert_almost_equal(hyp1, 1.3498588075760032,7)

        # test contributed by Moritz Deger (2008-05-29)
        # https://github.com/scipy/scipy/issues/1186 (Trac #659)

        # reference data obtained from mathematica [ a, b, x, m(a,b,x)]:
        # produced with test_hyp1f1.nb
        ref_data = array([[-8.38132975e+00, -1.28436461e+01, -2.91081397e+01, 1.04178330e+04],
                          [2.91076882e+00, -6.35234333e+00, -1.27083993e+01, 6.68132725e+00],
                          [-1.42938258e+01, 1.80869131e-01, 1.90038728e+01, 1.01385897e+05],
                          [5.84069088e+00, 1.33187908e+01, 2.91290106e+01, 1.59469411e+08],
                          [-2.70433202e+01, -1.16274873e+01, -2.89582384e+01, 1.39900152e+24],
                          [4.26344966e+00, -2.32701773e+01, 1.91635759e+01, 6.13816915e+21],
                          [1.20514340e+01, -3.40260240e+00, 7.26832235e+00, 1.17696112e+13],
                          [2.77372955e+01, -1.99424687e+00, 3.61332246e+00, 3.07419615e+13],
                          [1.50310939e+01, -2.91198675e+01, -1.53581080e+01, -3.79166033e+02],
                          [1.43995827e+01, 9.84311196e+00, 1.93204553e+01, 2.55836264e+10],
                          [-4.08759686e+00, 1.34437025e+01, -1.42072843e+01, 1.70778449e+01],
                          [8.05595738e+00, -1.31019838e+01, 1.52180721e+01, 3.06233294e+21],
                          [1.81815804e+01, -1.42908793e+01, 9.57868793e+00, -2.84771348e+20],
                          [-2.49671396e+01, 1.25082843e+01, -1.71562286e+01, 2.36290426e+07],
                          [2.67277673e+01, 1.70315414e+01, 6.12701450e+00, 7.77917232e+03],
                          [2.49565476e+01, 2.91694684e+01, 6.29622660e+00, 2.35300027e+02],
                          [6.11924542e+00, -1.59943768e+00, 9.57009289e+00, 1.32906326e+11],
                          [-1.47863653e+01, 2.41691301e+01, -1.89981821e+01, 2.73064953e+03],
                          [2.24070483e+01, -2.93647433e+00, 8.19281432e+00, -6.42000372e+17],
                          [8.04042600e-01, 1.82710085e+01, -1.97814534e+01, 5.48372441e-01],
                          [1.39590390e+01, 1.97318686e+01, 2.37606635e+00, 5.51923681e+00],
                          [-4.66640483e+00, -2.00237930e+01, 7.40365095e+00, 4.50310752e+00],
                          [2.76821999e+01, -6.36563968e+00, 1.11533984e+01, -9.28725179e+23],
                          [-2.56764457e+01, 1.24544906e+00, 1.06407572e+01, 1.25922076e+01],
                          [3.20447808e+00, 1.30874383e+01, 2.26098014e+01, 2.03202059e+04],
                          [-1.24809647e+01, 4.15137113e+00, -2.92265700e+01, 2.39621411e+08],
                          [2.14778108e+01, -2.35162960e+00, -1.13758664e+01, 4.46882152e-01],
                          [-9.85469168e+00, -3.28157680e+00, 1.67447548e+01, -1.07342390e+07],
                          [1.08122310e+01, -2.47353236e+01, -1.15622349e+01, -2.91733796e+03],
                          [-2.67933347e+01, -3.39100709e+00, 2.56006986e+01, -5.29275382e+09],
                          [-8.60066776e+00, -8.02200924e+00, 1.07231926e+01, 1.33548320e+06],
                          [-1.01724238e-01, -1.18479709e+01, -2.55407104e+01, 1.55436570e+00],
                          [-3.93356771e+00, 2.11106818e+01, -2.57598485e+01, 2.13467840e+01],
                          [3.74750503e+00, 1.55687633e+01, -2.92841720e+01, 1.43873509e-02],
                          [6.99726781e+00, 2.69855571e+01, -1.63707771e+01, 3.08098673e-02],
                          [-2.31996011e+01, 3.47631054e+00, 9.75119815e-01, 1.79971073e-02],
                          [2.38951044e+01, -2.91460190e+01, -2.50774708e+00, 9.56934814e+00],
                          [1.52730825e+01, 5.77062507e+00, 1.21922003e+01, 1.32345307e+09],
                          [1.74673917e+01, 1.89723426e+01, 4.94903250e+00, 9.90859484e+01],
                          [1.88971241e+01, 2.86255413e+01, 5.52360109e-01, 1.44165360e+00],
                          [1.02002319e+01, -1.66855152e+01, -2.55426235e+01, 6.56481554e+02],
                          [-1.79474153e+01, 1.22210200e+01, -1.84058212e+01, 8.24041812e+05],
                          [-1.36147103e+01, 1.32365492e+00, -7.22375200e+00, 9.92446491e+05],
                          [7.57407832e+00, 2.59738234e+01, -1.34139168e+01, 3.64037761e-02],
                          [2.21110169e+00, 1.28012666e+01, 1.62529102e+01, 1.33433085e+02],
                          [-2.64297569e+01, -1.63176658e+01, -1.11642006e+01, -2.44797251e+13],
                          [-2.46622944e+01, -3.02147372e+00, 8.29159315e+00, -3.21799070e+05],
                          [-1.37215095e+01, -1.96680183e+01, 2.91940118e+01, 3.21457520e+12],
                          [-5.45566105e+00, 2.81292086e+01, 1.72548215e-01, 9.66973000e-01],
                          [-1.55751298e+00, -8.65703373e+00, 2.68622026e+01, -3.17190834e+16],
                          [2.45393609e+01, -2.70571903e+01, 1.96815505e+01, 1.80708004e+37],
                          [5.77482829e+00, 1.53203143e+01, 2.50534322e+01, 1.14304242e+06],
                          [-1.02626819e+01, 2.36887658e+01, -2.32152102e+01, 7.28965646e+02],
                          [-1.30833446e+00, -1.28310210e+01, 1.87275544e+01, -9.33487904e+12],
                          [5.83024676e+00, -1.49279672e+01, 2.44957538e+01, -7.61083070e+27],
                          [-2.03130747e+01, 2.59641715e+01, -2.06174328e+01, 4.54744859e+04],
                          [1.97684551e+01, -2.21410519e+01, -2.26728740e+01, 3.53113026e+06],
                          [2.73673444e+01, 2.64491725e+01, 1.57599882e+01, 1.07385118e+07],
                          [5.73287971e+00, 1.21111904e+01, 1.33080171e+01, 2.63220467e+03],
                          [-2.82751072e+01, 2.08605881e+01, 9.09838900e+00, -6.60957033e-07],
                          [1.87270691e+01, -1.74437016e+01, 1.52413599e+01, 6.59572851e+27],
                          [6.60681457e+00, -2.69449855e+00, 9.78972047e+00, -2.38587870e+12],
                          [1.20895561e+01, -2.51355765e+01, 2.30096101e+01, 7.58739886e+32],
                          [-2.44682278e+01, 2.10673441e+01, -1.36705538e+01, 4.54213550e+04],
                          [-4.50665152e+00, 3.72292059e+00, -4.83403707e+00, 2.68938214e+01],
                          [-7.46540049e+00, -1.08422222e+01, -1.72203805e+01, -2.09402162e+02],
                          [-2.00307551e+01, -7.50604431e+00, -2.78640020e+01, 4.15985444e+19],
                          [1.99890876e+01, 2.20677419e+01, -2.51301778e+01, 1.23840297e-09],
                          [2.03183823e+01, -7.66942559e+00, 2.10340070e+01, 1.46285095e+31],
                          [-2.90315825e+00, -2.55785967e+01, -9.58779316e+00, 2.65714264e-01],
                          [2.73960829e+01, -1.80097203e+01, -2.03070131e+00, 2.52908999e+02],
                          [-2.11708058e+01, -2.70304032e+01, 2.48257944e+01, 3.09027527e+08],
                          [2.21959758e+01, 4.00258675e+00, -1.62853977e+01, -9.16280090e-09],
                          [1.61661840e+01, -2.26845150e+01, 2.17226940e+01, -8.24774394e+33],
                          [-3.35030306e+00, 1.32670581e+00, 9.39711214e+00, -1.47303163e+01],
                          [7.23720726e+00, -2.29763909e+01, 2.34709682e+01, -9.20711735e+29],
                          [2.71013568e+01, 1.61951087e+01, -7.11388906e-01, 2.98750911e-01],
                          [8.40057933e+00, -7.49665220e+00, 2.95587388e+01, 6.59465635e+29],
                          [-1.51603423e+01, 1.94032322e+01, -7.60044357e+00, 1.05186941e+02],
                          [-8.83788031e+00, -2.72018313e+01, 1.88269907e+00, 1.81687019e+00],
                          [-1.87283712e+01, 5.87479570e+00, -1.91210203e+01, 2.52235612e+08],
                          [-5.61338513e-01, 2.69490237e+01, 1.16660111e-01, 9.97567783e-01],
                          [-5.44354025e+00, -1.26721408e+01, -4.66831036e+00, 1.06660735e-01],
                          [-2.18846497e+00, 2.33299566e+01, 9.62564397e+00, 3.03842061e-01],
                          [6.65661299e+00, -2.39048713e+01, 1.04191807e+01, 4.73700451e+13],
                          [-2.57298921e+01, -2.60811296e+01, 2.74398110e+01, -5.32566307e+11],
                          [-1.11431826e+01, -1.59420160e+01, -1.84880553e+01, -1.01514747e+02],
                          [6.50301931e+00, 2.59859051e+01, -2.33270137e+01, 1.22760500e-02],
                          [-1.94987891e+01, -2.62123262e+01, 3.90323225e+00, 1.71658894e+01],
                          [7.26164601e+00, -1.41469402e+01, 2.81499763e+01, -2.50068329e+31],
                          [-1.52424040e+01, 2.99719005e+01, -2.85753678e+01, 1.31906693e+04],
                          [5.24149291e+00, -1.72807223e+01, 2.22129493e+01, 2.50748475e+25],
                          [3.63207230e-01, -9.54120862e-02, -2.83874044e+01, 9.43854939e-01],
                          [-2.11326457e+00, -1.25707023e+01, 1.17172130e+00, 1.20812698e+00],
                          [2.48513582e+00, 1.03652647e+01, -1.84625148e+01, 6.47910997e-02],
                          [2.65395942e+01, 2.74794672e+01, 1.29413428e+01, 2.89306132e+05],
                          [-9.49445460e+00, 1.59930921e+01, -1.49596331e+01, 3.27574841e+02],
                          [-5.89173945e+00, 9.96742426e+00, 2.60318889e+01, -3.15842908e-01],
                          [-1.15387239e+01, -2.21433107e+01, -2.17686413e+01, 1.56724718e-01],
                          [-5.30592244e+00, -2.42752190e+01, 1.29734035e+00, 1.31985534e+00]])
        # relative error against the Mathematica reference must stay < 1e-4
        for a,b,c,expected in ref_data:
            result = special.hyp1f1(a,b,c)
            assert_(abs(expected - result)/expected < 1e-4)

    def test_hyp1f1_gh2957(self):
        # continuity across the switch point near -709.78 (exp underflow edge)
        hyp1 = special.hyp1f1(0.5, 1.5, -709.7827128933)
        hyp2 = special.hyp1f1(0.5, 1.5, -709.7827128934)
        assert_almost_equal(hyp1, hyp2, 12)

    def test_hyp1f1_gh2282(self):
        # large negative argument regression (gh-2282)
        hyp = special.hyp1f1(0.5, 1.5, -1000)
        assert_almost_equal(hyp, 0.028024956081989643, 12)

    def test_hyp2f1(self):
        # a collection of special cases taken from AMS 55
        values = [[0.5, 1, 1.5, 0.2**2, 0.5/0.2*log((1+0.2)/(1-0.2))],
                  [0.5, 1, 1.5, -0.2**2, 1./0.2*arctan(0.2)],
                  [1, 1, 2, 0.2, -1/0.2*log(1-0.2)],
                  [3, 3.5, 1.5, 0.2**2,
                      0.5/0.2/(-5)*((1+0.2)**(-5)-(1-0.2)**(-5))],
                  [-3, 3, 0.5, sin(0.2)**2, cos(2*3*0.2)],
                  [3, 4, 8, 1, special.gamma(8)*special.gamma(8-4-3)/special.gamma(8-3)/special.gamma(8-4)],
                  [3, 2, 3-2+1, -1, 1./2**3*sqrt(pi) *
                      special.gamma(1+3-2)/special.gamma(1+0.5*3-2)/special.gamma(0.5+0.5*3)],
                  [5, 2, 5-2+1, -1, 1./2**5*sqrt(pi) *
                      special.gamma(1+5-2)/special.gamma(1+0.5*5-2)/special.gamma(0.5+0.5*5)],
                  [4, 0.5+4, 1.5-2*4, -1./3, (8./9)**(-2*4)*special.gamma(4./3) *
                      special.gamma(1.5-2*4)/special.gamma(3./2)/special.gamma(4./3-2*4)],
                  # and some others
                  # ticket #424
                  [1.5, -0.5, 1.0, -10.0, 4.1300097765277476484],
                  # negative integer a or b, with c-a-b integer and x > 0.9
                  [-2,3,1,0.95,0.715],
                  [2,-3,1,0.95,-0.007],
                  [-6,3,1,0.95,0.0000810625],
                  [2,-5,1,0.95,-0.000029375],
                  # huge negative integers
                  (10, -900, 10.5, 0.99, 1.91853705796607664803709475658e-24),
                  (10, -900, -10.5, 0.99, 3.54279200040355710199058559155e-18),
                  ]
        for i, (a, b, c, x, v) in enumerate(values):
            cv = special.hyp2f1(a, b, c, x)
            assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)

    def test_hyperu(self):
        # spot value, then check against the Kummer-function representation
        # U(a,b,z) = pi/sin(pi b) * [M(a,b,z)/(Gamma(1+a-b)Gamma(b))
        #                            - z^(1-b) M(1+a-b,2-b,z)/(Gamma(a)Gamma(2-b))]
        val1 = special.hyperu(1,0.1,100)
        assert_almost_equal(val1,0.0098153,7)
        a,b = [0.3,0.6,1.2,-2.7],[1.5,3.2,-0.4,-3.2]
        a,b = asarray(a), asarray(b)
        z = 0.5
        hypu = special.hyperu(a,b,z)
        hprl = (pi/sin(pi*b))*(special.hyp1f1(a,b,z) /
                               (special.gamma(1+a-b)*special.gamma(b)) -
                               z**(1-b)*special.hyp1f1(1+a-b,2-b,z)
                               / (special.gamma(a)*special.gamma(2-b)))
        assert_array_almost_equal(hypu,hprl,12)

    def test_hyperu_gh2287(self):
        # regression value for gh-2287
        assert_almost_equal(special.hyperu(1, 1.5, 20.2),
                            0.048360918656699191, 12)
class TestBessel:
    def test_itj0y0(self):
        # integrals of J0 and Y0 from 0 to x, at x = 0.2
        it0 = array(special.itj0y0(.2))
        assert_array_almost_equal(it0,array([0.19933433254006822, -0.34570883800412566]),8)

    def test_it2j0y0(self):
        # integrals of (1-J0(t))/t and Y0(t)/t, at x = 0.2
        it2 = array(special.it2j0y0(.2))
        assert_array_almost_equal(it2,array([0.0049937546274601858, -0.43423067011231614]),8)

    def test_negv_iv(self):
        # I_{-n}(x) == I_n(x) for integer order
        assert_equal(special.iv(3,2), special.iv(-3,2))

    def test_j0(self):
        # j0 is shorthand for jn(0, .)
        oz = special.j0(.1)
        ozr = special.jn(0,.1)
        assert_almost_equal(oz,ozr,8)

    def test_j1(self):
        # j1 is shorthand for jn(1, .)
        o1 = special.j1(.1)
        o1r = special.jn(1,.1)
        assert_almost_equal(o1,o1r,8)

    def test_jn(self):
        # spot value of J_1(0.2)
        jnnr = special.jn(1,.2)
        assert_almost_equal(jnnr,0.099500832639235995,8)

    def test_negv_jv(self):
        # J_{-n}(x) == (-1)^n J_n(x); odd n flips sign
        assert_almost_equal(special.jv(-3,2), -special.jv(3,2), 14)

    def test_jv(self):
        # reference values (v, x, J_v(x)), incl. tiny x and fractional order
        values = [[0, 0.1, 0.99750156206604002],
                  [2./3, 1e-8, 0.3239028506761532e-5],
                  [2./3, 1e-10, 0.1503423854873779e-6],
                  [3.1, 1e-10, 0.1711956265409013e-32],
                  [2./3, 4.0, -0.2325440850267039],
                  ]
        for i, (v, x, y) in enumerate(values):
            yc = special.jv(v, x)
            assert_almost_equal(yc, y, 8, err_msg='test #%d' % i)

    def test_negv_jve(self):
        # reflection symmetry holds for the exponentially scaled jve too
        assert_almost_equal(special.jve(-3,2), -special.jve(3,2), 14)

    def test_jve(self):
        # for real x, jve == jv; for complex z, jve = jv * exp(-|Im z|)
        jvexp = special.jve(1,.2)
        assert_almost_equal(jvexp,0.099500832639235995,8)
        jvexp1 = special.jve(1,.2+1j)
        z = .2+1j
        jvexpr = special.jv(1,z)*exp(-abs(z.imag))
        assert_almost_equal(jvexp1,jvexpr,8)
    def test_jn_zeros(self):
        # zeros of J0 and J1 against tabulated values; then high orders
        # against high-precision references
        jn0 = special.jn_zeros(0,5)
        jn1 = special.jn_zeros(1,5)
        assert_array_almost_equal(jn0,array([2.4048255577,
                                              5.5200781103,
                                              8.6537279129,
                                              11.7915344391,
                                              14.9309177086]),4)
        assert_array_almost_equal(jn1,array([3.83171,
                                              7.01559,
                                              10.17347,
                                              13.32369,
                                              16.47063]),4)

        jn102 = special.jn_zeros(102,5)
        assert_allclose(jn102, array([110.89174935992040343,
                                      117.83464175788308398,
                                      123.70194191713507279,
                                      129.02417238949092824,
                                      134.00114761868422559]), rtol=1e-13)

        jn301 = special.jn_zeros(301,5)
        assert_allclose(jn301, array([313.59097866698830153,
                                      323.21549776096288280,
                                      331.22338738656748796,
                                      338.39676338872084500,
                                      345.03284233056064157]), rtol=1e-13)

    def test_jn_zeros_slow(self):
        # deep zeros (hundreds of roots) and a very high order
        jn0 = special.jn_zeros(0, 300)
        assert_allclose(jn0[260-1], 816.02884495068867280, rtol=1e-13)
        assert_allclose(jn0[280-1], 878.86068707124422606, rtol=1e-13)
        assert_allclose(jn0[300-1], 941.69253065317954064, rtol=1e-13)

        jn10 = special.jn_zeros(10, 300)
        assert_allclose(jn10[260-1], 831.67668514305631151, rtol=1e-13)
        assert_allclose(jn10[280-1], 894.51275095371316931, rtol=1e-13)
        assert_allclose(jn10[300-1], 957.34826370866539775, rtol=1e-13)

        jn3010 = special.jn_zeros(3010,5)
        assert_allclose(jn3010, array([3036.86590780927,
                                       3057.06598526482,
                                       3073.66360690272,
                                       3088.37736494778,
                                       3101.86438139042]), rtol=1e-8)

    def test_jnjnp_zeros(self):
        # jnjnp_zeros interleaves zeros of J_n and J_n'; verify each returned
        # zero actually annihilates the corresponding function
        jn = special.jn

        def jnp(n, x):
            # derivative via the standard recurrence
            return (jn(n-1,x) - jn(n+1,x))/2

        for nt in range(1, 30):
            z, n, m, t = special.jnjnp_zeros(nt)
            for zz, nn, tt in zip(z, n, t):
                if tt == 0:
                    assert_allclose(jn(nn, zz), 0, atol=1e-6)
                elif tt == 1:
                    assert_allclose(jnp(nn, zz), 0, atol=1e-6)
                else:
                    raise AssertionError("Invalid t return for nt=%d" % nt)

    def test_jnp_zeros(self):
        # zeros of J_n'; check both tabulated values and a high order
        jnp = special.jnp_zeros(1,5)
        assert_array_almost_equal(jnp, array([1.84118,
                                              5.33144,
                                              8.53632,
                                              11.70600,
                                              14.86359]),4)
        jnp = special.jnp_zeros(443,5)
        assert_allclose(special.jvp(443, jnp), 0, atol=1e-15)

    def test_jnyn_zeros(self):
        # combined zeros of J_n, J_n', Y_n, Y_n' for n=1
        jnz = special.jnyn_zeros(1,5)
        assert_array_almost_equal(jnz,(array([3.83171,
                                              7.01559,
                                              10.17347,
                                              13.32369,
                                              16.47063]),
                                       array([1.84118,
                                              5.33144,
                                              8.53632,
                                              11.70600,
                                              14.86359]),
                                       array([2.19714,
                                              5.42968,
                                              8.59601,
                                              11.74915,
                                              14.89744]),
                                       array([3.68302,
                                              6.94150,
                                              10.12340,
                                              13.28576,
                                              16.44006])),5)
    def test_jvp(self):
        # derivative recurrence: J_2'(x) = (J_1(x) - J_3(x)) / 2
        jvprim = special.jvp(2,2)
        jv0 = (special.jv(1,2)-special.jv(3,2))/2
        assert_almost_equal(jvprim,jv0,10)

    def test_k0(self):
        # k0 is shorthand for kv(0, .)
        ozk = special.k0(.1)
        ozkr = special.kv(0,.1)
        assert_almost_equal(ozk,ozkr,8)

    def test_k0e(self):
        # k0e is shorthand for kve(0, .)
        ozke = special.k0e(.1)
        ozker = special.kve(0,.1)
        assert_almost_equal(ozke,ozker,8)

    def test_k1(self):
        # k1 is shorthand for kv(1, .)
        o1k = special.k1(.1)
        o1kr = special.kv(1,.1)
        assert_almost_equal(o1k,o1kr,8)

    def test_k1e(self):
        # k1e is shorthand for kve(1, .)
        o1ke = special.k1e(.1)
        o1ker = special.kve(1,.1)
        assert_almost_equal(o1ke,o1ker,8)

    def test_jacobi(self):
        # Jacobi polynomial coefficients for degrees 0..3, with random
        # parameters a, b in (-1, 4), checked against the explicit formulas
        a = 5*np.random.random() - 1
        b = 5*np.random.random() - 1
        P0 = special.jacobi(0,a,b)
        P1 = special.jacobi(1,a,b)
        P2 = special.jacobi(2,a,b)
        P3 = special.jacobi(3,a,b)

        assert_array_almost_equal(P0.c,[1],13)
        assert_array_almost_equal(P1.c,array([a+b+2,a-b])/2.0,13)
        cp = [(a+b+3)*(a+b+4), 4*(a+b+3)*(a+2), 4*(a+1)*(a+2)]
        p2c = [cp[0],cp[1]-2*cp[0],cp[2]-cp[1]+cp[0]]
        assert_array_almost_equal(P2.c,array(p2c)/8.0,13)
        cp = [(a+b+4)*(a+b+5)*(a+b+6),6*(a+b+4)*(a+b+5)*(a+3),
              12*(a+b+4)*(a+2)*(a+3),8*(a+1)*(a+2)*(a+3)]
        p3c = [cp[0],cp[1]-3*cp[0],cp[2]-2*cp[1]+3*cp[0],cp[3]-cp[2]+cp[1]-cp[0]]
        assert_array_almost_equal(P3.c,array(p3c)/48.0,13)

    def test_kn(self):
        # spot value of K_0(0.2)
        kn1 = special.kn(0,.2)
        assert_almost_equal(kn1,1.7527038555281462,8)

    def test_negv_kv(self):
        # K_{-v}(x) == K_v(x) for all v
        assert_equal(special.kv(3.0, 2.2), special.kv(-3.0, 2.2))

    def test_kv0(self):
        kv0 = special.kv(0,.2)
        assert_almost_equal(kv0, 1.7527038555281462, 10)

    def test_kv1(self):
        kv1 = special.kv(1,0.2)
        assert_almost_equal(kv1, 4.775972543220472, 10)

    def test_kv2(self):
        kv2 = special.kv(2,0.2)
        assert_almost_equal(kv2, 49.51242928773287, 10)

    def test_kn_largeorder(self):
        # K_n grows rapidly with order at fixed argument
        assert_allclose(special.kn(32, 1), 1.7516596664574289e+43)

    def test_kv_largearg(self):
        # K_v underflows to exactly 0 for huge arguments
        assert_equal(special.kv(0, 1e19), 0)

    def test_negv_kve(self):
        # order symmetry also holds for the scaled kve
        assert_equal(special.kve(3.0, 2.2), special.kve(-3.0, 2.2))

    def test_kve(self):
        # kve(v, z) = kv(v, z) * exp(z), real and complex
        kve1 = special.kve(0,.2)
        kv1 = special.kv(0,.2)*exp(.2)
        assert_almost_equal(kve1,kv1,8)
        z = .2+1j
        kve2 = special.kve(0,z)
        kv2 = special.kv(0,z)*exp(z)
        assert_almost_equal(kve2,kv2,8)

    def test_kvp_v0n1(self):
        # K_0'(z) = -K_1(z)
        z = 2.2
        assert_almost_equal(-special.kv(1,z), special.kvp(0,z, n=1), 10)

    def test_kvp_n1(self):
        # first derivative recurrence: K_v'(z) = -K_{v+1}(z) + (v/z) K_v(z)
        v = 3.
        z = 2.2
        xc = -special.kv(v+1,z) + v/z*special.kv(v,z)
        x = special.kvp(v,z, n=1)
        assert_almost_equal(xc, x, 10)  # this function (kvp) is broken

    def test_kvp_n2(self):
        # second derivative via the modified Bessel ODE
        v = 3.
        z = 2.2
        xc = (z**2+v**2-v)/z**2 * special.kv(v,z) + special.kv(v+1,z)/z
        x = special.kvp(v, z, n=2)
        assert_almost_equal(xc, x, 10)
    def test_y0(self):
        # y0 is shorthand for yn(0, .)
        oz = special.y0(.1)
        ozr = special.yn(0,.1)
        assert_almost_equal(oz,ozr,8)

    def test_y1(self):
        # y1 is shorthand for yn(1, .)
        o1 = special.y1(.1)
        o1r = special.yn(1,.1)
        assert_almost_equal(o1,o1r,8)

    def test_y0_zeros(self):
        # real and complex zeros of Y0: evaluating Y0 there gives ~0,
        # and the returned derivative values match Y1 at the zeros
        yo,ypo = special.y0_zeros(2)
        zo,zpo = special.y0_zeros(2,complex=1)
        all = r_[yo,zo]
        allval = r_[ypo,zpo]
        assert_array_almost_equal(abs(special.yv(0.0,all)),0.0,11)
        assert_array_almost_equal(abs(special.yv(1,all)-allval),0.0,11)

    def test_y1_zeros(self):
        # first zero of Y1 and the value of Y1' there
        y1 = special.y1_zeros(1)
        assert_array_almost_equal(y1,(array([2.19714]),array([0.52079])),5)

    def test_y1p_zeros(self):
        # first complex zero of Y1' and the value of Y1 there
        y1p = special.y1p_zeros(1,complex=1)
        assert_array_almost_equal(y1p,(array([0.5768+0.904j]), array([-0.7635+0.5892j])),3)

    def test_yn_zeros(self):
        # zeros of Y4 against table, then high order against references
        an = special.yn_zeros(4,2)
        assert_array_almost_equal(an,array([5.64515, 9.36162]),5)
        an = special.yn_zeros(443,5)
        assert_allclose(an, [450.13573091578090314, 463.05692376675001542,
                             472.80651546418663566, 481.27353184725625838,
                             488.98055964441374646], rtol=1e-15)

    def test_ynp_zeros(self):
        # zeros of Y_n'; verify via the derivative itself
        ao = special.ynp_zeros(0,2)
        assert_array_almost_equal(ao,array([2.19714133, 5.42968104]),6)
        ao = special.ynp_zeros(43,5)
        assert_allclose(special.yvp(43, ao), 0, atol=1e-15)
        ao = special.ynp_zeros(443,5)
        assert_allclose(special.yvp(443, ao), 0, atol=1e-9)

    def test_ynp_zeros_large_order(self):
        # tighter tolerance on the high-order derivative zeros
        ao = special.ynp_zeros(443,5)
        assert_allclose(special.yvp(443, ao), 0, atol=1e-14)

    def test_yn(self):
        # spot value of Y_1(0.2)
        yn2n = special.yn(1,.2)
        assert_almost_equal(yn2n,-3.3238249881118471,8)

    def test_negv_yv(self):
        # Y_{-n}(x) == (-1)^n Y_n(x); odd n flips sign
        assert_almost_equal(special.yv(-3,2), -special.yv(3,2), 14)

    def test_yv(self):
        yv2 = special.yv(1,.2)
        assert_almost_equal(yv2,-3.3238249881118471,8)

    def test_negv_yve(self):
        # reflection symmetry for the scaled yve
        assert_almost_equal(special.yve(-3,2), -special.yve(3,2), 14)

    def test_yve(self):
        # for real x, yve == yv; for complex z, yve = yv * exp(-|Im z|)
        yve2 = special.yve(1,.2)
        assert_almost_equal(yve2,-3.3238249881118471,8)
        yve2r = special.yv(1,.2+1j)*exp(-1)
        yve22 = special.yve(1,.2+1j)
        assert_almost_equal(yve22,yve2r,8)

    def test_yvp(self):
        # derivative recurrence: Y_2'(x) = (Y_1(x) - Y_3(x)) / 2
        yvpr = (special.yv(1,.2) - special.yv(3,.2))/2.0
        yvp1 = special.yvp(2,.2)
        assert_array_almost_equal(yvp1,yvpr,10)
    def _cephes_vs_amos_points(self):
        """Yield points at which to compare Cephes implementation to AMOS"""
        # check several points, including large-amplitude ones
        v = [-120, -100.3, -20., -10., -1., -.5, 0., 1., 12.49, 120., 301]
        z = [-1300, -11, -10, -1, 1., 10., 200.5, 401., 600.5, 700.6, 1300,
             10003]
        yield from itertools.product(v, z)

        # check half-integers; these are problematic points at least
        # for cephes/iv
        yield from itertools.product(0.5 + arange(-60, 60), [3.5])

    def check_cephes_vs_amos(self, f1, f2, rtol=1e-11, atol=0, skip=None):
        """Compare f1 on real (Cephes) vs complex (AMOS) input, and f2 for
        integer orders, at the grid from _cephes_vs_amos_points.

        f1 : real-order routine (e.g. special.jv)
        f2 : integer-order routine (e.g. special.jn)
        skip : optional predicate (v, z) -> bool to exclude points
        """
        for v, z in self._cephes_vs_amos_points():
            if skip is not None and skip(v, z):
                continue
            c1, c2, c3 = f1(v, z), f1(v,z+0j), f2(int(v), z)
            if np.isinf(c1):
                # real overflow must correspond to a huge complex magnitude
                assert_(np.abs(c2) >= 1e300, (v, z))
            elif np.isnan(c1):
                # real NaN is acceptable only where the true value is complex
                assert_(c2.imag != 0, (v, z))
            else:
                assert_allclose(c1, c2, err_msg=(v, z), rtol=rtol, atol=atol)
                if v == int(v):
                    assert_allclose(c3, c2, err_msg=(v, z),
                                     rtol=rtol, atol=atol)

    @pytest.mark.xfail(platform.machine() == 'ppc64le',
                       reason="fails on ppc64le")
    def test_jv_cephes_vs_amos(self):
        self.check_cephes_vs_amos(special.jv, special.jn, rtol=1e-10, atol=1e-305)

    @pytest.mark.xfail(platform.machine() == 'ppc64le',
                       reason="fails on ppc64le")
    def test_yv_cephes_vs_amos(self):
        self.check_cephes_vs_amos(special.yv, special.yn, rtol=1e-11, atol=1e-305)

    def test_yv_cephes_vs_amos_only_small_orders(self):
        # restrict to |v| <= 50 where the comparison is reliable
        def skipper(v, z):
            return abs(v) > 50

        self.check_cephes_vs_amos(special.yv, special.yn, rtol=1e-11, atol=1e-305, skip=skipper)

    def test_iv_cephes_vs_amos(self):
        with np.errstate(all='ignore'):
            self.check_cephes_vs_amos(special.iv, special.iv, rtol=5e-9, atol=1e-305)

    @pytest.mark.slow
    def test_iv_cephes_vs_amos_mass_test(self):
        # randomized large-scale comparison of real vs complex iv
        N = 1000000
        np.random.seed(1)
        v = np.random.pareto(0.5, N) * (-1)**np.random.randint(2, size=N)
        x = np.random.pareto(0.2, N) * (-1)**np.random.randint(2, size=N)

        imsk = (np.random.randint(8, size=N) == 0)
        v[imsk] = v[imsk].astype(int)

        with np.errstate(all='ignore'):
            c1 = special.iv(v, x)
            c2 = special.iv(v, x+0j)

            # deal with differences in the inf and zero cutoffs
            c1[abs(c1) > 1e300] = np.inf
            c2[abs(c2) > 1e300] = np.inf
            c1[abs(c1) < 1e-300] = 0
            c2[abs(c2) < 1e-300] = 0

            dc = abs(c1/c2 - 1)
            dc[np.isnan(dc)] = 0

        k = np.argmax(dc)

        # Most error apparently comes from AMOS and not our implementation;
        # there are some problems near integer orders there
        assert_(dc[k] < 2e-7, (v[k], x[k], special.iv(v[k], x[k]), special.iv(v[k], x[k]+0j)))

    def test_kv_cephes_vs_amos(self):
        self.check_cephes_vs_amos(special.kv, special.kn, rtol=1e-9, atol=1e-305)
        self.check_cephes_vs_amos(special.kv, special.kv, rtol=1e-9, atol=1e-305)
    def test_ticket_623(self):
        # regression values for large orders/arguments (ticket #623)
        assert_allclose(special.jv(3, 4), 0.43017147387562193)
        assert_allclose(special.jv(301, 1300), 0.0183487151115275)
        assert_allclose(special.jv(301, 1296.0682), -0.0224174325312048)

    def test_ticket_853(self):
        """Negative-order Bessels"""
        # cephes
        assert_allclose(special.jv(-1, 1), -0.4400505857449335)
        assert_allclose(special.jv(-2, 1), 0.1149034849319005)
        assert_allclose(special.yv(-1, 1), 0.7812128213002887)
        assert_allclose(special.yv(-2, 1), -1.650682606816255)
        assert_allclose(special.iv(-1, 1), 0.5651591039924851)
        assert_allclose(special.iv(-2, 1), 0.1357476697670383)
        assert_allclose(special.kv(-1, 1), 0.6019072301972347)
        assert_allclose(special.kv(-2, 1), 1.624838898635178)
        assert_allclose(special.jv(-0.5, 1), 0.43109886801837607952)
        assert_allclose(special.yv(-0.5, 1), 0.6713967071418031)
        assert_allclose(special.iv(-0.5, 1), 1.231200214592967)
        assert_allclose(special.kv(-0.5, 1), 0.4610685044478945)
        # amos
        assert_allclose(special.jv(-1, 1+0j), -0.4400505857449335)
        assert_allclose(special.jv(-2, 1+0j), 0.1149034849319005)
        assert_allclose(special.yv(-1, 1+0j), 0.7812128213002887)
        assert_allclose(special.yv(-2, 1+0j), -1.650682606816255)

        assert_allclose(special.iv(-1, 1+0j), 0.5651591039924851)
        assert_allclose(special.iv(-2, 1+0j), 0.1357476697670383)
        assert_allclose(special.kv(-1, 1+0j), 0.6019072301972347)
        assert_allclose(special.kv(-2, 1+0j), 1.624838898635178)

        assert_allclose(special.jv(-0.5, 1+0j), 0.43109886801837607952)
        assert_allclose(special.jv(-0.5, 1+1j), 0.2628946385649065-0.827050182040562j)
        assert_allclose(special.yv(-0.5, 1+0j), 0.6713967071418031)
        assert_allclose(special.yv(-0.5, 1+1j), 0.967901282890131+0.0602046062142816j)

        assert_allclose(special.iv(-0.5, 1+0j), 1.231200214592967)
        assert_allclose(special.iv(-0.5, 1+1j), 0.77070737376928+0.39891821043561j)
        assert_allclose(special.kv(-0.5, 1+0j), 0.4610685044478945)
        assert_allclose(special.kv(-0.5, 1+1j), 0.06868578341999-0.38157825981268j)

        # scaled variants must be consistent with their definitions
        assert_allclose(special.jve(-0.5,1+0.3j), special.jv(-0.5, 1+0.3j)*exp(-0.3))
        assert_allclose(special.yve(-0.5,1+0.3j), special.yv(-0.5, 1+0.3j)*exp(-0.3))
        assert_allclose(special.ive(-0.5,0.3+1j), special.iv(-0.5, 0.3+1j)*exp(-0.3))
        assert_allclose(special.kve(-0.5,0.3+1j), special.kv(-0.5, 0.3+1j)*exp(0.3+1j))

        # Hankel functions from their J/Y definitions
        assert_allclose(special.hankel1(-0.5, 1+1j), special.jv(-0.5, 1+1j) + 1j*special.yv(-0.5,1+1j))
        assert_allclose(special.hankel2(-0.5, 1+1j), special.jv(-0.5, 1+1j) - 1j*special.yv(-0.5,1+1j))

    def test_ticket_854(self):
        """Real-valued Bessel domains"""
        # negative arguments are outside the real domain -> NaN
        assert_(isnan(special.jv(0.5, -1)))
        assert_(isnan(special.iv(0.5, -1)))
        assert_(isnan(special.yv(0.5, -1)))
        assert_(isnan(special.yv(1, -1)))
        assert_(isnan(special.kv(0.5, -1)))
        assert_(isnan(special.kv(1, -1)))
        assert_(isnan(special.jve(0.5, -1)))
        assert_(isnan(special.ive(0.5, -1)))
        assert_(isnan(special.yve(0.5, -1)))
        assert_(isnan(special.yve(1, -1)))
        assert_(isnan(special.kve(0.5, -1)))
        assert_(isnan(special.kve(1, -1)))
        # airye: Ai parts NaN at -1, Bi parts finite
        assert_(isnan(special.airye(-1)[0:2]).all(), special.airye(-1))
        assert_(not isnan(special.airye(-1)[2:4]).any(), special.airye(-1))

    def test_gh_7909(self):
        # K_v diverges at z = 0
        assert_(special.kv(1.5, 0) == np.inf)
        assert_(special.kve(1.5, 0) == np.inf)

    def test_ticket_503(self):
        """Real-valued Bessel I overflow"""
        assert_allclose(special.iv(1, 700), 1.528500390233901e302)
        assert_allclose(special.iv(1000, 1120), 1.301564549405821e301)

    def test_iv_hyperg_poles(self):
        # half-integer order hits poles of the hypergeometric representation
        assert_allclose(special.iv(-0.5, 1), 1.231200214592967)

    def iv_series(self, v, z, n=200):
        """Partial sum of the power series for I_v(z), with an error bound.

        Returns (value, err) where err combines truncation and rounding.
        """
        k = arange(0, n).astype(float_)
        # log-space terms avoid premature overflow; NaN terms (from gammaln
        # at poles) contribute zero after exp
        r = (v+2*k)*log(.5*z) - special.gammaln(k+1) - special.gammaln(v+k+1)
        r[isnan(r)] = inf
        r = exp(r)
        err = abs(r).max() * finfo(float_).eps * n + abs(r[-1])*10
        return r.sum(), err

    def test_i0_series(self):
        # compare i0 against the direct power series
        for z in [1., 10., 200.5]:
            value, err = self.iv_series(0, z)
            assert_allclose(special.i0(z), value, atol=err, err_msg=z)

    def test_i1_series(self):
        # compare i1 against the direct power series
        for z in [1., 10., 200.5]:
            value, err = self.iv_series(1, z)
            assert_allclose(special.i1(z), value, atol=err, err_msg=z)

    def test_iv_series(self):
        # compare iv against the series for several orders and arguments,
        # including negative orders and a complex point
        for v in [-20., -10., -1., 0., 1., 12.49, 120.]:
            for z in [1., 10., 200.5, -1+2j]:
                value, err = self.iv_series(v, z)
                assert_allclose(special.iv(v, z), value, atol=err, err_msg=(v, z))
def test_i0(self):
values = [[0.0, 1.0],
[1e-10, 1.0],
[0.1, 0.9071009258],
[0.5, 0.6450352706],
[1.0, 0.4657596077],
[2.5, 0.2700464416],
[5.0, 0.1835408126],
[20.0, 0.0897803119],
]
for i, (x, v) in enumerate(values):
cv = special.i0(x) * exp(-x)
assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)
def test_i0e(self):
oize = special.i0e(.1)
oizer = special.ive(0,.1)
assert_almost_equal(oize,oizer,8)
def test_i1(self):
values = [[0.0, 0.0],
[1e-10, 0.4999999999500000e-10],
[0.1, 0.0452984468],
[0.5, 0.1564208032],
[1.0, 0.2079104154],
[5.0, 0.1639722669],
[20.0, 0.0875062222],
]
for i, (x, v) in enumerate(values):
cv = special.i1(x) * exp(-x)
assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)
def test_i1e(self):
oi1e = special.i1e(.1)
oi1er = special.ive(1,.1)
assert_almost_equal(oi1e,oi1er,8)
def test_iti0k0(self):
iti0 = array(special.iti0k0(5))
assert_array_almost_equal(iti0,array([31.848667776169801, 1.5673873907283657]),5)
def test_it2i0k0(self):
it2k = special.it2i0k0(.1)
assert_array_almost_equal(it2k,array([0.0012503906973464409, 3.3309450354686687]),6)
def test_iv(self):
iv1 = special.iv(0,.1)*exp(-.1)
assert_almost_equal(iv1,0.90710092578230106,10)
def test_negv_ive(self):
assert_equal(special.ive(3,2), special.ive(-3,2))
def test_ive(self):
ive1 = special.ive(0,.1)
iv1 = special.iv(0,.1)*exp(-.1)
assert_almost_equal(ive1,iv1,10)
def test_ivp0(self):
assert_almost_equal(special.iv(1,2), special.ivp(0,2), 10)
def test_ivp(self):
y = (special.iv(0,2) + special.iv(2,2))/2
x = special.ivp(1,2)
assert_almost_equal(x,y,10)
class TestLaguerre:
    """Laguerre polynomial coefficients versus tabulated closed forms."""

    def test_laguerre(self):
        # Explicit coefficient arrays for L_0 .. L_5 (highest power first).
        expected = [
            [1],
            [-1, 1],
            array([1, -4, 2]) / 2.0,
            array([-1, 9, -18, 6]) / 6.0,
            array([1, -16, 72, -96, 24]) / 24.0,
            array([-1, 25, -200, 600, -600, 120]) / 120.0,
        ]
        for order, coeffs in enumerate(expected):
            assert_array_almost_equal(special.laguerre(order).c, coeffs, 13)

    def test_genlaguerre(self):
        # A random parameter k in (-0.9, 4.1) exercises the generic formulas.
        k = 5*np.random.random() - 0.9
        polys = [special.genlaguerre(order, k) for order in range(4)]
        assert_equal(polys[0].c, [1])
        assert_equal(polys[1].c, [-1, k+1])
        assert_almost_equal(polys[2].c,
                            array([1, -2*(k+2), (k+1.)*(k+2.)])/2.0)
        assert_almost_equal(polys[3].c,
                            array([-1, 3*(k+3), -3*(k+2)*(k+3),
                                   (k+1)*(k+2)*(k+3)])/6.0)
# Base polynomials come from Abrahmowitz and Stegan
class TestLegendre:
    """Legendre polynomial coefficients versus tabulated closed forms."""

    def test_legendre(self):
        # P_0 .. P_5, coefficients listed highest power first.
        assert_equal(special.legendre(0).c, [1])
        assert_equal(special.legendre(1).c, [1, 0])
        assert_almost_equal(special.legendre(2).c, array([3, 0, -1])/2.0,
                            decimal=13)
        assert_almost_equal(special.legendre(3).c, array([5, 0, -3, 0])/2.0)
        assert_almost_equal(special.legendre(4).c,
                            array([35, 0, -30, 0, 3])/8.0)
        assert_almost_equal(special.legendre(5).c,
                            array([63, 0, -70, 0, 15, 0])/8.0)
class TestLambda:
def test_lmbda(self):
lam = special.lmbda(1,.1)
lamr = (array([special.jn(0,.1), 2*special.jn(1,.1)/.1]),
array([special.jvp(0,.1), -2*special.jv(1,.1)/.01 + 2*special.jvp(1,.1)/.1]))
assert_array_almost_equal(lam,lamr,8)
class TestLog1p:
    """log1p(x) must agree with log(1 + x) away from the tiny-x regime."""

    def test_log1p(self):
        inputs = (10, 11, 12)
        computed = tuple(special.log1p(x) for x in inputs)
        expected = tuple(log(x + 1) for x in inputs)
        assert_array_almost_equal(computed, expected, 8)

    def test_log1pmore(self):
        inputs = (1, 1.1, 1.2)
        computed = tuple(special.log1p(x) for x in inputs)
        expected = (log(2), log(2.1), log(2.2))
        assert_array_almost_equal(computed, expected, 8)
class TestLegendreFunctions:
def test_clpmn(self):
z = 0.5+0.3j
clp = special.clpmn(2, 2, z, 3)
assert_array_almost_equal(clp,
(array([[1.0000, z, 0.5*(3*z*z-1)],
[0.0000, sqrt(z*z-1), 3*z*sqrt(z*z-1)],
[0.0000, 0.0000, 3*(z*z-1)]]),
array([[0.0000, 1.0000, 3*z],
[0.0000, z/sqrt(z*z-1), 3*(2*z*z-1)/sqrt(z*z-1)],
[0.0000, 0.0000, 6*z]])),
7)
def test_clpmn_close_to_real_2(self):
eps = 1e-10
m = 1
n = 3
x = 0.5
clp_plus = special.clpmn(m, n, x+1j*eps, 2)[0][m, n]
clp_minus = special.clpmn(m, n, x-1j*eps, 2)[0][m, n]
assert_array_almost_equal(array([clp_plus, clp_minus]),
array([special.lpmv(m, n, x),
special.lpmv(m, n, x)]),
7)
def test_clpmn_close_to_real_3(self):
eps = 1e-10
m = 1
n = 3
x = 0.5
clp_plus = special.clpmn(m, n, x+1j*eps, 3)[0][m, n]
clp_minus = special.clpmn(m, n, x-1j*eps, 3)[0][m, n]
assert_array_almost_equal(array([clp_plus, clp_minus]),
array([special.lpmv(m, n, x)*np.exp(-0.5j*m*np.pi),
special.lpmv(m, n, x)*np.exp(0.5j*m*np.pi)]),
7)
def test_clpmn_across_unit_circle(self):
eps = 1e-7
m = 1
n = 1
x = 1j
for type in [2, 3]:
assert_almost_equal(special.clpmn(m, n, x+1j*eps, type)[0][m, n],
special.clpmn(m, n, x-1j*eps, type)[0][m, n], 6)
def test_inf(self):
for z in (1, -1):
for n in range(4):
for m in range(1, n):
lp = special.clpmn(m, n, z)
assert_(np.isinf(lp[1][1,1:]).all())
lp = special.lpmn(m, n, z)
assert_(np.isinf(lp[1][1,1:]).all())
def test_deriv_clpmn(self):
# data inside and outside of the unit circle
zvals = [0.5+0.5j, -0.5+0.5j, -0.5-0.5j, 0.5-0.5j,
1+1j, -1+1j, -1-1j, 1-1j]
m = 2
n = 3
for type in [2, 3]:
for z in zvals:
for h in [1e-3, 1e-3j]:
approx_derivative = (special.clpmn(m, n, z+0.5*h, type)[0]
- special.clpmn(m, n, z-0.5*h, type)[0])/h
assert_allclose(special.clpmn(m, n, z, type)[1],
approx_derivative,
rtol=1e-4)
def test_lpmn(self):
lp = special.lpmn(0,2,.5)
assert_array_almost_equal(lp,(array([[1.00000,
0.50000,
-0.12500]]),
array([[0.00000,
1.00000,
1.50000]])),4)
def test_lpn(self):
lpnf = special.lpn(2,.5)
assert_array_almost_equal(lpnf,(array([1.00000,
0.50000,
-0.12500]),
array([0.00000,
1.00000,
1.50000])),4)
def test_lpmv(self):
lp = special.lpmv(0,2,.5)
assert_almost_equal(lp,-0.125,7)
lp = special.lpmv(0,40,.001)
assert_almost_equal(lp,0.1252678976534484,7)
# XXX: this is outside the domain of the current implementation,
# so ensure it returns a NaN rather than a wrong answer.
with np.errstate(all='ignore'):
lp = special.lpmv(-1,-1,.001)
assert_(lp != 0 or np.isnan(lp))
def test_lqmn(self):
lqmnf = special.lqmn(0,2,.5)
lqf = special.lqn(2,.5)
assert_array_almost_equal(lqmnf[0][0],lqf[0],4)
assert_array_almost_equal(lqmnf[1][0],lqf[1],4)
def test_lqmn_gt1(self):
"""algorithm for real arguments changes at 1.0001
test against analytical result for m=2, n=1
"""
x0 = 1.0001
delta = 0.00002
for x in (x0-delta, x0+delta):
lq = special.lqmn(2, 1, x)[0][-1, -1]
expected = 2/(x*x-1)
assert_almost_equal(lq, expected)
def test_lqmn_shape(self):
a, b = special.lqmn(4, 4, 1.1)
assert_equal(a.shape, (5, 5))
assert_equal(b.shape, (5, 5))
a, b = special.lqmn(4, 0, 1.1)
assert_equal(a.shape, (5, 1))
assert_equal(b.shape, (5, 1))
def test_lqn(self):
lqf = special.lqn(2,.5)
assert_array_almost_equal(lqf,(array([0.5493, -0.7253, -0.8187]),
array([1.3333, 1.216, -0.8427])),4)
class TestMathieu:
def test_mathieu_a(self):
pass
def test_mathieu_even_coef(self):
special.mathieu_even_coef(2,5)
# Q not defined broken and cannot figure out proper reporting order
def test_mathieu_odd_coef(self):
# same problem as above
pass
class TestFresnelIntegral:
def test_modfresnelp(self):
pass
def test_modfresnelm(self):
pass
class TestOblCvSeq:
def test_obl_cv_seq(self):
obl = special.obl_cv_seq(0,3,1)
assert_array_almost_equal(obl,array([-0.348602,
1.393206,
5.486800,
11.492120]),5)
class TestParabolicCylinder:
def test_pbdn_seq(self):
pb = special.pbdn_seq(1,.1)
assert_array_almost_equal(pb,(array([0.9975,
0.0998]),
array([-0.0499,
0.9925])),4)
def test_pbdv(self):
special.pbdv(1,.2)
1/2*(.2)*special.pbdv(1,.2)[0] - special.pbdv(0,.2)[0]
def test_pbdv_seq(self):
pbn = special.pbdn_seq(1,.1)
pbv = special.pbdv_seq(1,.1)
assert_array_almost_equal(pbv,(real(pbn[0]),real(pbn[1])),4)
def test_pbdv_points(self):
# simple case
eta = np.linspace(-10, 10, 5)
z = 2**(eta/2)*np.sqrt(np.pi)/special.gamma(.5-.5*eta)
assert_allclose(special.pbdv(eta, 0.)[0], z, rtol=1e-14, atol=1e-14)
# some points
assert_allclose(special.pbdv(10.34, 20.44)[0], 1.3731383034455e-32, rtol=1e-12)
assert_allclose(special.pbdv(-9.53, 3.44)[0], 3.166735001119246e-8, rtol=1e-12)
def test_pbdv_gradient(self):
x = np.linspace(-4, 4, 8)[:,None]
eta = np.linspace(-10, 10, 5)[None,:]
p = special.pbdv(eta, x)
eps = 1e-7 + 1e-7*abs(x)
dp = (special.pbdv(eta, x + eps)[0] - special.pbdv(eta, x - eps)[0]) / eps / 2.
assert_allclose(p[1], dp, rtol=1e-6, atol=1e-6)
def test_pbvv_gradient(self):
x = np.linspace(-4, 4, 8)[:,None]
eta = np.linspace(-10, 10, 5)[None,:]
p = special.pbvv(eta, x)
eps = 1e-7 + 1e-7*abs(x)
dp = (special.pbvv(eta, x + eps)[0] - special.pbvv(eta, x - eps)[0]) / eps / 2.
assert_allclose(p[1], dp, rtol=1e-6, atol=1e-6)
class TestPolygamma:
    # Reference values from Table 6.2 (p. 271) of Abramowitz & Stegun.
    def test_polygamma(self):
        """Check tabulated values, the psi identity, and broadcasting."""
        # Tabulated psi^(2)(1) and psi^(3)(1).
        poly2 = special.polygamma(2, 1)
        poly3 = special.polygamma(3, 1)
        assert_almost_equal(poly2, -2.4041138063, 10)
        assert_almost_equal(poly3, 6.4939394023, 10)

        # polygamma(0, x) must coincide with the digamma function psi(x).
        x = [2, 3, 1.1e14]
        assert_almost_equal(special.polygamma(0, x), special.psi(x))

        # Broadcasting of the order and argument arrays.
        # np.vstack replaces np.row_stack (identical alias) because
        # row_stack is deprecated as of NumPy 2.0.
        n = [0, 1, 2]
        x = [0.5, 1.5, 2.5]
        expected = [-1.9635100260214238, 0.93480220054467933,
                    -0.23620405164172739]
        assert_almost_equal(special.polygamma(n, x), expected)
        expected = np.vstack([expected]*2)
        assert_almost_equal(special.polygamma(n, np.vstack([x]*2)),
                            expected)
        assert_almost_equal(special.polygamma(np.vstack([n]*2), x),
                            expected)
class TestProCvSeq:
def test_pro_cv_seq(self):
prol = special.pro_cv_seq(0,3,1)
assert_array_almost_equal(prol,array([0.319000,
2.593084,
6.533471,
12.514462]),5)
class TestPsi:
def test_psi(self):
ps = special.psi(1)
assert_almost_equal(ps,-0.57721566490153287,8)
class TestRadian:
def test_radian(self):
rad = special.radian(90,0,0)
assert_almost_equal(rad,pi/2.0,5)
def test_radianmore(self):
rad1 = special.radian(90,1,60)
assert_almost_equal(rad1,pi/2+0.0005816135199345904,5)
class TestRiccati:
def test_riccati_jn(self):
N, x = 2, 0.2
S = np.empty((N, N))
for n in range(N):
j = special.spherical_jn(n, x)
jp = special.spherical_jn(n, x, derivative=True)
S[0,n] = x*j
S[1,n] = x*jp + j
assert_array_almost_equal(S, special.riccati_jn(n, x), 8)
def test_riccati_yn(self):
N, x = 2, 0.2
C = np.empty((N, N))
for n in range(N):
y = special.spherical_yn(n, x)
yp = special.spherical_yn(n, x, derivative=True)
C[0,n] = x*y
C[1,n] = x*yp + y
assert_array_almost_equal(C, special.riccati_yn(n, x), 8)
class TestRound:
def test_round(self):
rnd = list(map(int,(special.round(10.1),special.round(10.4),special.round(10.5),special.round(10.6))))
# Note: According to the documentation, scipy.special.round is
# supposed to round to the nearest even number if the fractional
# part is exactly 0.5. On some platforms, this does not appear
# to work and thus this test may fail. However, this unit test is
# correctly written.
rndrl = (10,10,10,11)
assert_array_equal(rnd,rndrl)
def test_sph_harm():
# Tests derived from tables in
# https://en.wikipedia.org/wiki/Table_of_spherical_harmonics
sh = special.sph_harm
pi = np.pi
exp = np.exp
sqrt = np.sqrt
sin = np.sin
cos = np.cos
assert_array_almost_equal(sh(0,0,0,0),
0.5/sqrt(pi))
assert_array_almost_equal(sh(-2,2,0.,pi/4),
0.25*sqrt(15./(2.*pi)) *
(sin(pi/4))**2.)
assert_array_almost_equal(sh(-2,2,0.,pi/2),
0.25*sqrt(15./(2.*pi)))
assert_array_almost_equal(sh(2,2,pi,pi/2),
0.25*sqrt(15/(2.*pi)) *
exp(0+2.*pi*1j)*sin(pi/2.)**2.)
assert_array_almost_equal(sh(2,4,pi/4.,pi/3.),
(3./8.)*sqrt(5./(2.*pi)) *
exp(0+2.*pi/4.*1j) *
sin(pi/3.)**2. *
(7.*cos(pi/3.)**2.-1))
assert_array_almost_equal(sh(4,4,pi/8.,pi/6.),
(3./16.)*sqrt(35./(2.*pi)) *
exp(0+4.*pi/8.*1j)*sin(pi/6.)**4.)
def test_sph_harm_ufunc_loop_selection():
# see https://github.com/scipy/scipy/issues/4895
dt = np.dtype(np.complex128)
assert_equal(special.sph_harm(0, 0, 0, 0).dtype, dt)
assert_equal(special.sph_harm([0], 0, 0, 0).dtype, dt)
assert_equal(special.sph_harm(0, [0], 0, 0).dtype, dt)
assert_equal(special.sph_harm(0, 0, [0], 0).dtype, dt)
assert_equal(special.sph_harm(0, 0, 0, [0]).dtype, dt)
assert_equal(special.sph_harm([0], [0], [0], [0]).dtype, dt)
class TestStruve:
    def _series(self, v, z, n=100):
        """Compute Struve function & error estimate from its power series."""
        k = arange(0, n)
        r = (-1)**k * (.5*z)**(2*k+v+1)/special.gamma(k+1.5)/special.gamma(k+v+1.5)
        # np.float64 replaces the bare float_ alias, removed in NumPy 2.0.
        err = abs(r).max() * finfo(np.float64).eps * n
        return r.sum(), err

    def test_vs_series(self):
        """Check Struve function versus its power series"""
        for v in [-20, -10, -7.99, -3.4, -1, 0, 1, 3.4, 12.49, 16]:
            for z in [1, 10, 19, 21, 30]:
                value, err = self._series(v, z)
                # Attach (v, z) as err_msg; previously it was a discarded
                # trailing tuple and carried no diagnostic on failure.
                assert_allclose(special.struve(v, z), value, rtol=0,
                                atol=err, err_msg=repr((v, z)))

    def test_some_values(self):
        assert_allclose(special.struve(-7.99, 21), 0.0467547614113, rtol=1e-7)
        assert_allclose(special.struve(-8.01, 21), 0.0398716951023, rtol=1e-8)
        assert_allclose(special.struve(-3.0, 200), 0.0142134427432, rtol=1e-12)
        assert_allclose(special.struve(-8.0, -41), 0.0192469727846, rtol=1e-11)
        # Parity in z: H_v(-z) = (-1)^(v+1) H_v(z) for integer v.
        assert_equal(special.struve(-12, -41), -special.struve(-12, 41))
        assert_equal(special.struve(+12, -41), -special.struve(+12, 41))
        assert_equal(special.struve(-11, -41), +special.struve(-11, 41))
        assert_equal(special.struve(+11, -41), +special.struve(+11, 41))

        # Non-integer order with negative argument is outside the real domain.
        assert_(isnan(special.struve(-7.1, -1)))
        assert_(isnan(special.struve(-10.1, -1)))

    def test_regression_679(self):
        """Regression test for #679"""
        assert_allclose(special.struve(-1.0, 20 - 1e-8),
                        special.struve(-1.0, 20 + 1e-8))
        assert_allclose(special.struve(-2.0, 20 - 1e-8),
                        special.struve(-2.0, 20 + 1e-8))
        assert_allclose(special.struve(-4.3, 20 - 1e-8),
                        special.struve(-4.3, 20 + 1e-8))
def test_chi2_smalldf():
    # Chi-square CDF with a fractional number of degrees of freedom.
    expected = 0.957890536704110
    assert_almost_equal(special.chdtr(0.6, 3), expected)
def test_ch2_inf():
    # The chi-square CDF saturates at exactly 1 as x -> inf.
    assert_equal(special.chdtr(0.7, np.inf), 1.0)
def test_chi2c_smalldf():
    # Complemented CDF: chdtrc = 1 - chdtr at the same point.
    expected = 1 - 0.957890536704110
    assert_almost_equal(special.chdtrc(0.6, 3), expected)
def test_chi2_inv_smalldf():
    # chdtri inverts chdtrc: recover x = 3 from the complemented probability.
    p = 1 - 0.957890536704110
    assert_almost_equal(special.chdtri(0.6, p), 3)
def test_agm_simple():
rtol = 1e-13
# Gauss's constant
assert_allclose(1/special.agm(1, np.sqrt(2)), 0.834626841674073186,
rtol=rtol)
# These values were computed using Wolfram Alpha, with the
# function ArithmeticGeometricMean[a, b].
agm13 = 1.863616783244897
agm15 = 2.604008190530940
agm35 = 3.936235503649555
assert_allclose(special.agm([[1], [3]], [1, 3, 5]),
[[1, agm13, agm15],
[agm13, 3, agm35]], rtol=rtol)
# Computed by the iteration formula using mpmath,
# with mpmath.mp.prec = 1000:
agm12 = 1.4567910310469068
assert_allclose(special.agm(1, 2), agm12, rtol=rtol)
assert_allclose(special.agm(2, 1), agm12, rtol=rtol)
assert_allclose(special.agm(-1, -2), -agm12, rtol=rtol)
assert_allclose(special.agm(24, 6), 13.458171481725614, rtol=rtol)
assert_allclose(special.agm(13, 123456789.5), 11111458.498599306,
rtol=rtol)
assert_allclose(special.agm(1e30, 1), 2.229223055945383e+28, rtol=rtol)
assert_allclose(special.agm(1e-22, 1), 0.030182566420169886, rtol=rtol)
assert_allclose(special.agm(1e150, 1e180), 2.229223055945383e+178,
rtol=rtol)
assert_allclose(special.agm(1e180, 1e-150), 2.0634722510162677e+177,
rtol=rtol)
assert_allclose(special.agm(1e-150, 1e-170), 3.3112619670463756e-152,
rtol=rtol)
fi = np.finfo(1.0)
assert_allclose(special.agm(fi.tiny, fi.max), 1.9892072050015473e+305,
rtol=rtol)
assert_allclose(special.agm(0.75*fi.max, fi.max), 1.564904312298045e+308,
rtol=rtol)
assert_allclose(special.agm(fi.tiny, 3*fi.tiny), 4.1466849866735005e-308,
rtol=rtol)
# zero, nan and inf cases.
assert_equal(special.agm(0, 0), 0)
assert_equal(special.agm(99, 0), 0)
assert_equal(special.agm(-1, 10), np.nan)
assert_equal(special.agm(0, np.inf), np.nan)
assert_equal(special.agm(np.inf, 0), np.nan)
assert_equal(special.agm(0, -np.inf), np.nan)
assert_equal(special.agm(-np.inf, 0), np.nan)
assert_equal(special.agm(np.inf, -np.inf), np.nan)
assert_equal(special.agm(-np.inf, np.inf), np.nan)
assert_equal(special.agm(1, np.nan), np.nan)
assert_equal(special.agm(np.nan, -1), np.nan)
assert_equal(special.agm(1, np.inf), np.inf)
assert_equal(special.agm(np.inf, 1), np.inf)
assert_equal(special.agm(-1, -np.inf), -np.inf)
assert_equal(special.agm(-np.inf, -1), -np.inf)
def test_legacy():
# Legacy behavior: truncating arguments to integers
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "floating point number truncated to an integer")
assert_equal(special.expn(1, 0.3), special.expn(1.8, 0.3))
assert_equal(special.nbdtrc(1, 2, 0.3), special.nbdtrc(1.8, 2.8, 0.3))
assert_equal(special.nbdtr(1, 2, 0.3), special.nbdtr(1.8, 2.8, 0.3))
assert_equal(special.nbdtri(1, 2, 0.3), special.nbdtri(1.8, 2.8, 0.3))
assert_equal(special.pdtri(1, 0.3), special.pdtri(1.8, 0.3))
assert_equal(special.kn(1, 0.3), special.kn(1.8, 0.3))
assert_equal(special.yn(1, 0.3), special.yn(1.8, 0.3))
assert_equal(special.smirnov(1, 0.3), special.smirnov(1.8, 0.3))
assert_equal(special.smirnovi(1, 0.3), special.smirnovi(1.8, 0.3))
@with_special_errors
def test_error_raising():
assert_raises(special.SpecialFunctionError, special.iv, 1, 1e99j)
def test_xlogy():
def xfunc(x, y):
with np.errstate(invalid='ignore'):
if x == 0 and not np.isnan(y):
return x
else:
return x*np.log(y)
z1 = np.asarray([(0,0), (0, np.nan), (0, np.inf), (1.0, 2.0)], dtype=float)
z2 = np.r_[z1, [(0, 1j), (1, 1j)]]
w1 = np.vectorize(xfunc)(z1[:,0], z1[:,1])
assert_func_equal(special.xlogy, w1, z1, rtol=1e-13, atol=1e-13)
w2 = np.vectorize(xfunc)(z2[:,0], z2[:,1])
assert_func_equal(special.xlogy, w2, z2, rtol=1e-13, atol=1e-13)
def test_xlog1py():
def xfunc(x, y):
with np.errstate(invalid='ignore'):
if x == 0 and not np.isnan(y):
return x
else:
return x * np.log1p(y)
z1 = np.asarray([(0,0), (0, np.nan), (0, np.inf), (1.0, 2.0),
(1, 1e-30)], dtype=float)
w1 = np.vectorize(xfunc)(z1[:,0], z1[:,1])
assert_func_equal(special.xlog1py, w1, z1, rtol=1e-13, atol=1e-13)
def test_entr():
def xfunc(x):
if x < 0:
return -np.inf
else:
return -special.xlogy(x, x)
values = (0, 0.5, 1.0, np.inf)
signs = [-1, 1]
arr = []
for sgn, v in itertools.product(signs, values):
arr.append(sgn * v)
z = np.array(arr, dtype=float)
w = np.vectorize(xfunc, otypes=[np.float64])(z)
assert_func_equal(special.entr, w, z, rtol=1e-13, atol=1e-13)
def test_kl_div():
def xfunc(x, y):
if x < 0 or y < 0 or (y == 0 and x != 0):
# extension of natural domain to preserve convexity
return np.inf
elif np.isposinf(x) or np.isposinf(y):
# limits within the natural domain
return np.inf
elif x == 0:
return y
else:
return special.xlogy(x, x/y) - x + y
values = (0, 0.5, 1.0)
signs = [-1, 1]
arr = []
for sgna, va, sgnb, vb in itertools.product(signs, values, signs, values):
arr.append((sgna*va, sgnb*vb))
z = np.array(arr, dtype=float)
w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1])
assert_func_equal(special.kl_div, w, z, rtol=1e-13, atol=1e-13)
def test_rel_entr():
def xfunc(x, y):
if x > 0 and y > 0:
return special.xlogy(x, x/y)
elif x == 0 and y >= 0:
return 0
else:
return np.inf
values = (0, 0.5, 1.0)
signs = [-1, 1]
arr = []
for sgna, va, sgnb, vb in itertools.product(signs, values, signs, values):
arr.append((sgna*va, sgnb*vb))
z = np.array(arr, dtype=float)
w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1])
assert_func_equal(special.rel_entr, w, z, rtol=1e-13, atol=1e-13)
def test_huber():
    """Huber loss: closed-form special cases plus a reference comparison."""
    # delta < 0 is outside the domain; the quadratic and linear regimes
    # are checked at hand-picked points.
    assert_equal(special.huber(-1, 1.5), np.inf)
    assert_allclose(special.huber(2, 1.5), 0.5 * np.square(1.5))
    assert_allclose(special.huber(2, 2.5), 2 * (2.5 - 0.5 * 2))

    def xfunc(delta, r):
        # Reference (scalar) implementation of the Huber loss.
        if delta < 0:
            return np.inf
        elif np.abs(r) < delta:
            return 0.5 * np.square(r)
        else:
            return delta * (np.abs(r) - 0.5 * delta)

    # Seeded generator: the original drew unseeded random inputs, which made
    # any failure impossible to reproduce.
    rng = np.random.RandomState(1234)
    z = rng.randn(10, 2)
    w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1])
    assert_func_equal(special.huber, w, z, rtol=1e-13, atol=1e-13)
def test_pseudo_huber():
    """Pseudo-Huber loss versus a scalar reference implementation."""
    def xfunc(delta, r):
        if delta < 0:
            # Outside the domain.
            return np.inf
        elif (not delta) or (not r):
            # Either argument being zero yields zero loss.
            return 0
        else:
            return delta**2 * (np.sqrt(1 + (r/delta)**2) - 1)

    # Seeded generator for reproducibility (the original used unseeded
    # randn); the two appended rows pin the delta == 0 and r == 0 branches.
    rng = np.random.RandomState(1234)
    z = np.array(rng.randn(10, 2).tolist() + [[0, 0.5], [0.5, 0]])
    w = np.vectorize(xfunc, otypes=[np.float64])(z[:,0], z[:,1])
    assert_func_equal(special.pseudo_huber, w, z, rtol=1e-13, atol=1e-13)
def test_pseudo_huber_small_r():
    # For delta = 1 the loss is sqrt(1 + r**2) - 1, which loses all
    # precision in naive double arithmetic when r is tiny.
    delta = 1.0
    r = 1e-18
    # Reference computed with mpmath at 200 decimal digits:
    #   float(mpmath.sqrt(1 + mpmath.mpf(1e-18)**2) - 1)
    expected = 5.0000000000000005e-37
    assert_allclose(special.pseudo_huber(delta, r), expected, rtol=1e-13)
def test_runtime_warning():
    """Oversized Mathieu coefficient requests must emit a RuntimeWarning
    rather than silently returning a truncated/invalid result."""
    with pytest.warns(RuntimeWarning,
                      match=r'Too many predicted coefficients'):
        mathieu_odd_coef(1000, 1000)
    with pytest.warns(RuntimeWarning,
                      match=r'Too many predicted coefficients'):
        mathieu_even_coef(1000, 1000)
| 158,898
| 39.360427
| 110
|
py
|
scipy
|
scipy-main/scipy/special/tests/test_round.py
|
import numpy as np
import pytest
from scipy.special import _test_internal
@pytest.mark.skipif(not _test_internal.have_fenv(), reason="no fenv()")
def test_add_round_up():
np.random.seed(1234)
_test_internal.test_add_round(10**5, 'up')
@pytest.mark.skipif(not _test_internal.have_fenv(), reason="no fenv()")
def test_add_round_down():
np.random.seed(1234)
_test_internal.test_add_round(10**5, 'down')
| 421
| 23.823529
| 71
|
py
|
scipy
|
scipy-main/scipy/special/tests/test_ellip_harm.py
|
#
# Tests for the Ellipsoidal Harmonic Function,
# Distributed under the same license as SciPy itself.
#
import numpy as np
from numpy.testing import (assert_equal, assert_almost_equal, assert_allclose,
assert_, suppress_warnings)
from scipy.special._testutils import assert_func_equal
from scipy.special import ellip_harm, ellip_harm_2, ellip_normal
from scipy.integrate import IntegrationWarning
from numpy import sqrt, pi
def test_ellip_potential():
def change_coefficient(lambda1, mu, nu, h2, k2):
x = sqrt(lambda1**2*mu**2*nu**2/(h2*k2))
y = sqrt((lambda1**2 - h2)*(mu**2 - h2)*(h2 - nu**2)/(h2*(k2 - h2)))
z = sqrt((lambda1**2 - k2)*(k2 - mu**2)*(k2 - nu**2)/(k2*(k2 - h2)))
return x, y, z
def solid_int_ellip(lambda1, mu, nu, n, p, h2, k2):
return (ellip_harm(h2, k2, n, p, lambda1)*ellip_harm(h2, k2, n, p, mu)
* ellip_harm(h2, k2, n, p, nu))
def solid_int_ellip2(lambda1, mu, nu, n, p, h2, k2):
return (ellip_harm_2(h2, k2, n, p, lambda1)
* ellip_harm(h2, k2, n, p, mu)*ellip_harm(h2, k2, n, p, nu))
def summation(lambda1, mu1, nu1, lambda2, mu2, nu2, h2, k2):
tol = 1e-8
sum1 = 0
for n in range(20):
xsum = 0
for p in range(1, 2*n+2):
xsum += (4*pi*(solid_int_ellip(lambda2, mu2, nu2, n, p, h2, k2)
* solid_int_ellip2(lambda1, mu1, nu1, n, p, h2, k2)) /
(ellip_normal(h2, k2, n, p)*(2*n + 1)))
if abs(xsum) < 0.1*tol*abs(sum1):
break
sum1 += xsum
return sum1, xsum
def potential(lambda1, mu1, nu1, lambda2, mu2, nu2, h2, k2):
x1, y1, z1 = change_coefficient(lambda1, mu1, nu1, h2, k2)
x2, y2, z2 = change_coefficient(lambda2, mu2, nu2, h2, k2)
res = sqrt((x2 - x1)**2 + (y2 - y1)**2 + (z2 - z1)**2)
return 1/res
pts = [
(120, sqrt(19), 2, 41, sqrt(17), 2, 15, 25),
(120, sqrt(16), 3.2, 21, sqrt(11), 2.9, 11, 20),
]
with suppress_warnings() as sup:
sup.filter(IntegrationWarning, "The occurrence of roundoff error")
sup.filter(IntegrationWarning, "The maximum number of subdivisions")
for p in pts:
err_msg = repr(p)
exact = potential(*p)
result, last_term = summation(*p)
assert_allclose(exact, result, atol=0, rtol=1e-8, err_msg=err_msg)
assert_(abs(result - exact) < 10*abs(last_term), err_msg)
def test_ellip_norm():
def G01(h2, k2):
return 4*pi
def G11(h2, k2):
return 4*pi*h2*k2/3
def G12(h2, k2):
return 4*pi*h2*(k2 - h2)/3
def G13(h2, k2):
return 4*pi*k2*(k2 - h2)/3
def G22(h2, k2):
res = (2*(h2**4 + k2**4) - 4*h2*k2*(h2**2 + k2**2) + 6*h2**2*k2**2 +
sqrt(h2**2 + k2**2 - h2*k2)*(-2*(h2**3 + k2**3) + 3*h2*k2*(h2 + k2)))
return 16*pi/405*res
def G21(h2, k2):
res = (2*(h2**4 + k2**4) - 4*h2*k2*(h2**2 + k2**2) + 6*h2**2*k2**2
+ sqrt(h2**2 + k2**2 - h2*k2)*(2*(h2**3 + k2**3) - 3*h2*k2*(h2 + k2)))
return 16*pi/405*res
def G23(h2, k2):
return 4*pi*h2**2*k2*(k2 - h2)/15
def G24(h2, k2):
return 4*pi*h2*k2**2*(k2 - h2)/15
def G25(h2, k2):
return 4*pi*h2*k2*(k2 - h2)**2/15
def G32(h2, k2):
res = (16*(h2**4 + k2**4) - 36*h2*k2*(h2**2 + k2**2) + 46*h2**2*k2**2
+ sqrt(4*(h2**2 + k2**2) - 7*h2*k2)*(-8*(h2**3 + k2**3) +
11*h2*k2*(h2 + k2)))
return 16*pi/13125*k2*h2*res
def G31(h2, k2):
res = (16*(h2**4 + k2**4) - 36*h2*k2*(h2**2 + k2**2) + 46*h2**2*k2**2
+ sqrt(4*(h2**2 + k2**2) - 7*h2*k2)*(8*(h2**3 + k2**3) -
11*h2*k2*(h2 + k2)))
return 16*pi/13125*h2*k2*res
def G34(h2, k2):
res = (6*h2**4 + 16*k2**4 - 12*h2**3*k2 - 28*h2*k2**3 + 34*h2**2*k2**2
+ sqrt(h2**2 + 4*k2**2 - h2*k2)*(-6*h2**3 - 8*k2**3 + 9*h2**2*k2 +
13*h2*k2**2))
return 16*pi/13125*h2*(k2 - h2)*res
def G33(h2, k2):
res = (6*h2**4 + 16*k2**4 - 12*h2**3*k2 - 28*h2*k2**3 + 34*h2**2*k2**2
+ sqrt(h2**2 + 4*k2**2 - h2*k2)*(6*h2**3 + 8*k2**3 - 9*h2**2*k2 -
13*h2*k2**2))
return 16*pi/13125*h2*(k2 - h2)*res
def G36(h2, k2):
res = (16*h2**4 + 6*k2**4 - 28*h2**3*k2 - 12*h2*k2**3 + 34*h2**2*k2**2
+ sqrt(4*h2**2 + k2**2 - h2*k2)*(-8*h2**3 - 6*k2**3 + 13*h2**2*k2 +
9*h2*k2**2))
return 16*pi/13125*k2*(k2 - h2)*res
def G35(h2, k2):
res = (16*h2**4 + 6*k2**4 - 28*h2**3*k2 - 12*h2*k2**3 + 34*h2**2*k2**2
+ sqrt(4*h2**2 + k2**2 - h2*k2)*(8*h2**3 + 6*k2**3 - 13*h2**2*k2 -
9*h2*k2**2))
return 16*pi/13125*k2*(k2 - h2)*res
def G37(h2, k2):
return 4*pi*h2**2*k2**2*(k2 - h2)**2/105
known_funcs = {(0, 1): G01, (1, 1): G11, (1, 2): G12, (1, 3): G13,
(2, 1): G21, (2, 2): G22, (2, 3): G23, (2, 4): G24,
(2, 5): G25, (3, 1): G31, (3, 2): G32, (3, 3): G33,
(3, 4): G34, (3, 5): G35, (3, 6): G36, (3, 7): G37}
def _ellip_norm(n, p, h2, k2):
func = known_funcs[n, p]
return func(h2, k2)
_ellip_norm = np.vectorize(_ellip_norm)
def ellip_normal_known(h2, k2, n, p):
return _ellip_norm(n, p, h2, k2)
# generate both large and small h2 < k2 pairs
np.random.seed(1234)
h2 = np.random.pareto(0.5, size=1)
k2 = h2 * (1 + np.random.pareto(0.5, size=h2.size))
points = []
for n in range(4):
for p in range(1, 2*n+2):
points.append((h2, k2, np.full(h2.size, n), np.full(h2.size, p)))
points = np.array(points)
with suppress_warnings() as sup:
sup.filter(IntegrationWarning, "The occurrence of roundoff error")
assert_func_equal(ellip_normal, ellip_normal_known, points, rtol=1e-12)
def test_ellip_harm_2():
def I1(h2, k2, s):
res = (ellip_harm_2(h2, k2, 1, 1, s)/(3 * ellip_harm(h2, k2, 1, 1, s))
+ ellip_harm_2(h2, k2, 1, 2, s)/(3 * ellip_harm(h2, k2, 1, 2, s)) +
ellip_harm_2(h2, k2, 1, 3, s)/(3 * ellip_harm(h2, k2, 1, 3, s)))
return res
with suppress_warnings() as sup:
sup.filter(IntegrationWarning, "The occurrence of roundoff error")
assert_almost_equal(I1(5, 8, 10), 1/(10*sqrt((100-5)*(100-8))))
# Values produced by code from arXiv:1204.0267
assert_almost_equal(ellip_harm_2(5, 8, 2, 1, 10), 0.00108056853382)
assert_almost_equal(ellip_harm_2(5, 8, 2, 2, 10), 0.00105820513809)
assert_almost_equal(ellip_harm_2(5, 8, 2, 3, 10), 0.00106058384743)
assert_almost_equal(ellip_harm_2(5, 8, 2, 4, 10), 0.00106774492306)
assert_almost_equal(ellip_harm_2(5, 8, 2, 5, 10), 0.00107976356454)
def test_ellip_harm():
def E01(h2, k2, s):
return 1
def E11(h2, k2, s):
return s
def E12(h2, k2, s):
return sqrt(abs(s*s - h2))
def E13(h2, k2, s):
return sqrt(abs(s*s - k2))
def E21(h2, k2, s):
return s*s - 1/3*((h2 + k2) + sqrt(abs((h2 + k2)*(h2 + k2)-3*h2*k2)))
def E22(h2, k2, s):
return s*s - 1/3*((h2 + k2) - sqrt(abs((h2 + k2)*(h2 + k2)-3*h2*k2)))
def E23(h2, k2, s):
return s * sqrt(abs(s*s - h2))
def E24(h2, k2, s):
return s * sqrt(abs(s*s - k2))
def E25(h2, k2, s):
return sqrt(abs((s*s - h2)*(s*s - k2)))
def E31(h2, k2, s):
return s*s*s - (s/5)*(2*(h2 + k2) + sqrt(4*(h2 + k2)*(h2 + k2) -
15*h2*k2))
def E32(h2, k2, s):
return s*s*s - (s/5)*(2*(h2 + k2) - sqrt(4*(h2 + k2)*(h2 + k2) -
15*h2*k2))
def E33(h2, k2, s):
return sqrt(abs(s*s - h2))*(s*s - 1/5*((h2 + 2*k2) + sqrt(abs((h2 +
2*k2)*(h2 + 2*k2) - 5*h2*k2))))
def E34(h2, k2, s):
return sqrt(abs(s*s - h2))*(s*s - 1/5*((h2 + 2*k2) - sqrt(abs((h2 +
2*k2)*(h2 + 2*k2) - 5*h2*k2))))
def E35(h2, k2, s):
return sqrt(abs(s*s - k2))*(s*s - 1/5*((2*h2 + k2) + sqrt(abs((2*h2
+ k2)*(2*h2 + k2) - 5*h2*k2))))
def E36(h2, k2, s):
return sqrt(abs(s*s - k2))*(s*s - 1/5*((2*h2 + k2) - sqrt(abs((2*h2
+ k2)*(2*h2 + k2) - 5*h2*k2))))
def E37(h2, k2, s):
return s * sqrt(abs((s*s - h2)*(s*s - k2)))
assert_equal(ellip_harm(5, 8, 1, 2, 2.5, 1, 1),
ellip_harm(5, 8, 1, 2, 2.5))
known_funcs = {(0, 1): E01, (1, 1): E11, (1, 2): E12, (1, 3): E13,
(2, 1): E21, (2, 2): E22, (2, 3): E23, (2, 4): E24,
(2, 5): E25, (3, 1): E31, (3, 2): E32, (3, 3): E33,
(3, 4): E34, (3, 5): E35, (3, 6): E36, (3, 7): E37}
point_ref = []
def ellip_harm_known(h2, k2, n, p, s):
for i in range(h2.size):
func = known_funcs[(int(n[i]), int(p[i]))]
point_ref.append(func(h2[i], k2[i], s[i]))
return point_ref
np.random.seed(1234)
h2 = np.random.pareto(0.5, size=30)
k2 = h2*(1 + np.random.pareto(0.5, size=h2.size))
s = np.random.pareto(0.5, size=h2.size)
points = []
for i in range(h2.size):
for n in range(4):
for p in range(1, 2*n+2):
points.append((h2[i], k2[i], n, p, s[i]))
points = np.array(points)
assert_func_equal(ellip_harm, ellip_harm_known, points, rtol=1e-12)
def test_ellip_harm_invalid_p():
    # Regression test: an out-of-range harmonic index must yield nan
    # rather than a bogus finite value. Valid p satisfies 1 <= p <= 2*n + 1.
    n = 4
    p = 2*n + 2  # one past the largest valid index
    assert np.isnan(ellip_harm(0.5, 2.0, n, p, 0.2))
| 9,640
| 33.555556
| 79
|
py
|
scipy
|
scipy-main/scipy/special/tests/test_wrightomega.py
|
import pytest
import numpy as np
from numpy.testing import assert_, assert_equal, assert_allclose
import scipy.special as sc
from scipy.special._testutils import assert_func_equal
def test_wrightomega_nan():
    # A nan in either component of the argument must propagate to both
    # components of the result.
    nan_points = [complex(np.nan, 0),
                  complex(0, np.nan),
                  complex(np.nan, np.nan),
                  complex(np.nan, 1),
                  complex(1, np.nan)]
    for point in nan_points:
        value = sc.wrightomega(point)
        assert_(np.isnan(value.real))
        assert_(np.isnan(value.imag))
def test_wrightomega_inf_branch():
pts = [complex(-np.inf, np.pi/4),
complex(-np.inf, -np.pi/4),
complex(-np.inf, 3*np.pi/4),
complex(-np.inf, -3*np.pi/4)]
expected_results = [complex(0.0, 0.0),
complex(0.0, -0.0),
complex(-0.0, 0.0),
complex(-0.0, -0.0)]
for p, expected in zip(pts, expected_results):
res = sc.wrightomega(p)
# We can't use assert_equal(res, expected) because in older versions of
# numpy, assert_equal doesn't check the sign of the real and imaginary
# parts when comparing complex zeros. It does check the sign when the
# arguments are *real* scalars.
assert_equal(res.real, expected.real)
assert_equal(res.imag, expected.imag)
def test_wrightomega_inf():
    """wrightomega acts as the identity on these infinite inputs."""
    infinite_points = (complex(np.inf, 10),
                       complex(-np.inf, 10),
                       complex(10, np.inf),
                       complex(10, -np.inf))
    for z in infinite_points:
        assert_equal(sc.wrightomega(z), z)
def test_wrightomega_singular():
    # The singular points -1 +/- i*pi both map to exactly -1, and the
    # imaginary part of the result must carry a positive sign bit.
    for z in (complex(-1.0, np.pi), complex(-1.0, -np.pi)):
        w = sc.wrightomega(z)
        assert_equal(w, -1.0)
        assert_(not np.signbit(w.imag))
@pytest.mark.parametrize('x, desired', [(-np.inf, 0), (np.inf, np.inf)])
def test_wrightomega_real_infinities(x, desired):
    """Real-axis limits: omega(-inf) == 0 and omega(inf) == inf."""
    result = sc.wrightomega(x)
    assert result == desired
def test_wrightomega_real_nan():
    """A real nan input yields a real nan output."""
    result = sc.wrightomega(np.nan)
    assert np.isnan(result)
def test_wrightomega_real_series_crossover():
    # The real-valued implementation switches method at x = 1e20;
    # check accuracy immediately on both sides of the switch point.
    # Reference values computed with mpmath.
    tol = 2 * np.finfo(float).eps
    crossover = 1e20
    just_below = np.nextafter(crossover, -np.inf)
    just_above = np.nextafter(crossover, np.inf)
    expected_below = 99999999999999983569.948
    expected_above = 100000000000000016337.948
    assert_allclose(sc.wrightomega(just_below), expected_below,
                    atol=0, rtol=tol)
    assert_allclose(sc.wrightomega(just_above), expected_above,
                    atol=0, rtol=tol)
def test_wrightomega_exp_approximation_crossover():
    # Below x = -50 the implementation switches to an exp-based
    # approximation; check accuracy immediately on both sides of the
    # switch point.  Reference values computed with mpmath.
    tol = 2 * np.finfo(float).eps
    crossover = -50
    just_above = np.nextafter(crossover, np.inf)
    just_below = np.nextafter(crossover, -np.inf)
    expected_above = 1.9287498479639314876e-22
    expected_below = 1.9287498479639040784e-22
    assert_allclose(sc.wrightomega(just_above), expected_above,
                    atol=0, rtol=tol)
    assert_allclose(sc.wrightomega(just_below), expected_below,
                    atol=0, rtol=tol)
def test_wrightomega_real_versus_complex():
    # The specialized real-valued path must agree with the complex
    # implementation evaluated on the real axis.
    grid = np.linspace(-500, 500, 1001)
    reference = sc.wrightomega(grid + 0j).real
    assert_func_equal(sc.wrightomega, reference, grid, atol=0, rtol=1e-14)
| 3,560
| 29.177966
| 79
|
py
|
scipy
|
scipy-main/scipy/special/tests/test_lambertw.py
|
#
# Tests for the lambertw function,
# Adapted from the MPMath tests [1] by Yosef Meller, mellerf@netvision.net.il
# Distributed under the same license as SciPy itself.
#
# [1] mpmath source code, Subversion revision 992
# http://code.google.com/p/mpmath/source/browse/trunk/mpmath/tests/test_functions2.py?spec=svn994&r=992
import pytest
import numpy as np
from numpy.testing import assert_, assert_equal, assert_array_almost_equal
from scipy.special import lambertw
from numpy import nan, inf, pi, e, isnan, log, r_, array, complex_
from scipy.special._testutils import FuncData
def test_values():
    """Check lambertw against reference values adapted from mpmath.

    Covers special values (nan/inf), several branches k, and complex
    arguments.  Table rows are (z, k, expected W_k(z)); the branch
    index k is stored as the real part of a complex table entry and
    cast back to int inside the wrapper ``w``.
    """
    # Special values first.
    assert_(isnan(lambertw(nan)))
    assert_equal(lambertw(inf,1).real, inf)
    assert_equal(lambertw(inf,1).imag, 2*pi)
    assert_equal(lambertw(-inf,1).real, inf)
    assert_equal(lambertw(-inf,1).imag, 3*pi)
    # The branch argument defaults to the principal branch k = 0.
    assert_equal(lambertw(1.), lambertw(1., 0))
    data = [
        (0,0, 0),
        (0+0j,0, 0),
        (inf,0, inf),
        (0,-1, -inf),
        (0,1, -inf),
        (0,3, -inf),
        (e,0, 1),
        (1,0, 0.567143290409783873),
        (-pi/2,0, 1j*pi/2),
        (-log(2)/2,0, -log(2)),
        (0.25,0, 0.203888354702240164),
        (-0.25,0, -0.357402956181388903),
        (-1./10000,0, -0.000100010001500266719),
        (-0.25,-1, -2.15329236411034965),
        (0.25,-1, -3.00899800997004620-4.07652978899159763j),
        (-0.25,-1, -2.15329236411034965),
        (0.25,1, -3.00899800997004620+4.07652978899159763j),
        (-0.25,1, -3.48973228422959210+7.41405453009603664j),
        (-4,0, 0.67881197132094523+1.91195078174339937j),
        (-4,1, -0.66743107129800988+7.76827456802783084j),
        (-4,-1, 0.67881197132094523-1.91195078174339937j),
        (1000,0, 5.24960285240159623),
        (1000,1, 4.91492239981054535+5.44652615979447070j),
        (1000,-1, 4.91492239981054535-5.44652615979447070j),
        (1000,5, 3.5010625305312892+29.9614548941181328j),
        (3+4j,0, 1.281561806123775878+0.533095222020971071j),
        (-0.4+0.4j,0, -0.10396515323290657+0.61899273315171632j),
        (3+4j,1, -0.11691092896595324+5.61888039871282334j),
        (3+4j,-1, 0.25856740686699742-3.85211668616143559j),
        (-0.5,-1, -0.794023632344689368-0.770111750510379110j),
        (-1./10000,1, -11.82350837248724344+6.80546081842002101j),
        (-1./10000,-1, -11.6671145325663544),
        (-1./10000,-2, -11.82350837248724344-6.80546081842002101j),
        (-1./100000,4, -14.9186890769540539+26.1856750178782046j),
        (-1./100000,5, -15.0931437726379218666+32.5525721210262290086j),
        ((2+1j)/10,0, 0.173704503762911669+0.071781336752835511j),
        ((2+1j)/10,1, -3.21746028349820063+4.56175438896292539j),
        ((2+1j)/10,-1, -3.03781405002993088-3.53946629633505737j),
        ((2+1j)/10,4, -4.6878509692773249+23.8313630697683291j),
        (-(2+1j)/10,0, -0.226933772515757933-0.164986470020154580j),
        (-(2+1j)/10,1, -2.43569517046110001+0.76974067544756289j),
        (-(2+1j)/10,-1, -3.54858738151989450-6.91627921869943589j),
        (-(2+1j)/10,4, -4.5500846928118151+20.6672982215434637j),
        (pi,0, 1.073658194796149172092178407024821347547745350410314531),
        # Former bug in generated branch,
        (-0.5+0.002j,0, -0.78917138132659918344 + 0.76743539379990327749j),
        (-0.5-0.002j,0, -0.78917138132659918344 - 0.76743539379990327749j),
        (-0.448+0.4j,0, -0.11855133765652382241 + 0.66570534313583423116j),
        (-0.448-0.4j,0, -0.11855133765652382241 - 0.66570534313583423116j),
    ]
    # Use the builtin ``complex`` instead of ``numpy.complex_``: the
    # latter alias was removed in NumPy 2.0; both mean complex128 here.
    data = array(data, dtype=complex)
    def w(x, y):
        # The branch index arrives as a complex table entry; recover
        # the integer branch from its real part.
        return lambertw(x, y.real.astype(int))
    with np.errstate(all='ignore'):
        FuncData(w, data, (0,1), 2, rtol=1e-10, atol=1e-13).check()
def test_ufunc():
    """lambertw broadcasts over array input like a ufunc."""
    inputs = r_[0., e, 1.]
    expected = r_[0., 1., 0.567143290409783873]
    assert_array_almost_equal(lambertw(inputs), expected)
def test_lambertw_ufunc_loop_selection():
    # Regression test for gh-4895
    # (https://github.com/scipy/scipy/issues/4895): every mix of
    # scalar/array arguments must select the complex128 ufunc loop.
    complex_dt = np.dtype(np.complex128)
    assert_equal(lambertw(0, 0, 0).dtype, complex_dt)
    assert_equal(lambertw([0], 0, 0).dtype, complex_dt)
    assert_equal(lambertw(0, [0], 0).dtype, complex_dt)
    assert_equal(lambertw(0, 0, [0]).dtype, complex_dt)
    assert_equal(lambertw([0], [0], [0]).dtype, complex_dt)
@pytest.mark.parametrize('z', [1e-316, -2e-320j, -5e-318+1e-320j])
def test_lambertw_subnormal_k0(z):
    """Subnormal inputs on branch k=0 are handled (gh-16291).

    For arguments this tiny, lambertw(z) is numerically exactly z.
    """
    assert lambertw(z) == z
| 4,556
| 40.427273
| 107
|
py
|
scipy
|
scipy-main/scipy/special/tests/test_owens_t.py
|
import numpy as np
from numpy.testing import assert_equal, assert_allclose
import scipy.special as sc
def test_symmetries():
    """Owen's T is even in h and odd in a."""
    np.random.seed(1234)
    a = np.random.rand(100)
    h = np.random.rand(100)
    assert_equal(sc.owens_t(h, a), sc.owens_t(-h, a))
    assert_equal(sc.owens_t(h, a), -sc.owens_t(h, -a))
def test_special_cases():
    # a == 0 gives 0 regardless of h.
    assert_equal(sc.owens_t(5, 0), 0)
    # T(0, a) = arctan(a) / (2*pi).
    assert_allclose(sc.owens_t(0, 5), 0.5*np.arctan(5)/np.pi, rtol=5e-14)
    # T(h, 1) = Phi(h)*(1 - Phi(h))/2 with Phi the standard normal CDF;
    # target below is that expression evaluated at h = 5.
    assert_allclose(sc.owens_t(5, 1), 1.4332574485503512543e-07,
                    rtol=5e-14)
def test_nans():
    """nan in either argument propagates to the result."""
    for h, a in [(20, np.nan), (np.nan, 20), (np.nan, np.nan)]:
        assert_equal(sc.owens_t(h, a), np.nan)
def test_infs():
    # a -> +/-inf at h = 0: T(0, a) = arctan(a) / (2*pi).
    limit = 1/(2*np.pi) * np.arctan(np.inf)
    assert_allclose(sc.owens_t(0, np.inf), limit, rtol=5e-14)
    assert_allclose(sc.owens_t(0, -np.inf), -limit, rtol=5e-14)
    # a -> +/-inf at h = 1.  See the definition of Owen's T function
    # at https://en.wikipedia.org/wiki/Owen%27s_T_function; the
    # reference value was obtained with scipy.integrate.quad:
    # quad(lambda x: 1/(2*pi)*(exp(-0.5*(1*1)*(1+x*x))/(1+x*x)), 0, inf)
    ref = 0.07932762696572854
    assert_allclose(sc.owens_t(1, np.inf), ref, rtol=5e-14)
    assert_allclose(sc.owens_t(1, -np.inf), -ref, rtol=5e-14)
    # h -> +/-inf: the Gaussian factor drives the integral to zero;
    # the sign of zero follows the sign of a.
    for h in (np.inf, -np.inf):
        assert_equal(sc.owens_t(h, 1), 0)
        assert_equal(sc.owens_t(h, np.inf), 0)
        assert_equal(sc.owens_t(h, -np.inf), -0.0)
| 1,791
| 32.185185
| 72
|
py
|
scipy
|
scipy-main/scipy/special/tests/test_log_softmax.py
|
import numpy as np
from numpy.testing import assert_allclose
import pytest
import scipy.special as sc
@pytest.mark.parametrize('x, expected', [
    # Large gap: the max maps to ~0, the other entry to the gap.
    (np.array([1000, 1]), np.array([0, -999])),
    # Reference computed with mpmath (mp.dps = 200), cast to float.
    (np.arange(4), np.array([-3.4401896985611953,
                             -2.4401896985611953,
                             -1.4401896985611953,
                             -0.44018969856119533]))
])
def test_log_softmax(x, expected):
    """log_softmax matches precomputed reference values."""
    assert_allclose(sc.log_softmax(x), expected, rtol=1e-13)
@pytest.fixture
def log_softmax_x():
    """Shared 1-d input (arange(4)) for the log_softmax tests."""
    return np.arange(4)
@pytest.fixture
def log_softmax_expected():
    """Reference log_softmax(arange(4)), computed with mpmath (dps=200)."""
    return np.array([-3.4401896985611953,
                     -2.4401896985611953,
                     -1.4401896985611953,
                     -0.44018969856119533])
def test_log_softmax_translation(log_softmax_x, log_softmax_expected):
# Translation property. If all the values are changed by the same amount,
# the softmax result does not change.
x = log_softmax_x + 100
expected = log_softmax_expected
assert_allclose(sc.log_softmax(x), expected, rtol=1e-13)
def test_log_softmax_noneaxis(log_softmax_x, log_softmax_expected):
# When axis=None, softmax operates on the entire array, and preserves
# the shape.
x = log_softmax_x.reshape(2, 2)
expected = log_softmax_expected.reshape(2, 2)
assert_allclose(sc.log_softmax(x), expected, rtol=1e-13)
@pytest.mark.parametrize('axis_2d, expected_2d', [
    # Identical columns: normalizing down axis 0 gives log(1/2).
    (0, np.log(0.5) * np.ones((2, 2))),
    # Normalizing each row reproduces the 1-d large-gap case.
    (1, np.array([[0, -999], [0, -999]]))
])
def test_axes(axis_2d, expected_2d):
    """log_softmax normalizes along the requested axis only."""
    result = sc.log_softmax([[1000, 1], [1000, 1]], axis=axis_2d)
    assert_allclose(result, expected_2d, rtol=1e-13)
@pytest.fixture
def log_softmax_2d_x():
    """Shared 2x4 input (arange(8) reshaped) for the 2-d tests."""
    return np.arange(8).reshape(2, 4)
@pytest.fixture
def log_softmax_2d_expected():
    """Row-wise reference for log_softmax_2d_x (mpmath, dps=200).

    Both rows are shifts of arange(4), so they share one reference row.
    """
    row = [-3.4401896985611953,
           -2.4401896985611953,
           -1.4401896985611953,
           -0.44018969856119533]
    return np.array([row, row])
def test_log_softmax_2d_axis1(log_softmax_2d_x, log_softmax_2d_expected):
x = log_softmax_2d_x
expected = log_softmax_2d_expected
assert_allclose(sc.log_softmax(x, axis=1), expected, rtol=1e-13)
def test_log_softmax_2d_axis0(log_softmax_2d_x, log_softmax_2d_expected):
x = log_softmax_2d_x.T
expected = log_softmax_2d_expected.T
assert_allclose(sc.log_softmax(x, axis=0), expected, rtol=1e-13)
def test_log_softmax_3d(log_softmax_2d_x, log_softmax_2d_expected):
# 3-d input, with a tuple for the axis.
x_3d = log_softmax_2d_x.reshape(2, 2, 2)
expected_3d = log_softmax_2d_expected.reshape(2, 2, 2)
assert_allclose(sc.log_softmax(x_3d, axis=(1, 2)), expected_3d, rtol=1e-13)
def test_log_softmax_scalar():
    """A scalar input normalizes to log(1) == 0."""
    assert_allclose(sc.log_softmax(1.0), 0.0, rtol=1e-13)
| 3,415
| 30.054545
| 79
|
py
|
scipy
|
scipy-main/scipy/special/tests/test_orthogonal_eval.py
|
import numpy as np
from numpy.testing import assert_, assert_allclose
import pytest
from scipy.special import _ufuncs
import scipy.special._orthogonal as orth
from scipy.special._testutils import FuncData
def test_eval_chebyt():
n = np.arange(0, 10000, 7)
x = 2*np.random.rand() - 1
v1 = np.cos(n*np.arccos(x))
v2 = _ufuncs.eval_chebyt(n, x)
assert_(np.allclose(v1, v2, rtol=1e-15))
def test_eval_genlaguerre_restriction():
# check it returns nan for alpha <= -1
assert_(np.isnan(_ufuncs.eval_genlaguerre(0, -1, 0)))
assert_(np.isnan(_ufuncs.eval_genlaguerre(0.1, -1, 0)))
def test_warnings():
# ticket 1334
with np.errstate(all='raise'):
# these should raise no fp warnings
_ufuncs.eval_legendre(1, 0)
_ufuncs.eval_laguerre(1, 1)
_ufuncs.eval_gegenbauer(1, 1, 0)
class TestPolys:
"""
Check that the eval_* functions agree with the constructed polynomials
"""
def check_poly(self, func, cls, param_ranges=[], x_range=[], nn=10,
nparam=10, nx=10, rtol=1e-8):
np.random.seed(1234)
dataset = []
for n in np.arange(nn):
params = [a + (b-a)*np.random.rand(nparam) for a,b in param_ranges]
params = np.asarray(params).T
if not param_ranges:
params = [0]
for p in params:
if param_ranges:
p = (n,) + tuple(p)
else:
p = (n,)
x = x_range[0] + (x_range[1] - x_range[0])*np.random.rand(nx)
x[0] = x_range[0] # always include domain start point
x[1] = x_range[1] # always include domain end point
poly = np.poly1d(cls(*p).coef)
z = np.c_[np.tile(p, (nx,1)), x, poly(x)]
dataset.append(z)
dataset = np.concatenate(dataset, axis=0)
def polyfunc(*p):
p = (p[0].astype(int),) + p[1:]
return func(*p)
with np.errstate(all='raise'):
ds = FuncData(polyfunc, dataset, list(range(len(param_ranges)+2)), -1,
rtol=rtol)
ds.check()
def test_jacobi(self):
self.check_poly(_ufuncs.eval_jacobi, orth.jacobi,
param_ranges=[(-0.99, 10), (-0.99, 10)],
x_range=[-1, 1], rtol=1e-5)
def test_sh_jacobi(self):
self.check_poly(_ufuncs.eval_sh_jacobi, orth.sh_jacobi,
param_ranges=[(1, 10), (0, 1)], x_range=[0, 1],
rtol=1e-5)
def test_gegenbauer(self):
self.check_poly(_ufuncs.eval_gegenbauer, orth.gegenbauer,
param_ranges=[(-0.499, 10)], x_range=[-1, 1],
rtol=1e-7)
def test_chebyt(self):
self.check_poly(_ufuncs.eval_chebyt, orth.chebyt,
param_ranges=[], x_range=[-1, 1])
def test_chebyu(self):
self.check_poly(_ufuncs.eval_chebyu, orth.chebyu,
param_ranges=[], x_range=[-1, 1])
def test_chebys(self):
self.check_poly(_ufuncs.eval_chebys, orth.chebys,
param_ranges=[], x_range=[-2, 2])
def test_chebyc(self):
self.check_poly(_ufuncs.eval_chebyc, orth.chebyc,
param_ranges=[], x_range=[-2, 2])
def test_sh_chebyt(self):
with np.errstate(all='ignore'):
self.check_poly(_ufuncs.eval_sh_chebyt, orth.sh_chebyt,
param_ranges=[], x_range=[0, 1])
def test_sh_chebyu(self):
self.check_poly(_ufuncs.eval_sh_chebyu, orth.sh_chebyu,
param_ranges=[], x_range=[0, 1])
def test_legendre(self):
self.check_poly(_ufuncs.eval_legendre, orth.legendre,
param_ranges=[], x_range=[-1, 1])
def test_sh_legendre(self):
with np.errstate(all='ignore'):
self.check_poly(_ufuncs.eval_sh_legendre, orth.sh_legendre,
param_ranges=[], x_range=[0, 1])
def test_genlaguerre(self):
self.check_poly(_ufuncs.eval_genlaguerre, orth.genlaguerre,
param_ranges=[(-0.99, 10)], x_range=[0, 100])
def test_laguerre(self):
self.check_poly(_ufuncs.eval_laguerre, orth.laguerre,
param_ranges=[], x_range=[0, 100])
def test_hermite(self):
self.check_poly(_ufuncs.eval_hermite, orth.hermite,
param_ranges=[], x_range=[-100, 100])
def test_hermitenorm(self):
self.check_poly(_ufuncs.eval_hermitenorm, orth.hermitenorm,
param_ranges=[], x_range=[-100, 100])
class TestRecurrence:
"""
Check that the eval_* functions sig='ld->d' and 'dd->d' agree.
"""
def check_poly(self, func, param_ranges=[], x_range=[], nn=10,
nparam=10, nx=10, rtol=1e-8):
np.random.seed(1234)
dataset = []
for n in np.arange(nn):
params = [a + (b-a)*np.random.rand(nparam) for a,b in param_ranges]
params = np.asarray(params).T
if not param_ranges:
params = [0]
for p in params:
if param_ranges:
p = (n,) + tuple(p)
else:
p = (n,)
x = x_range[0] + (x_range[1] - x_range[0])*np.random.rand(nx)
x[0] = x_range[0] # always include domain start point
x[1] = x_range[1] # always include domain end point
kw = dict(sig=(len(p)+1)*'d'+'->d')
z = np.c_[np.tile(p, (nx,1)), x, func(*(p + (x,)), **kw)]
dataset.append(z)
dataset = np.concatenate(dataset, axis=0)
def polyfunc(*p):
p = (p[0].astype(int),) + p[1:]
kw = dict(sig='l'+(len(p)-1)*'d'+'->d')
return func(*p, **kw)
with np.errstate(all='raise'):
ds = FuncData(polyfunc, dataset, list(range(len(param_ranges)+2)), -1,
rtol=rtol)
ds.check()
def test_jacobi(self):
self.check_poly(_ufuncs.eval_jacobi,
param_ranges=[(-0.99, 10), (-0.99, 10)],
x_range=[-1, 1])
def test_sh_jacobi(self):
self.check_poly(_ufuncs.eval_sh_jacobi,
param_ranges=[(1, 10), (0, 1)], x_range=[0, 1])
def test_gegenbauer(self):
self.check_poly(_ufuncs.eval_gegenbauer,
param_ranges=[(-0.499, 10)], x_range=[-1, 1])
def test_chebyt(self):
self.check_poly(_ufuncs.eval_chebyt,
param_ranges=[], x_range=[-1, 1])
def test_chebyu(self):
self.check_poly(_ufuncs.eval_chebyu,
param_ranges=[], x_range=[-1, 1])
def test_chebys(self):
self.check_poly(_ufuncs.eval_chebys,
param_ranges=[], x_range=[-2, 2])
def test_chebyc(self):
self.check_poly(_ufuncs.eval_chebyc,
param_ranges=[], x_range=[-2, 2])
def test_sh_chebyt(self):
self.check_poly(_ufuncs.eval_sh_chebyt,
param_ranges=[], x_range=[0, 1])
def test_sh_chebyu(self):
self.check_poly(_ufuncs.eval_sh_chebyu,
param_ranges=[], x_range=[0, 1])
def test_legendre(self):
self.check_poly(_ufuncs.eval_legendre,
param_ranges=[], x_range=[-1, 1])
def test_sh_legendre(self):
self.check_poly(_ufuncs.eval_sh_legendre,
param_ranges=[], x_range=[0, 1])
def test_genlaguerre(self):
self.check_poly(_ufuncs.eval_genlaguerre,
param_ranges=[(-0.99, 10)], x_range=[0, 100])
def test_laguerre(self):
self.check_poly(_ufuncs.eval_laguerre,
param_ranges=[], x_range=[0, 100])
def test_hermite(self):
v = _ufuncs.eval_hermite(70, 1.0)
a = -1.457076485701412e60
assert_allclose(v, a)
def test_hermite_domain():
# Regression test for gh-11091.
assert np.isnan(_ufuncs.eval_hermite(-1, 1.0))
assert np.isnan(_ufuncs.eval_hermitenorm(-1, 1.0))
@pytest.mark.parametrize("n", [0, 1, 2])
@pytest.mark.parametrize("x", [0, 1, np.nan])
def test_hermite_nan(n, x):
# Regression test for gh-11369.
assert np.isnan(_ufuncs.eval_hermite(n, x)) == np.any(np.isnan([n, x]))
assert np.isnan(_ufuncs.eval_hermitenorm(n, x)) == np.any(np.isnan([n, x]))
@pytest.mark.parametrize('n', [0, 1, 2, 3.2])
@pytest.mark.parametrize('alpha', [1, np.nan])
@pytest.mark.parametrize('x', [2, np.nan])
def test_genlaguerre_nan(n, alpha, x):
# Regression test for gh-11361.
nan_laguerre = np.isnan(_ufuncs.eval_genlaguerre(n, alpha, x))
nan_arg = np.any(np.isnan([n, alpha, x]))
assert nan_laguerre == nan_arg
@pytest.mark.parametrize('n', [0, 1, 2, 3.2])
@pytest.mark.parametrize('alpha', [0.0, 1, np.nan])
@pytest.mark.parametrize('x', [1e-6, 2, np.nan])
def test_gegenbauer_nan(n, alpha, x):
# Regression test for gh-11370.
nan_gegenbauer = np.isnan(_ufuncs.eval_gegenbauer(n, alpha, x))
nan_arg = np.any(np.isnan([n, alpha, x]))
assert nan_gegenbauer == nan_arg
| 9,319
| 33.64684
| 82
|
py
|
scipy
|
scipy-main/scipy/special/tests/test_sph_harm.py
|
import numpy as np
from numpy.testing import assert_allclose
import scipy.special as sc
def test_first_harmonics():
    """Compare sph_harm with closed forms of the first four harmonics.

    Conventions: `theta` is the azimuthal angle, `phi` the polar
    angle, and the Condon-Shortley phase is included.  The local
    helpers are named Y{m}{n} for Y_n^m (``n`` prefix = negative m).
    """
    def Y00(theta, phi):
        return 0.5*np.sqrt(1/np.pi)

    def Yn11(theta, phi):
        return 0.5*np.sqrt(3/(2*np.pi))*np.exp(-1j*theta)*np.sin(phi)

    def Y01(theta, phi):
        return 0.5*np.sqrt(3/np.pi)*np.cos(phi)

    def Y11(theta, phi):
        return -0.5*np.sqrt(3/(2*np.pi))*np.exp(1j*theta)*np.sin(phi)

    cases = [(Y00, 0, 0), (Yn11, -1, 1), (Y01, 0, 1), (Y11, 1, 1)]
    theta, phi = np.meshgrid(np.linspace(0, 2*np.pi),
                             np.linspace(0, np.pi))
    for harm, m, n in cases:
        assert_allclose(sc.sph_harm(m, n, theta, phi),
                        harm(theta, phi),
                        rtol=1e-15, atol=1e-15,
                        err_msg=f"Y^{m}_{n} incorrect")
| 1,106
| 28.131579
| 69
|
py
|
scipy
|
scipy-main/scipy/special/tests/test_exponential_integrals.py
|
import pytest
import numpy as np
from numpy.testing import assert_allclose
import scipy.special as sc
class TestExp1:
    """Tests for the exponential integral E1."""

    def test_branch_cut(self):
        # exp1 has a branch cut on the negative real axis: the real
        # function is nan there, while the complex function takes
        # conjugate limits from just above and just below the cut.
        assert np.isnan(sc.exp1(-1))
        upper = sc.exp1(complex(-1, 0))
        lower = sc.exp1(complex(-1, -0.0))
        assert upper.imag == -lower.imag
        assert_allclose(upper, sc.exp1(-1 + 1e-20j), atol=0, rtol=1e-15)
        assert_allclose(lower, sc.exp1(-1 - 1e-20j), atol=0, rtol=1e-15)

    def test_834(self):
        # Regression test for #834: the imaginary part must be
        # continuous between nearby large negative arguments.
        a = sc.exp1(-complex(19.9999990))
        b = sc.exp1(-complex(19.9999991))
        assert_allclose(a.imag, b.imag, atol=0, rtol=1e-15)
class TestScaledExp1:
@pytest.mark.parametrize('x, expected', [(0, 0), (np.inf, 1)])
def test_limits(self, x, expected):
y = sc._ufuncs._scaled_exp1(x)
assert y == expected
# The expected values were computed with mpmath, e.g.:
#
# from mpmath import mp
# mp.dps = 80
# x = 1e-25
# print(float(x*mp.exp(x)*np.expint(1, x)))
#
# prints 5.698741165994961e-24
#
# The method used to compute _scaled_exp1 changes at x=1
# and x=1250, so values at those inputs, and values just
# above and below them, are included in the test data.
@pytest.mark.parametrize('x, expected',
[(1e-25, 5.698741165994961e-24),
(0.1, 0.20146425447084518),
(0.9995, 0.5962509885831002),
(1.0, 0.5963473623231941),
(1.0005, 0.5964436833238044),
(2.5, 0.7588145912149602),
(10.0, 0.9156333393978808),
(100.0, 0.9901942286733019),
(500.0, 0.9980079523802055),
(1000.0, 0.9990019940238807),
(1249.5, 0.9992009578306811),
(1250.0, 0.9992012769377913),
(1250.25, 0.9992014363957858),
(2000.0, 0.9995004992514963),
(1e4, 0.9999000199940024),
(1e10, 0.9999999999),
(1e15, 0.999999999999999),
])
def test_scaled_exp1(self, x, expected):
y = sc._ufuncs._scaled_exp1(x)
assert_allclose(y, expected, rtol=2e-15)
class TestExpi:
@pytest.mark.parametrize('result', [
sc.expi(complex(-1, 0)),
sc.expi(complex(-1, -0.0)),
sc.expi(-1)
])
def test_branch_cut(self, result):
desired = -0.21938393439552027368 # Computed using Mpmath
assert_allclose(result, desired, atol=0, rtol=1e-14)
def test_near_branch_cut(self):
lim_from_above = sc.expi(-1 + 1e-20j)
lim_from_below = sc.expi(-1 - 1e-20j)
assert_allclose(
lim_from_above.real,
lim_from_below.real,
atol=0,
rtol=1e-15
)
assert_allclose(
lim_from_above.imag,
-lim_from_below.imag,
atol=0,
rtol=1e-15
)
def test_continuity_on_positive_real_axis(self):
assert_allclose(
sc.expi(complex(1, 0)),
sc.expi(complex(1, -0.0)),
atol=0,
rtol=1e-15
)
class TestExpn:
    """Tests for the generalized exponential integral E_n."""

    def test_out_of_domain(self):
        # Negative order or negative argument is outside the domain.
        assert np.isnan(sc.expn(-1, 1.0))
        assert np.isnan(sc.expn(1, -1.0))
| 3,687
| 29.991597
| 66
|
py
|
scipy
|
scipy-main/scipy/special/tests/test_pdtr.py
|
import numpy as np
import scipy.special as sc
from numpy.testing import assert_almost_equal, assert_array_equal
class TestPdtr:
    """Tests for the Poisson CDF ``pdtr(k, m)``."""

    # Renamed from ``test`` for consistency with TestPdtrc.test_value.
    def test_value(self):
        # pdtr(0, 1) is the probability of zero events at rate 1: e**-1.
        val = sc.pdtr(0, 1)
        assert_almost_equal(val, np.exp(-1))

    def test_m_zero(self):
        # With rate 0 the CDF is identically 1.
        val = sc.pdtr([0, 1, 2], 0)
        assert_array_equal(val, [1, 1, 1])

    def test_rounding(self):
        # Non-integer k is truncated to its integer part.
        double_val = sc.pdtr([0.1, 1.1, 2.1], 1.0)
        int_val = sc.pdtr([0, 1, 2], 1.0)
        assert_array_equal(double_val, int_val)

    def test_inf(self):
        # k -> inf accumulates the full probability mass.
        val = sc.pdtr(np.inf, 1.0)
        assert_almost_equal(val, 1.0)

    def test_domain(self):
        # Negative k is outside the domain and yields nan.
        val = sc.pdtr(-1.1, 1.0)
        assert np.isnan(val)
class TestPdtrc:
    """Tests for the Poisson survival function ``pdtrc(k, m)``."""

    def test_value(self):
        # Complement of pdtr(0, 1): 1 - e**-1.
        assert_almost_equal(sc.pdtrc(0, 1), 1 - np.exp(-1))

    def test_m_zero(self):
        # With rate 0 all mass is at zero events, so the SF is 0.
        assert_array_equal(sc.pdtrc([0, 1, 2], 0.0), [0, 0, 0])

    def test_rounding(self):
        # Non-integer k is truncated to its integer part.
        from_float = sc.pdtrc([0.1, 1.1, 2.1], 1.0)
        from_int = sc.pdtrc([0, 1, 2], 1.0)
        assert_array_equal(from_float, from_int)

    def test_inf(self):
        # k -> inf leaves no remaining mass.
        assert_almost_equal(sc.pdtrc(np.inf, 1.0), 0.0)

    def test_domain(self):
        # Negative k yields nan.
        assert np.isnan(sc.pdtrc(-1.1, 1.0))
| 1,284
| 25.22449
| 65
|
py
|
scipy
|
scipy-main/scipy/special/tests/test_erfinv.py
|
import numpy as np
from numpy.testing import assert_allclose, assert_equal
import pytest
import scipy.special as sc
class TestInverseErrorFunction:
def test_compliment(self):
# Test erfcinv(1 - x) == erfinv(x)
x = np.linspace(-1, 1, 101)
assert_allclose(sc.erfcinv(1 - x), sc.erfinv(x), rtol=0, atol=1e-15)
def test_literal_values(self):
# The expected values were calculated with mpmath:
#
# import mpmath
# mpmath.mp.dps = 200
# for y in [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]:
# x = mpmath.erfinv(y)
# print(x)
#
y = np.array([0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])
actual = sc.erfinv(y)
expected = [
0.0,
0.08885599049425769,
0.1791434546212917,
0.2724627147267543,
0.37080715859355795,
0.4769362762044699,
0.5951160814499948,
0.7328690779592167,
0.9061938024368233,
1.1630871536766743,
]
assert_allclose(actual, expected, rtol=0, atol=1e-15)
@pytest.mark.parametrize(
'f, x, y',
[
(sc.erfinv, -1, -np.inf),
(sc.erfinv, 0, 0),
(sc.erfinv, 1, np.inf),
(sc.erfinv, -100, np.nan),
(sc.erfinv, 100, np.nan),
(sc.erfcinv, 0, np.inf),
(sc.erfcinv, 1, -0.0),
(sc.erfcinv, 2, -np.inf),
(sc.erfcinv, -100, np.nan),
(sc.erfcinv, 100, np.nan),
],
ids=[
'erfinv at lower bound',
'erfinv at midpoint',
'erfinv at upper bound',
'erfinv below lower bound',
'erfinv above upper bound',
'erfcinv at lower bound',
'erfcinv at midpoint',
'erfcinv at upper bound',
'erfcinv below lower bound',
'erfcinv above upper bound',
]
)
def test_domain_bounds(self, f, x, y):
assert_equal(f(x), y)
def test_erfinv_asympt(self):
# regression test for gh-12758: erfinv(x) loses precision at small x
# expected values precomputed with mpmath:
# >>> mpmath.mp.dps = 100
# >>> expected = [float(mpmath.erfinv(t)) for t in x]
x = np.array([1e-20, 1e-15, 1e-14, 1e-10, 1e-8, 0.9e-7, 1.1e-7, 1e-6])
expected = np.array([8.86226925452758e-21,
8.862269254527581e-16,
8.86226925452758e-15,
8.862269254527581e-11,
8.86226925452758e-09,
7.97604232907484e-08,
9.74849617998037e-08,
8.8622692545299e-07])
assert_allclose(sc.erfinv(x), expected,
rtol=1e-15)
# also test the roundtrip consistency
assert_allclose(sc.erf(sc.erfinv(x)),
x,
rtol=5e-15)
| 3,059
| 33
| 78
|
py
|
scipy
|
scipy-main/scipy/special/tests/test_cosine_distr.py
|
import numpy as np
from numpy.testing import assert_allclose
import pytest
from scipy.special._ufuncs import _cosine_cdf, _cosine_invcdf
# These values are (x, p) where p is the expected exact value of
# _cosine_cdf(x). These values will be tested for exact agreement.
# (x, p) pairs where p is the exact value of _cosine_cdf(x); the CDF
# is exactly 0/1 outside [-pi, pi] and exactly 0.5 at the center.
_coscdf_exact = [
    (-4.0, 0.0),
    (0, 0.5),
    (np.pi, 1.0),
    (4.0, 1.0),
]


@pytest.mark.parametrize("x, expected", _coscdf_exact)
def test_cosine_cdf_exact(x, expected):
    """The cosine-distribution CDF is exact at these points."""
    assert _cosine_cdf(x) == expected
# These values are (x, p), where p is the expected value of
# _cosine_cdf(x). The expected values were computed with mpmath using
# 50 digits of precision. These values will be tested for agreement
# with the computed values using a very small relative tolerance.
# The value at -np.pi is not 0, because -np.pi does not equal -π.
_coscdf_close = [
(3.1409, 0.999999999991185),
(2.25, 0.9819328173287907),
# -1.6 is the threshold below which the Pade approximant is used.
(-1.599, 0.08641959838382553),
(-1.601, 0.086110582992713),
(-2.0, 0.0369709335961611),
(-3.0, 7.522387241801384e-05),
(-3.1415, 2.109869685443648e-14),
(-3.14159, 4.956444476505336e-19),
(-np.pi, 4.871934450264861e-50),
]
@pytest.mark.parametrize("x, expected", _coscdf_close)
def test_cosine_cdf(x, expected):
assert_allclose(_cosine_cdf(x), expected, rtol=5e-15)
# These values are (p, x) where x is the expected exact value of
# _cosine_invcdf(p). These values will be tested for exact agreement.
_cosinvcdf_exact = [
(0.0, -np.pi),
(0.5, 0.0),
(1.0, np.pi),
]
@pytest.mark.parametrize("p, expected", _cosinvcdf_exact)
def test_cosine_invcdf_exact(p, expected):
assert _cosine_invcdf(p) == expected
def test_cosine_invcdf_invalid_p():
    """Probabilities outside [0, 1] map to nan."""
    result = _cosine_invcdf([-0.1, 1.1])
    assert np.isnan(result).all()
# These values are (p, x), where x is the expected value of _cosine_invcdf(p).
# The expected values were computed with mpmath using 50 digits of precision.
_cosinvcdf_close = [
(1e-50, -np.pi),
(1e-14, -3.1415204137058454),
(1e-08, -3.1343686589124524),
(0.0018001, -2.732563923138336),
(0.010, -2.41276589008678),
(0.060, -1.7881244975330157),
(0.125, -1.3752523669869274),
(0.250, -0.831711193579736),
(0.400, -0.3167954512395289),
(0.419, -0.25586025626919906),
(0.421, -0.24947570750445663),
(0.750, 0.831711193579736),
(0.940, 1.7881244975330153),
(0.9999999996, 3.1391220839917167),
]
@pytest.mark.parametrize("p, expected", _cosinvcdf_close)
def test_cosine_invcdf(p, expected):
assert_allclose(_cosine_invcdf(p), expected, rtol=1e-14)
| 2,689
| 31.02381
| 78
|
py
|
scipy
|
scipy-main/scipy/special/tests/test_gammainc.py
|
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
import scipy.special as sc
from scipy.special._testutils import FuncData
INVALID_POINTS = [
(1, -1),
(0, 0),
(-1, 1),
(np.nan, 1),
(1, np.nan)
]
class TestGammainc:
@pytest.mark.parametrize('a, x', INVALID_POINTS)
def test_domain(self, a, x):
assert np.isnan(sc.gammainc(a, x))
def test_a_eq_0_x_gt_0(self):
assert sc.gammainc(0, 1) == 1
@pytest.mark.parametrize('a, x, desired', [
(np.inf, 1, 0),
(np.inf, 0, 0),
(np.inf, np.inf, np.nan),
(1, np.inf, 1)
])
def test_infinite_arguments(self, a, x, desired):
result = sc.gammainc(a, x)
if np.isnan(desired):
assert np.isnan(result)
else:
assert result == desired
def test_infinite_limits(self):
# Test that large arguments converge to the hard-coded limits
# at infinity.
assert_allclose(
sc.gammainc(1000, 100),
sc.gammainc(np.inf, 100),
atol=1e-200, # Use `atol` since the function converges to 0.
rtol=0
)
assert sc.gammainc(100, 1000) == sc.gammainc(100, np.inf)
def test_x_zero(self):
a = np.arange(1, 10)
assert_array_equal(sc.gammainc(a, 0), 0)
def test_limit_check(self):
result = sc.gammainc(1e-10, 1)
limit = sc.gammainc(0, 1)
assert np.isclose(result, limit)
def gammainc_line(self, x):
# The line a = x where a simpler asymptotic expansion (analog
# of DLMF 8.12.15) is available.
c = np.array([-1/3, -1/540, 25/6048, 101/155520,
-3184811/3695155200, -2745493/8151736420])
res = 0
xfac = 1
for ck in c:
res -= ck*xfac
xfac /= x
res /= np.sqrt(2*np.pi*x)
res += 0.5
return res
def test_line(self):
x = np.logspace(np.log10(25), 300, 500)
a = x
dataset = np.vstack((a, x, self.gammainc_line(x))).T
FuncData(sc.gammainc, dataset, (0, 1), 2, rtol=1e-11).check()
def test_roundtrip(self):
a = np.logspace(-5, 10, 100)
x = np.logspace(-5, 10, 100)
y = sc.gammaincinv(a, sc.gammainc(a, x))
assert_allclose(x, y, rtol=1e-10)
class TestGammaincc:
@pytest.mark.parametrize('a, x', INVALID_POINTS)
def test_domain(self, a, x):
assert np.isnan(sc.gammaincc(a, x))
def test_a_eq_0_x_gt_0(self):
assert sc.gammaincc(0, 1) == 0
@pytest.mark.parametrize('a, x, desired', [
(np.inf, 1, 1),
(np.inf, 0, 1),
(np.inf, np.inf, np.nan),
(1, np.inf, 0)
])
def test_infinite_arguments(self, a, x, desired):
result = sc.gammaincc(a, x)
if np.isnan(desired):
assert np.isnan(result)
else:
assert result == desired
def test_infinite_limits(self):
# Test that large arguments converge to the hard-coded limits
# at infinity.
assert sc.gammaincc(1000, 100) == sc.gammaincc(np.inf, 100)
assert_allclose(
sc.gammaincc(100, 1000),
sc.gammaincc(100, np.inf),
atol=1e-200, # Use `atol` since the function converges to 0.
rtol=0
)
def test_limit_check(self):
result = sc.gammaincc(1e-10,1)
limit = sc.gammaincc(0,1)
assert np.isclose(result, limit)
def test_x_zero(self):
a = np.arange(1, 10)
assert_array_equal(sc.gammaincc(a, 0), 1)
def test_roundtrip(self):
a = np.logspace(-5, 10, 100)
x = np.logspace(-5, 10, 100)
y = sc.gammainccinv(a, sc.gammaincc(a, x))
assert_allclose(x, y, rtol=1e-14)
| 3,815
| 26.854015
| 73
|
py
|
scipy
|
scipy-main/scipy/special/tests/test_powm1.py
|
import pytest
import numpy as np
from numpy.testing import assert_allclose
from scipy.special import powm1
# Expected values were computed with mpmath, e.g.
#
# >>> import mpmath
# >>> mpmath.mp.dps = 200
# >>> print(float(mpmath.powm1(2.0, 1e-7)))
# 6.931472045825965e-08
#
# Each case is (x, y, expected, rtol); the expected values are the
# mpmath references from the comment above.
powm1_test_cases = [
    (1.25, 0.75, 0.18217701125396976, 1e-15),
    (2.0, 1e-7, 6.931472045825965e-08, 1e-15),
    (25.0, 5e-11, 1.6094379125636148e-10, 1e-15),
    (0.99996, 0.75, -3.0000150002530058e-05, 1e-15),
    (0.9999999999990905, 20, -1.81898940353014e-11, 1e-15),
    (-1.25, 751.0, -6.017550852453444e+72, 2e-15),
]


@pytest.mark.parametrize('x, y, expected, rtol', powm1_test_cases)
def test_powm1(x, y, expected, rtol):
    """powm1(x, y) matches the mpmath reference to within rtol."""
    result = powm1(x, y)
    assert_allclose(result, expected, rtol=rtol)
@pytest.mark.parametrize('x, y, expected',
[(0.0, 0.0, 0.0),
(0.0, -1.5, np.inf),
(0.0, 1.75, -1.0),
(-1.5, 2.0, 1.25),
(-1.5, 3.0, -4.375),
(np.nan, 0.0, 0.0),
(1.0, np.nan, 0.0),
(1.0, np.inf, 0.0),
(1.0, -np.inf, 0.0),
(np.inf, 7.5, np.inf),
(np.inf, -7.5, -1.0),
(3.25, np.inf, np.inf),
(np.inf, np.inf, np.inf),
(np.inf, -np.inf, -1.0),
(np.inf, 0.0, 0.0),
(-np.inf, 0.0, 0.0),
(-np.inf, 2.0, np.inf),
(-np.inf, 3.0, -np.inf),
(-1.0, float(2**53 - 1), -2.0)])
def test_powm1_exact_cases(x, y, expected):
# Test cases where we have an exact expected value.
p = powm1(x, y)
assert p == expected
@pytest.mark.parametrize('x, y',
[(-1.25, 751.03),
(-1.25, np.inf),
(np.nan, np.nan),
(-np.inf, -np.inf),
(-np.inf, 2.5)])
def test_powm1_return_nan(x, y):
# Test cases where the expected return value is nan.
p = powm1(x, y)
assert np.isnan(p)
| 2,276
| 33.5
| 66
|
py
|
scipy
|
scipy-main/scipy/special/tests/test_sf_error.py
|
import sys
import warnings
from numpy.testing import assert_, assert_equal, IS_PYPY
import pytest
from pytest import raises as assert_raises
import scipy.special as sc
from scipy.special._ufuncs import _sf_error_test_function
_sf_error_code_map = {
# skip 'ok'
'singular': 1,
'underflow': 2,
'overflow': 3,
'slow': 4,
'loss': 5,
'no_result': 6,
'domain': 7,
'arg': 8,
'other': 9
}
_sf_error_actions = [
'ignore',
'warn',
'raise'
]
def _check_action(fun, args, action):
if action == 'warn':
with pytest.warns(sc.SpecialFunctionWarning):
fun(*args)
elif action == 'raise':
with assert_raises(sc.SpecialFunctionError):
fun(*args)
else:
# action == 'ignore', make sure there are no warnings/exceptions
with warnings.catch_warnings():
warnings.simplefilter("error")
fun(*args)
def test_geterr():
err = sc.geterr()
for key, value in err.items():
assert_(key in _sf_error_code_map)
assert_(value in _sf_error_actions)
def test_seterr():
entry_err = sc.geterr()
try:
for category, error_code in _sf_error_code_map.items():
for action in _sf_error_actions:
geterr_olderr = sc.geterr()
seterr_olderr = sc.seterr(**{category: action})
assert_(geterr_olderr == seterr_olderr)
newerr = sc.geterr()
assert_(newerr[category] == action)
geterr_olderr.pop(category)
newerr.pop(category)
assert_(geterr_olderr == newerr)
_check_action(_sf_error_test_function, (error_code,), action)
finally:
sc.seterr(**entry_err)
@pytest.mark.skipif(IS_PYPY, reason="Test not meaningful on PyPy")
def test_sf_error_special_refcount():
# Regression test for gh-16233.
# Check that the reference count of scipy.special is not increased
# when a SpecialFunctionError is raised.
refcount_before = sys.getrefcount(sc)
with sc.errstate(all='raise'):
with pytest.raises(sc.SpecialFunctionError, match='domain error'):
sc.ndtri(2.0)
refcount_after = sys.getrefcount(sc)
assert refcount_after == refcount_before
def test_errstate_pyx_basic():
olderr = sc.geterr()
with sc.errstate(singular='raise'):
with assert_raises(sc.SpecialFunctionError):
sc.loggamma(0)
assert_equal(olderr, sc.geterr())
def test_errstate_c_basic():
olderr = sc.geterr()
with sc.errstate(domain='raise'):
with assert_raises(sc.SpecialFunctionError):
sc.spence(-1)
assert_equal(olderr, sc.geterr())
def test_errstate_cpp_basic():
olderr = sc.geterr()
with sc.errstate(underflow='raise'):
with assert_raises(sc.SpecialFunctionError):
sc.wrightomega(-1000)
assert_equal(olderr, sc.geterr())
def test_errstate():
for category, error_code in _sf_error_code_map.items():
for action in _sf_error_actions:
olderr = sc.geterr()
with sc.errstate(**{category: action}):
_check_action(_sf_error_test_function, (error_code,), action)
assert_equal(olderr, sc.geterr())
def test_errstate_all_but_one():
olderr = sc.geterr()
with sc.errstate(all='raise', singular='ignore'):
sc.gammaln(0)
with assert_raises(sc.SpecialFunctionError):
sc.spence(-1.0)
assert_equal(olderr, sc.geterr())
| 3,521
| 27.634146
| 77
|
py
|
scipy
|
scipy-main/scipy/special/tests/test_sici.py
|
import numpy as np
import scipy.special as sc
from scipy.special._testutils import FuncData
def test_sici_consistency():
# Make sure the implementation of sici for real arguments agrees
# with the implementation of sici for complex arguments.
# On the negative real axis Cephes drops the imaginary part in ci
def sici(x):
si, ci = sc.sici(x + 0j)
return si.real, ci.real
x = np.r_[-np.logspace(8, -30, 200), 0, np.logspace(-30, 8, 200)]
si, ci = sc.sici(x)
dataset = np.column_stack((x, si, ci))
FuncData(sici, dataset, 0, (1, 2), rtol=1e-12).check()
def test_shichi_consistency():
# Make sure the implementation of shichi for real arguments agrees
# with the implementation of shichi for complex arguments.
# On the negative real axis Cephes drops the imaginary part in chi
def shichi(x):
shi, chi = sc.shichi(x + 0j)
return shi.real, chi.real
# Overflow happens quickly, so limit range
x = np.r_[-np.logspace(np.log10(700), -30, 200), 0,
np.logspace(-30, np.log10(700), 200)]
shi, chi = sc.shichi(x)
dataset = np.column_stack((x, shi, chi))
FuncData(shichi, dataset, 0, (1, 2), rtol=1e-14).check()
| 1,227
| 32.189189
| 70
|
py
|
scipy
|
scipy-main/scipy/special/tests/test_gamma.py
|
import numpy as np
import scipy.special as sc
class TestRgamma:
def test_gh_11315(self):
assert sc.rgamma(-35) == 0
def test_rgamma_zeros(self):
x = np.array([0, -10, -100, -1000, -10000])
assert np.all(sc.rgamma(x) == 0)
| 258
| 18.923077
| 51
|
py
|
scipy
|
scipy-main/scipy/special/tests/test_logsumexp.py
|
import numpy as np
from numpy.testing import (assert_almost_equal, assert_equal, assert_allclose,
assert_array_almost_equal, assert_)
from scipy.special import logsumexp, softmax
def test_logsumexp():
# Test whether logsumexp() function correctly handles large inputs.
a = np.arange(200)
desired = np.log(np.sum(np.exp(a)))
assert_almost_equal(logsumexp(a), desired)
# Now test with large numbers
b = [1000, 1000]
desired = 1000.0 + np.log(2.0)
assert_almost_equal(logsumexp(b), desired)
n = 1000
b = np.full(n, 10000, dtype='float64')
desired = 10000.0 + np.log(n)
assert_almost_equal(logsumexp(b), desired)
x = np.array([1e-40] * 1000000)
logx = np.log(x)
X = np.vstack([x, x])
logX = np.vstack([logx, logx])
assert_array_almost_equal(np.exp(logsumexp(logX)), X.sum())
assert_array_almost_equal(np.exp(logsumexp(logX, axis=0)), X.sum(axis=0))
assert_array_almost_equal(np.exp(logsumexp(logX, axis=1)), X.sum(axis=1))
# Handling special values properly
assert_equal(logsumexp(np.inf), np.inf)
assert_equal(logsumexp(-np.inf), -np.inf)
assert_equal(logsumexp(np.nan), np.nan)
assert_equal(logsumexp([-np.inf, -np.inf]), -np.inf)
# Handling an array with different magnitudes on the axes
assert_array_almost_equal(logsumexp([[1e10, 1e-10],
[-1e10, -np.inf]], axis=-1),
[1e10, -1e10])
# Test keeping dimensions
assert_array_almost_equal(logsumexp([[1e10, 1e-10],
[-1e10, -np.inf]],
axis=-1,
keepdims=True),
[[1e10], [-1e10]])
# Test multiple axes
assert_array_almost_equal(logsumexp([[1e10, 1e-10],
[-1e10, -np.inf]],
axis=(-1,-2)),
1e10)
def test_logsumexp_b():
a = np.arange(200)
b = np.arange(200, 0, -1)
desired = np.log(np.sum(b*np.exp(a)))
assert_almost_equal(logsumexp(a, b=b), desired)
a = [1000, 1000]
b = [1.2, 1.2]
desired = 1000 + np.log(2 * 1.2)
assert_almost_equal(logsumexp(a, b=b), desired)
x = np.array([1e-40] * 100000)
b = np.linspace(1, 1000, 100000)
logx = np.log(x)
X = np.vstack((x, x))
logX = np.vstack((logx, logx))
B = np.vstack((b, b))
assert_array_almost_equal(np.exp(logsumexp(logX, b=B)), (B * X).sum())
assert_array_almost_equal(np.exp(logsumexp(logX, b=B, axis=0)),
(B * X).sum(axis=0))
assert_array_almost_equal(np.exp(logsumexp(logX, b=B, axis=1)),
(B * X).sum(axis=1))
def test_logsumexp_sign():
a = [1,1,1]
b = [1,-1,-1]
r, s = logsumexp(a, b=b, return_sign=True)
assert_almost_equal(r,1)
assert_equal(s,-1)
def test_logsumexp_sign_zero():
a = [1,1]
b = [1,-1]
r, s = logsumexp(a, b=b, return_sign=True)
assert_(not np.isfinite(r))
assert_(not np.isnan(r))
assert_(r < 0)
assert_equal(s,0)
def test_logsumexp_sign_shape():
a = np.ones((1,2,3,4))
b = np.ones_like(a)
r, s = logsumexp(a, axis=2, b=b, return_sign=True)
assert_equal(r.shape, s.shape)
assert_equal(r.shape, (1,2,4))
r, s = logsumexp(a, axis=(1,3), b=b, return_sign=True)
assert_equal(r.shape, s.shape)
assert_equal(r.shape, (1,3))
def test_logsumexp_shape():
a = np.ones((1, 2, 3, 4))
b = np.ones_like(a)
r = logsumexp(a, axis=2, b=b)
assert_equal(r.shape, (1, 2, 4))
r = logsumexp(a, axis=(1, 3), b=b)
assert_equal(r.shape, (1, 3))
def test_logsumexp_b_zero():
a = [1,10000]
b = [1,0]
assert_almost_equal(logsumexp(a, b=b), 1)
def test_logsumexp_b_shape():
a = np.zeros((4,1,2,1))
b = np.ones((3,1,5))
logsumexp(a, b=b)
def test_softmax_fixtures():
assert_allclose(softmax([1000, 0, 0, 0]), np.array([1, 0, 0, 0]),
rtol=1e-13)
assert_allclose(softmax([1, 1]), np.array([.5, .5]), rtol=1e-13)
assert_allclose(softmax([0, 1]), np.array([1, np.e])/(1 + np.e),
rtol=1e-13)
# Expected value computed using mpmath (with mpmath.mp.dps = 200) and then
# converted to float.
x = np.arange(4)
expected = np.array([0.03205860328008499,
0.08714431874203256,
0.23688281808991013,
0.6439142598879722])
assert_allclose(softmax(x), expected, rtol=1e-13)
# Translation property. If all the values are changed by the same amount,
# the softmax result does not change.
assert_allclose(softmax(x + 100), expected, rtol=1e-13)
# When axis=None, softmax operates on the entire array, and preserves
# the shape.
assert_allclose(softmax(x.reshape(2, 2)), expected.reshape(2, 2),
rtol=1e-13)
def test_softmax_multi_axes():
assert_allclose(softmax([[1000, 0], [1000, 0]], axis=0),
np.array([[.5, .5], [.5, .5]]), rtol=1e-13)
assert_allclose(softmax([[1000, 0], [1000, 0]], axis=1),
np.array([[1, 0], [1, 0]]), rtol=1e-13)
# Expected value computed using mpmath (with mpmath.mp.dps = 200) and then
# converted to float.
x = np.array([[-25, 0, 25, 50],
[1, 325, 749, 750]])
expected = np.array([[2.678636961770877e-33,
1.9287498479371314e-22,
1.3887943864771144e-11,
0.999999999986112],
[0.0,
1.9444526359919372e-185,
0.2689414213699951,
0.7310585786300048]])
assert_allclose(softmax(x, axis=1), expected, rtol=1e-13)
assert_allclose(softmax(x.T, axis=0), expected.T, rtol=1e-13)
# 3-d input, with a tuple for the axis.
x3d = x.reshape(2, 2, 2)
assert_allclose(softmax(x3d, axis=(1, 2)), expected.reshape(2, 2, 2),
rtol=1e-13)
| 6,180
| 30.697436
| 78
|
py
|
scipy
|
scipy-main/scipy/special/tests/test_boxcox.py
|
import numpy as np
from numpy.testing import assert_equal, assert_almost_equal, assert_allclose
from scipy.special import boxcox, boxcox1p, inv_boxcox, inv_boxcox1p
# There are more tests of boxcox and boxcox1p in test_mpmath.py.
def test_boxcox_basic():
x = np.array([0.5, 1, 2, 4])
# lambda = 0 => y = log(x)
y = boxcox(x, 0)
assert_almost_equal(y, np.log(x))
# lambda = 1 => y = x - 1
y = boxcox(x, 1)
assert_almost_equal(y, x - 1)
# lambda = 2 => y = 0.5*(x**2 - 1)
y = boxcox(x, 2)
assert_almost_equal(y, 0.5*(x**2 - 1))
# x = 0 and lambda > 0 => y = -1 / lambda
lam = np.array([0.5, 1, 2])
y = boxcox(0, lam)
assert_almost_equal(y, -1.0 / lam)
def test_boxcox_underflow():
x = 1 + 1e-15
lmbda = 1e-306
y = boxcox(x, lmbda)
assert_allclose(y, np.log(x), rtol=1e-14)
def test_boxcox_nonfinite():
# x < 0 => y = nan
x = np.array([-1, -1, -0.5])
y = boxcox(x, [0.5, 2.0, -1.5])
assert_equal(y, np.array([np.nan, np.nan, np.nan]))
# x = 0 and lambda <= 0 => y = -inf
x = 0
y = boxcox(x, [-2.5, 0])
assert_equal(y, np.array([-np.inf, -np.inf]))
def test_boxcox1p_basic():
x = np.array([-0.25, -1e-20, 0, 1e-20, 0.25, 1, 3])
# lambda = 0 => y = log(1+x)
y = boxcox1p(x, 0)
assert_almost_equal(y, np.log1p(x))
# lambda = 1 => y = x
y = boxcox1p(x, 1)
assert_almost_equal(y, x)
# lambda = 2 => y = 0.5*((1+x)**2 - 1) = 0.5*x*(2 + x)
y = boxcox1p(x, 2)
assert_almost_equal(y, 0.5*x*(2 + x))
# x = -1 and lambda > 0 => y = -1 / lambda
lam = np.array([0.5, 1, 2])
y = boxcox1p(-1, lam)
assert_almost_equal(y, -1.0 / lam)
def test_boxcox1p_underflow():
x = np.array([1e-15, 1e-306])
lmbda = np.array([1e-306, 1e-18])
y = boxcox1p(x, lmbda)
assert_allclose(y, np.log1p(x), rtol=1e-14)
def test_boxcox1p_nonfinite():
# x < -1 => y = nan
x = np.array([-2, -2, -1.5])
y = boxcox1p(x, [0.5, 2.0, -1.5])
assert_equal(y, np.array([np.nan, np.nan, np.nan]))
# x = -1 and lambda <= 0 => y = -inf
x = -1
y = boxcox1p(x, [-2.5, 0])
assert_equal(y, np.array([-np.inf, -np.inf]))
def test_inv_boxcox():
x = np.array([0., 1., 2.])
lam = np.array([0., 1., 2.])
y = boxcox(x, lam)
x2 = inv_boxcox(y, lam)
assert_almost_equal(x, x2)
x = np.array([0., 1., 2.])
lam = np.array([0., 1., 2.])
y = boxcox1p(x, lam)
x2 = inv_boxcox1p(y, lam)
assert_almost_equal(x, x2)
def test_inv_boxcox1p_underflow():
x = 1e-15
lam = 1e-306
y = inv_boxcox1p(x, lam)
assert_allclose(y, x, rtol=1e-14)
| 2,672
| 23.981308
| 76
|
py
|
scipy
|
scipy-main/scipy/special/tests/test_faddeeva.py
|
import pytest
import numpy as np
from numpy.testing import assert_allclose
import scipy.special as sc
from scipy.special._testutils import FuncData
class TestVoigtProfile:
@pytest.mark.parametrize('x, sigma, gamma', [
(np.nan, 1, 1),
(0, np.nan, 1),
(0, 1, np.nan),
(1, np.nan, 0),
(np.nan, 1, 0),
(1, 0, np.nan),
(np.nan, 0, 1),
(np.nan, 0, 0)
])
def test_nan(self, x, sigma, gamma):
assert np.isnan(sc.voigt_profile(x, sigma, gamma))
@pytest.mark.parametrize('x, desired', [
(-np.inf, 0),
(np.inf, 0)
])
def test_inf(self, x, desired):
assert sc.voigt_profile(x, 1, 1) == desired
def test_against_mathematica(self):
# Results obtained from Mathematica by computing
#
# PDF[VoigtDistribution[gamma, sigma], x]
#
points = np.array([
[-7.89, 45.06, 6.66, 0.0077921073660388806401],
[-0.05, 7.98, 24.13, 0.012068223646769913478],
[-13.98, 16.83, 42.37, 0.0062442236362132357833],
[-12.66, 0.21, 6.32, 0.010052516161087379402],
[11.34, 4.25, 21.96, 0.0113698923627278917805],
[-11.56, 20.40, 30.53, 0.0076332760432097464987],
[-9.17, 25.61, 8.32, 0.011646345779083005429],
[16.59, 18.05, 2.50, 0.013637768837526809181],
[9.11, 2.12, 39.33, 0.0076644040807277677585],
[-43.33, 0.30, 45.68, 0.0036680463875330150996]
])
FuncData(
sc.voigt_profile,
points,
(0, 1, 2),
3,
atol=0,
rtol=1e-15
).check()
def test_symmetry(self):
x = np.linspace(0, 10, 20)
assert_allclose(
sc.voigt_profile(x, 1, 1),
sc.voigt_profile(-x, 1, 1),
rtol=1e-15,
atol=0
)
@pytest.mark.parametrize('x, sigma, gamma, desired', [
(0, 0, 0, np.inf),
(1, 0, 0, 0)
])
def test_corner_cases(self, x, sigma, gamma, desired):
assert sc.voigt_profile(x, sigma, gamma) == desired
@pytest.mark.parametrize('sigma1, gamma1, sigma2, gamma2', [
(0, 1, 1e-16, 1),
(1, 0, 1, 1e-16),
(0, 0, 1e-16, 1e-16)
])
def test_continuity(self, sigma1, gamma1, sigma2, gamma2):
x = np.linspace(1, 10, 20)
assert_allclose(
sc.voigt_profile(x, sigma1, gamma1),
sc.voigt_profile(x, sigma2, gamma2),
rtol=1e-16,
atol=1e-16
)
| 2,568
| 28.872093
| 64
|
py
|
scipy
|
scipy-main/scipy/special/tests/test_precompute_utils.py
|
import pytest
from scipy.special._testutils import MissingModule, check_version
from scipy.special._mptestutils import mp_assert_allclose
from scipy.special._precompute.utils import lagrange_inversion
try:
import sympy
except ImportError:
sympy = MissingModule('sympy')
try:
import mpmath as mp
except ImportError:
mp = MissingModule('mpmath')
@pytest.mark.slow
@check_version(sympy, '0.7')
@check_version(mp, '0.19')
class TestInversion:
@pytest.mark.xfail_on_32bit("rtol only 2e-9, see gh-6938")
def test_log(self):
with mp.workdps(30):
logcoeffs = mp.taylor(lambda x: mp.log(1 + x), 0, 10)
expcoeffs = mp.taylor(lambda x: mp.exp(x) - 1, 0, 10)
invlogcoeffs = lagrange_inversion(logcoeffs)
mp_assert_allclose(invlogcoeffs, expcoeffs)
@pytest.mark.xfail_on_32bit("rtol only 1e-15, see gh-6938")
def test_sin(self):
with mp.workdps(30):
sincoeffs = mp.taylor(mp.sin, 0, 10)
asincoeffs = mp.taylor(mp.asin, 0, 10)
invsincoeffs = lagrange_inversion(sincoeffs)
mp_assert_allclose(invsincoeffs, asincoeffs, atol=1e-30)
| 1,165
| 30.513514
| 68
|
py
|
scipy
|
scipy-main/scipy/special/tests/__init__.py
| 0
| 0
| 0
|
py
|
|
scipy
|
scipy-main/scipy/special/tests/test_ndtri_exp.py
|
import pytest
import numpy as np
from numpy.testing import assert_equal, assert_allclose
from scipy.special import log_ndtr, ndtri_exp
from scipy.special._testutils import assert_func_equal
def log_ndtr_ndtri_exp(y):
return log_ndtr(ndtri_exp(y))
@pytest.fixture(scope="class")
def uniform_random_points():
random_state = np.random.RandomState(1234)
points = random_state.random_sample(1000)
return points
class TestNdtriExp:
"""Tests that ndtri_exp is sufficiently close to an inverse of log_ndtr.
We have separate tests for the five intervals (-inf, -10),
[-10, -2), [-2, -0.14542), [-0.14542, -1e-6), and [-1e-6, 0).
ndtri_exp(y) is computed in three different ways depending on if y
is in (-inf, -2), [-2, log(1 - exp(-2))], or [log(1 - exp(-2), 0).
Each of these intervals is given its own test with two additional tests
for handling very small values and values very close to zero.
"""
@pytest.mark.parametrize(
"test_input", [-1e1, -1e2, -1e10, -1e20, -np.finfo(float).max]
)
def test_very_small_arg(self, test_input, uniform_random_points):
scale = test_input
points = scale * (0.5 * uniform_random_points + 0.5)
assert_func_equal(
log_ndtr_ndtri_exp,
lambda y: y, points,
rtol=1e-14,
nan_ok=True
)
@pytest.mark.parametrize(
"interval,expected_rtol",
[
((-10, -2), 1e-14),
((-2, -0.14542), 1e-12),
((-0.14542, -1e-6), 1e-10),
((-1e-6, 0), 1e-6),
],
)
def test_in_interval(self, interval, expected_rtol, uniform_random_points):
left, right = interval
points = (right - left) * uniform_random_points + left
assert_func_equal(
log_ndtr_ndtri_exp,
lambda y: y, points,
rtol=expected_rtol,
nan_ok=True
)
def test_extreme(self):
# bigneg is not quite the largest negative double precision value.
# Here's why:
# The round-trip calculation
# y = ndtri_exp(bigneg)
# bigneg2 = log_ndtr(y)
# where bigneg is a very large negative value, would--with infinite
# precision--result in bigneg2 == bigneg. When bigneg is large enough,
# y is effectively equal to -sqrt(2)*sqrt(-bigneg), and log_ndtr(y) is
# effectively -(y/sqrt(2))**2. If we use bigneg = np.finfo(float).min,
# then by construction, the theoretical value is the most negative
# finite value that can be represented with 64 bit float point. This
# means tiny changes in how the computation proceeds can result in the
# return value being -inf. (E.g. changing the constant representation
# of 1/sqrt(2) from 0.7071067811865475--which is the value returned by
# 1/np.sqrt(2)--to 0.7071067811865476--which is the most accurate 64
# bit floating point representation of 1/sqrt(2)--results in the
# round-trip that starts with np.finfo(float).min returning -inf. So
# we'll move the bigneg value a few ULPs towards 0 to avoid this
# sensitivity.
# Use the reduce method to apply nextafter four times.
bigneg = np.nextafter.reduce([np.finfo(float).min, 0, 0, 0, 0])
# tinyneg is approx. -2.225e-308.
tinyneg = -np.finfo(float).tiny
x = np.array([tinyneg, bigneg])
result = log_ndtr_ndtri_exp(x)
assert_allclose(result, x, rtol=1e-12)
def test_asymptotes(self):
assert_equal(ndtri_exp([-np.inf, 0.0]), [-np.inf, np.inf])
def test_outside_domain(self):
assert np.isnan(ndtri_exp(1.0))
| 3,708
| 38.042105
| 79
|
py
|
scipy
|
scipy-main/scipy/special/tests/test_cdft_asymptotic.py
|
# gh-14777 regression tests
# Test stdtr and stdtrit with infinite df and large values of df
import numpy as np
from numpy.testing import assert_allclose, assert_equal
from scipy.special import stdtr, stdtrit, ndtr, ndtri
def test_stdtr_vs_R_large_df():
df = [1e10, 1e12, 1e120, np.inf]
t = 1.
res = stdtr(df, t)
# R Code:
# options(digits=20)
# pt(1., c(1e10, 1e12, 1e120, Inf))
res_R = [0.84134474605644460343,
0.84134474606842180044,
0.84134474606854281475,
0.84134474606854292578]
assert_allclose(res, res_R, rtol=2e-15)
# last value should also agree with ndtr
assert_equal(res[3], ndtr(1.))
def test_stdtrit_vs_R_large_df():
df = [1e10, 1e12, 1e120, np.inf]
p = 0.1
res = stdtrit(df, p)
# R Code:
# options(digits=20)
# qt(0.1, c(1e10, 1e12, 1e120, Inf))
res_R = [-1.2815515656292593150,
-1.2815515655454472466,
-1.2815515655446008125,
-1.2815515655446008125]
assert_allclose(res, res_R, rtol=1e-15)
# last value should also agree with ndtri
assert_equal(res[3], ndtri(0.1))
def test_stdtr_stdtri_invalid():
# a mix of large and inf df with t/p equal to nan
df = [1e10, 1e12, 1e120, np.inf]
x = np.nan
res1 = stdtr(df, x)
res2 = stdtrit(df, x)
res_ex = 4*[np.nan]
assert_equal(res1, res_ex)
assert_equal(res2, res_ex)
| 1,429
| 27.6
| 64
|
py
|
scipy
|
scipy-main/scipy/special/tests/test_trig.py
|
import numpy as np
from numpy.testing import assert_equal, assert_allclose, suppress_warnings
from scipy.special._ufuncs import _sinpi as sinpi
from scipy.special._ufuncs import _cospi as cospi
def test_integer_real_part():
x = np.arange(-100, 101)
y = np.hstack((-np.linspace(310, -30, 10), np.linspace(-30, 310, 10)))
x, y = np.meshgrid(x, y)
z = x + 1j*y
# In the following we should be *exactly* right
res = sinpi(z)
assert_equal(res.real, 0.0)
res = cospi(z)
assert_equal(res.imag, 0.0)
def test_half_integer_real_part():
x = np.arange(-100, 101) + 0.5
y = np.hstack((-np.linspace(310, -30, 10), np.linspace(-30, 310, 10)))
x, y = np.meshgrid(x, y)
z = x + 1j*y
# In the following we should be *exactly* right
res = sinpi(z)
assert_equal(res.imag, 0.0)
res = cospi(z)
assert_equal(res.real, 0.0)
def test_intermediate_overlow():
# Make sure we avoid overflow in situations where cosh/sinh would
# overflow but the product with sin/cos would not
sinpi_pts = [complex(1 + 1e-14, 227),
complex(1e-35, 250),
complex(1e-301, 445)]
# Data generated with mpmath
sinpi_std = [complex(-8.113438309924894e+295, -np.inf),
complex(1.9507801934611995e+306, np.inf),
complex(2.205958493464539e+306, np.inf)]
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "invalid value encountered in multiply")
for p, std in zip(sinpi_pts, sinpi_std):
assert_allclose(sinpi(p), std)
# Test for cosine, less interesting because cos(0) = 1.
p = complex(0.5 + 1e-14, 227)
std = complex(-8.113438309924894e+295, -np.inf)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "invalid value encountered in multiply")
assert_allclose(cospi(p), std)
def test_zero_sign():
y = sinpi(-0.0)
assert y == 0.0
assert np.signbit(y)
y = sinpi(0.0)
assert y == 0.0
assert not np.signbit(y)
y = cospi(0.5)
assert y == 0.0
assert not np.signbit(y)
| 2,097
| 30.313433
| 75
|
py
|
scipy
|
scipy-main/scipy/special/tests/test_kolmogorov.py
|
import itertools
import sys
import pytest
import numpy as np
from numpy.testing import assert_
from scipy.special._testutils import FuncData
from scipy.special import kolmogorov, kolmogi, smirnov, smirnovi
from scipy.special._ufuncs import (_kolmogc, _kolmogci, _kolmogp,
_smirnovc, _smirnovci, _smirnovp)
_rtol = 1e-10
class TestSmirnov:
def test_nan(self):
assert_(np.isnan(smirnov(1, np.nan)))
def test_basic(self):
dataset = [(1, 0.1, 0.9),
(1, 0.875, 0.125),
(2, 0.875, 0.125 * 0.125),
(3, 0.875, 0.125 * 0.125 * 0.125)]
dataset = np.asarray(dataset)
FuncData(smirnov, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
dataset[:, -1] = 1 - dataset[:, -1]
FuncData(_smirnovc, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
def test_x_equals_0(self):
dataset = [(n, 0, 1) for n in itertools.chain(range(2, 20), range(1010, 1020))]
dataset = np.asarray(dataset)
FuncData(smirnov, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
dataset[:, -1] = 1 - dataset[:, -1]
FuncData(_smirnovc, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
def test_x_equals_1(self):
dataset = [(n, 1, 0) for n in itertools.chain(range(2, 20), range(1010, 1020))]
dataset = np.asarray(dataset)
FuncData(smirnov, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
dataset[:, -1] = 1 - dataset[:, -1]
FuncData(_smirnovc, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
def test_x_equals_0point5(self):
dataset = [(1, 0.5, 0.5),
(2, 0.5, 0.25),
(3, 0.5, 0.166666666667),
(4, 0.5, 0.09375),
(5, 0.5, 0.056),
(6, 0.5, 0.0327932098765),
(7, 0.5, 0.0191958707681),
(8, 0.5, 0.0112953186035),
(9, 0.5, 0.00661933257355),
(10, 0.5, 0.003888705)]
dataset = np.asarray(dataset)
FuncData(smirnov, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
dataset[:, -1] = 1 - dataset[:, -1]
FuncData(_smirnovc, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
def test_n_equals_1(self):
x = np.linspace(0, 1, 101, endpoint=True)
dataset = np.column_stack([[1]*len(x), x, 1-x])
FuncData(smirnov, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
dataset[:, -1] = 1 - dataset[:, -1]
FuncData(_smirnovc, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
def test_n_equals_2(self):
x = np.linspace(0.5, 1, 101, endpoint=True)
p = np.power(1-x, 2)
n = np.array([2] * len(x))
dataset = np.column_stack([n, x, p])
FuncData(smirnov, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
dataset[:, -1] = 1 - dataset[:, -1]
FuncData(_smirnovc, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
def test_n_equals_3(self):
x = np.linspace(0.7, 1, 31, endpoint=True)
p = np.power(1-x, 3)
n = np.array([3] * len(x))
dataset = np.column_stack([n, x, p])
FuncData(smirnov, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
dataset[:, -1] = 1 - dataset[:, -1]
FuncData(_smirnovc, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
def test_n_large(self):
# test for large values of n
# Probabilities should go down as n goes up
x = 0.4
pvals = np.array([smirnov(n, x) for n in range(400, 1100, 20)])
dfs = np.diff(pvals)
assert_(np.all(dfs <= 0), msg='Not all diffs negative %s' % dfs)
class TestSmirnovi:
def test_nan(self):
assert_(np.isnan(smirnovi(1, np.nan)))
def test_basic(self):
dataset = [(1, 0.4, 0.6),
(1, 0.6, 0.4),
(1, 0.99, 0.01),
(1, 0.01, 0.99),
(2, 0.125 * 0.125, 0.875),
(3, 0.125 * 0.125 * 0.125, 0.875),
(10, 1.0 / 16 ** 10, 1 - 1.0 / 16)]
dataset = np.asarray(dataset)
FuncData(smirnovi, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
dataset[:, 1] = 1 - dataset[:, 1]
FuncData(_smirnovci, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
def test_x_equals_0(self):
dataset = [(n, 0, 1) for n in itertools.chain(range(2, 20), range(1010, 1020))]
dataset = np.asarray(dataset)
FuncData(smirnovi, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
dataset[:, 1] = 1 - dataset[:, 1]
FuncData(_smirnovci, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
def test_x_equals_1(self):
dataset = [(n, 1, 0) for n in itertools.chain(range(2, 20), range(1010, 1020))]
dataset = np.asarray(dataset)
FuncData(smirnovi, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
dataset[:, 1] = 1 - dataset[:, 1]
FuncData(_smirnovci, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
def test_n_equals_1(self):
pp = np.linspace(0, 1, 101, endpoint=True)
# dataset = np.array([(1, p, 1-p) for p in pp])
dataset = np.column_stack([[1]*len(pp), pp, 1-pp])
FuncData(smirnovi, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
dataset[:, 1] = 1 - dataset[:, 1]
FuncData(_smirnovci, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
def test_n_equals_2(self):
x = np.linspace(0.5, 1, 101, endpoint=True)
p = np.power(1-x, 2)
n = np.array([2] * len(x))
dataset = np.column_stack([n, p, x])
FuncData(smirnovi, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
dataset[:, 1] = 1 - dataset[:, 1]
FuncData(_smirnovci, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
def test_n_equals_3(self):
x = np.linspace(0.7, 1, 31, endpoint=True)
p = np.power(1-x, 3)
n = np.array([3] * len(x))
dataset = np.column_stack([n, p, x])
FuncData(smirnovi, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
dataset[:, 1] = 1 - dataset[:, 1]
FuncData(_smirnovci, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
def test_round_trip(self):
def _sm_smi(n, p):
return smirnov(n, smirnovi(n, p))
def _smc_smci(n, p):
return _smirnovc(n, _smirnovci(n, p))
dataset = [(1, 0.4, 0.4),
(1, 0.6, 0.6),
(2, 0.875, 0.875),
(3, 0.875, 0.875),
(3, 0.125, 0.125),
(10, 0.999, 0.999),
(10, 0.0001, 0.0001)]
dataset = np.asarray(dataset)
FuncData(_sm_smi, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
FuncData(_smc_smci, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
def test_x_equals_0point5(self):
dataset = [(1, 0.5, 0.5),
(2, 0.5, 0.366025403784),
(2, 0.25, 0.5),
(3, 0.5, 0.297156508177),
(4, 0.5, 0.255520481121),
(5, 0.5, 0.234559536069),
(6, 0.5, 0.21715965898),
(7, 0.5, 0.202722580034),
(8, 0.5, 0.190621765256),
(9, 0.5, 0.180363501362),
(10, 0.5, 0.17157867006)]
dataset = np.asarray(dataset)
FuncData(smirnovi, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
dataset[:, 1] = 1 - dataset[:, 1]
FuncData(_smirnovci, dataset, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
class TestSmirnovp:
def test_nan(self):
assert_(np.isnan(_smirnovp(1, np.nan)))
def test_basic(self):
# Check derivative at endpoints
n1_10 = np.arange(1, 10)
dataset0 = np.column_stack([n1_10, np.full_like(n1_10, 0), np.full_like(n1_10, -1)])
FuncData(_smirnovp, dataset0, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
n2_10 = np.arange(2, 10)
dataset1 = np.column_stack([n2_10, np.full_like(n2_10, 1.0), np.full_like(n2_10, 0)])
FuncData(_smirnovp, dataset1, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
def test_oneminusoneovern(self):
# Check derivative at x=1-1/n
n = np.arange(1, 20)
x = 1.0/n
xm1 = 1-1.0/n
pp1 = -n * x**(n-1)
pp1 -= (1-np.sign(n-2)**2) * 0.5 # n=2, x=0.5, 1-1/n = 0.5, need to adjust
dataset1 = np.column_stack([n, xm1, pp1])
FuncData(_smirnovp, dataset1, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
def test_oneovertwon(self):
# Check derivative at x=1/2n (Discontinuous at x=1/n, so check at x=1/2n)
n = np.arange(1, 20)
x = 1.0/2/n
pp = -(n*x+1) * (1+x)**(n-2)
dataset0 = np.column_stack([n, x, pp])
FuncData(_smirnovp, dataset0, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
def test_oneovern(self):
# Check derivative at x=1/n (Discontinuous at x=1/n, hard to tell if x==1/n, only use n=power of 2)
n = 2**np.arange(1, 10)
x = 1.0/n
pp = -(n*x+1) * (1+x)**(n-2) + 0.5
dataset0 = np.column_stack([n, x, pp])
FuncData(_smirnovp, dataset0, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
@pytest.mark.xfail(sys.maxsize <= 2**32,
reason="requires 64-bit platform")
def test_oneovernclose(self):
# Check derivative at x=1/n (Discontinuous at x=1/n, test on either side: x=1/n +/- 2epsilon)
n = np.arange(3, 20)
x = 1.0/n - 2*np.finfo(float).eps
pp = -(n*x+1) * (1+x)**(n-2)
dataset0 = np.column_stack([n, x, pp])
FuncData(_smirnovp, dataset0, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
x = 1.0/n + 2*np.finfo(float).eps
pp = -(n*x+1) * (1+x)**(n-2) + 1
dataset1 = np.column_stack([n, x, pp])
FuncData(_smirnovp, dataset1, (0, 1), 2, rtol=_rtol).check(dtypes=[int, float, float])
class TestKolmogorov:
    """Tests for ``kolmogorov`` (the Kolmogorov-Smirnov limiting survival
    function) together with its private complement ``_kolmogc`` and the
    inverse helpers ``kolmogi`` / ``_kolmogci``."""

    def test_nan(self):
        # NaN input must propagate to a NaN output.
        assert_(np.isnan(kolmogorov(np.nan)))

    def test_basic(self):
        # Spot-check (x, kolmogorov(x)) reference pairs.  The entry at
        # x = 0.8275735551899077 is the point where the SF equals exactly 0.5.
        dataset = [(0, 1.0),
                   (0.5, 0.96394524366487511),
                   (0.8275735551899077, 0.5000000000000000),
                   (1, 0.26999967167735456),
                   (2, 0.00067092525577969533)]
        dataset = np.asarray(dataset)
        FuncData(kolmogorov, dataset, (0,), 1, rtol=_rtol).check()

    def test_linspace(self):
        # SF reference values on an even grid x in [0, 2]; dataset_c holds
        # the complementary (CDF) values checked against _kolmogc.
        x = np.linspace(0, 2.0, 21)
        dataset = [1.0000000000000000, 1.0000000000000000, 0.9999999999994950,
                   0.9999906941986655, 0.9971923267772983, 0.9639452436648751,
                   0.8642827790506042, 0.7112351950296890, 0.5441424115741981,
                   0.3927307079406543, 0.2699996716773546, 0.1777181926064012,
                   0.1122496666707249, 0.0680922218447664, 0.0396818795381144,
                   0.0222179626165251, 0.0119520432391966, 0.0061774306344441,
                   0.0030676213475797, 0.0014636048371873, 0.0006709252557797]
        dataset_c = [0.0000000000000000, 6.609305242245699e-53, 5.050407338670114e-13,
                     9.305801334566668e-06, 0.0028076732227017, 0.0360547563351249,
                     0.1357172209493958, 0.2887648049703110, 0.4558575884258019,
                     0.6072692920593457, 0.7300003283226455, 0.8222818073935988,
                     0.8877503333292751, 0.9319077781552336, 0.9603181204618857,
                     0.9777820373834749, 0.9880479567608034, 0.9938225693655559,
                     0.9969323786524203, 0.9985363951628127, 0.9993290747442203]
        dataset = np.column_stack([x, dataset])
        FuncData(kolmogorov, dataset, (0,), 1, rtol=_rtol).check()
        dataset_c = np.column_stack([x, dataset_c])
        FuncData(_kolmogc, dataset_c, (0,), 1, rtol=_rtol).check()

    def test_linspacei(self):
        # Inverse functions on an even probability grid p in [0, 1].
        # kolmogi diverges as p -> 0 and _kolmogci as p -> 1, hence the
        # infinities at the endpoints, which are excluded from the checks.
        p = np.linspace(0, 1.0, 21, endpoint=True)
        dataset = [np.inf, 1.3580986393225507, 1.2238478702170823,
                   1.1379465424937751, 1.0727491749396481, 1.0191847202536859,
                   0.9730633753323726, 0.9320695842357622, 0.8947644549851197,
                   0.8601710725555463, 0.8275735551899077, 0.7964065373291559,
                   0.7661855555617682, 0.7364542888171910, 0.7067326523068980,
                   0.6764476915028201, 0.6448126061663567, 0.6105590999244391,
                   0.5711732651063401, 0.5196103791686224, 0.0000000000000000]
        dataset_c = [0.0000000000000000, 0.5196103791686225, 0.5711732651063401,
                     0.6105590999244391, 0.6448126061663567, 0.6764476915028201,
                     0.7067326523068980, 0.7364542888171910, 0.7661855555617682,
                     0.7964065373291559, 0.8275735551899077, 0.8601710725555463,
                     0.8947644549851196, 0.9320695842357622, 0.9730633753323727,
                     1.0191847202536859, 1.0727491749396481, 1.1379465424937754,
                     1.2238478702170825, 1.3580986393225509, np.inf]
        dataset = np.column_stack([p[1:], dataset[1:]])
        FuncData(kolmogi, dataset, (0,), 1, rtol=_rtol).check()
        dataset_c = np.column_stack([p[:-1], dataset_c[:-1]])
        FuncData(_kolmogci, dataset_c, (0,), 1, rtol=_rtol).check()

    def test_smallx(self):
        # Check x values whose SF is very close to 1 (kolmogorov(x) = 1 - eps).
        epsilon = 0.1 ** np.arange(1, 14)
        x = np.array([0.571173265106, 0.441027698518, 0.374219690278, 0.331392659217,
                      0.300820537459, 0.277539353999, 0.259023494805, 0.243829561254,
                      0.231063086389, 0.220135543236, 0.210641372041, 0.202290283658,
                      0.19487060742])
        dataset = np.column_stack([x, 1-epsilon])
        FuncData(kolmogorov, dataset, (0,), 1, rtol=_rtol).check()

    def test_round_trip(self):
        # kolmogi(kolmogorov(x)) and _kolmogci(_kolmogc(x)) should recover x.
        def _ki_k(_x):
            return kolmogi(kolmogorov(_x))

        def _kci_kc(_x):
            return _kolmogci(_kolmogc(_x))

        x = np.linspace(0.0, 2.0, 21, endpoint=True)
        # For the SF round trip, tiny x lose precision (SF is ~1 there).
        x02 = x[(x == 0) | (x > 0.21)]  # Exclude 0.1, 0.2. 0.2 almost makes succeeds, but 0.1 has no chance.
        dataset02 = np.column_stack([x02, x02])
        FuncData(_ki_k, dataset02, (0,), 1, rtol=_rtol).check()

        dataset = np.column_stack([x, x])
        FuncData(_kci_kc, dataset, (0,), 1, rtol=_rtol).check()
class TestKolmogi:
    """Tests for ``kolmogi``, the inverse of the Kolmogorov survival
    function, and its complementary counterpart ``_kolmogci``."""

    def test_nan(self):
        # NaN input must propagate to a NaN output.
        assert_(np.isnan(kolmogi(np.nan)))

    def test_basic(self):
        # Spot-check (p, kolmogi(p)) reference pairs (inverses of the
        # TestKolmogorov.test_basic pairs).
        dataset = [(1.0, 0),
                   (0.96394524366487511, 0.5),
                   (0.9, 0.571173265106),
                   (0.5000000000000000, 0.8275735551899077),
                   (0.26999967167735456, 1),
                   (0.00067092525577969533, 2)]

        dataset = np.asarray(dataset)
        FuncData(kolmogi, dataset, (0,), 1, rtol=_rtol).check()

    def test_smallpcdf(self):
        epsilon = 0.5 ** np.arange(1, 55, 3)
        # kolmogi(1-p) == _kolmogci(p) if 1-(1-p) == p, but not necessarily otherwise
        # Use epsilon s.t. 1-(1-epsilon)) == epsilon, so can use same x-array for both results
        # (powers of 0.5 are exactly representable, so 1-(1-eps) round-trips).
        x = np.array([0.8275735551899077, 0.5345255069097583, 0.4320114038786941,
                      0.3736868442620478, 0.3345161714909591, 0.3057833329315859,
                      0.2835052890528936, 0.2655578150208676, 0.2506869966107999,
                      0.2380971058736669, 0.2272549289962079, 0.2177876361600040,
                      0.2094254686862041, 0.2019676748836232, 0.1952612948137504,
                      0.1891874239646641, 0.1836520225050326, 0.1785795904846466])

        dataset = np.column_stack([1-epsilon, x])
        FuncData(kolmogi, dataset, (0,), 1, rtol=_rtol).check()

        dataset = np.column_stack([epsilon, x])
        FuncData(_kolmogci, dataset, (0,), 1, rtol=_rtol).check()

    def test_smallpsf(self):
        epsilon = 0.5 ** np.arange(1, 55, 3)
        # kolmogi(p) == _kolmogci(1-p) if 1-(1-p) == p, but not necessarily otherwise
        # Use epsilon s.t. 1-(1-epsilon)) == epsilon, so can use same x-array for both results
        # (inverse values grow without bound as the SF argument p -> 0).
        x = np.array([0.8275735551899077, 1.3163786275161036, 1.6651092133663343,
                      1.9525136345289607, 2.2027324540033235, 2.4272929437460848,
                      2.6327688477341593, 2.8233300509220260, 3.0018183401530627,
                      3.1702735084088891, 3.3302184446307912, 3.4828258153113318,
                      3.6290214150152051, 3.7695513262825959, 3.9050272690877326,
                      4.0359582187082550, 4.1627730557884890, 4.2858371743264527])

        dataset = np.column_stack([epsilon, x])
        FuncData(kolmogi, dataset, (0,), 1, rtol=_rtol).check()

        dataset = np.column_stack([1-epsilon, x])
        FuncData(_kolmogci, dataset, (0,), 1, rtol=_rtol).check()

    def test_round_trip(self):
        # kolmogorov(kolmogi(p)) should recover p away from the endpoints.
        def _k_ki(_p):
            return kolmogorov(kolmogi(_p))

        p = np.linspace(0.1, 1.0, 10, endpoint=True)
        dataset = np.column_stack([p, p])
        FuncData(_k_ki, dataset, (0,), 1, rtol=_rtol).check()
class TestKolmogp:
def test_nan(self):
assert_(np.isnan(_kolmogp(np.nan)))
def test_basic(self):
dataset = [(0.000000, -0.0),
(0.200000, -1.532420541338916e-10),
(0.400000, -0.1012254419260496),
(0.600000, -1.324123244249925),
(0.800000, -1.627024345636592),
(1.000000, -1.071948558356941),
(1.200000, -0.538512430720529),
(1.400000, -0.2222133182429472),
(1.600000, -0.07649302775520538),
(1.800000, -0.02208687346347873),
(2.000000, -0.005367402045629683)]
dataset = np.asarray(dataset)
FuncData(_kolmogp, dataset, (0,), 1, rtol=_rtol).check()
| 18,407
| 43.571429
| 110
|
py
|
scipy
|
scipy-main/scipy/special/tests/test_mpmath.py
|
"""
Test SciPy functions versus mpmath, if available.
"""
import numpy as np
from numpy.testing import assert_, assert_allclose
from numpy import pi
import pytest
import itertools
from scipy._lib import _pep440
import scipy.special as sc
from scipy.special._testutils import (
MissingModule, check_version, FuncData,
assert_func_equal)
from scipy.special._mptestutils import (
Arg, FixedArg, ComplexArg, IntArg, assert_mpmath_equal,
nonfunctional_tooslow, trace_args, time_limited, exception_to_nan,
inf_to_nan)
from scipy.special._ufuncs import (
_sinpi, _cospi, _lgam1p, _lanczos_sum_expg_scaled, _log1pmx,
_igam_fac)
# mpmath is an optional test dependency.  When it is absent, bind the name
# to a MissingModule sentinel so the @check_version decorators below turn
# the tests into skips instead of failing at import time.
try:
    import mpmath
except ImportError:
    mpmath = MissingModule('mpmath')
# ------------------------------------------------------------------------------
# expi
# ------------------------------------------------------------------------------
@check_version(mpmath, '0.10')
def test_expi_complex():
    """Compare ``sc.expi`` against ``mpmath.ei`` on a polar grid of complex
    points with magnitudes from 1e-99 to 1e2."""
    dataset = []
    for r in np.logspace(-99, 2, 10):
        for p in np.linspace(0, 2*np.pi, 30):
            z = r*np.exp(1j*p)
            dataset.append((z, complex(mpmath.ei(z))))
    # np.complex_ was removed in NumPy 2.0; np.complex128 is the equivalent
    # concrete dtype on all supported NumPy versions.
    dataset = np.array(dataset, dtype=np.complex128)

    FuncData(sc.expi, dataset, 0, 1).check()
# ------------------------------------------------------------------------------
# expn
# ------------------------------------------------------------------------------
@check_version(mpmath, '0.19')
def test_expn_large_n():
    """Exercise ``sc.expn`` across the switch to its large-n asymptotics."""
    def reference(n, x):
        # High-precision reference value from mpmath.
        with mpmath.workdps(100):
            return float(mpmath.expint(n, x))

    dataset = np.asarray([(n, x, reference(n, x))
                          for n in (50, 51)
                          for x in np.logspace(0, 4, 200)])
    FuncData(sc.expn, dataset, (0, 1), 2, rtol=1e-13).check()
# ------------------------------------------------------------------------------
# hyp0f1
# ------------------------------------------------------------------------------
@check_version(mpmath, '0.19')
def test_hyp0f1_gh5764():
    """Small systematic check of hyp0f1 at complex arguments (gh-5764)."""
    axis = [-99.5, -9.5, -0.5, 0.5, 9.5, 99.5]
    dataset = []
    for v, x, y in itertools.product(axis, repeat=3):
        z = x + 1j*y
        # mpmath computes the answer correctly at dps ~ 17 but
        # fails for 20 < dps < 120 (uses a different method);
        # set the dps high enough that this isn't an issue
        with mpmath.workdps(120):
            dataset.append((v, z, complex(mpmath.hyp0f1(v, z))))
    dataset = np.array(dataset)

    def _real_order_hyp0f1(v, z):
        # FuncData hands v over as complex; the ufunc expects a real order.
        return sc.hyp0f1(v.real, z)

    FuncData(_real_order_hyp0f1, dataset, (0, 1), 2, rtol=1e-13).check()
@check_version(mpmath, '0.19')
def test_hyp0f1_gh_1609():
    """Regression test for gh-1609: hyp0f1 with large real order."""
    orders = np.linspace(150, 180, 21)
    actual = sc.hyp0f1(orders, 0.5)
    expected = np.array([mpmath.hyp0f1(v, 0.5) for v in orders])
    assert_allclose(actual, expected.astype(float), rtol=1e-12)
# ------------------------------------------------------------------------------
# hyperu
# ------------------------------------------------------------------------------
@check_version(mpmath, '1.1.0')
def test_hyperu_around_0():
    """Check hyperu(a, b, 0) against mpmath at the DLMF 13.2 special points."""
    points = []
    # DLMF 13.2.14-15 test points.
    for n in np.arange(-5, 5):
        for b in np.linspace(-5, 5, 20):
            points.append((-n, b))
            points.append((-n + b - 1, b))
    # DLMF 13.2.16-22 test points.
    for a in [-10.5, -1.5, -0.5, 0, 0.5, 1, 10]:
        for b in [-1.0, -0.5, 0, 0.5, 1, 1.5, 2, 2.5]:
            points.append((a, b))
    dataset = np.array([(a, b, 0, float(mpmath.hyperu(a, b, 0)))
                        for a, b in points])

    FuncData(sc.hyperu, dataset, (0, 1, 2), 3, rtol=1e-15, atol=5e-13).check()
# ------------------------------------------------------------------------------
# hyp2f1
# ------------------------------------------------------------------------------
@check_version(mpmath, '1.0.0')
def test_hyp2f1_strange_points():
    """hyp2f1 at degenerate points with b == c a negative integer; mpmath's
    ``eliminate`` option selects the appropriate limiting value."""
    pts = [
        (2, -1, -1, 0.7),  # expected: 2.4
        (2, -2, -2, 0.7),  # expected: 3.87
    ]
    pts += list(itertools.product([2, 1, -0.7, -1000], repeat=4))
    pts = [
        (a, b, c, x) for a, b, c, x in pts
        if b == c and round(b) == b and b < 0 and b != -1000
    ]
    kw = dict(eliminate=True)
    dataset = [p + (float(mpmath.hyp2f1(*p, **kw)),) for p in pts]
    # np.float_ was removed in NumPy 2.0; np.float64 is equivalent everywhere.
    dataset = np.array(dataset, dtype=np.float64)

    FuncData(sc.hyp2f1, dataset, (0,1,2,3), 4, rtol=1e-10).check()
@check_version(mpmath, '0.13')
def test_hyp2f1_real_some_points():
    """hyp2f1 at a curated list of historically problematic real points."""
    pts = [
        (1, 2, 3, 0),
        (1./3, 2./3, 5./6, 27./32),
        (1./4, 1./2, 3./4, 80./81),
        (2,-2, -3, 3),
        (2, -3, -2, 3),
        (2, -1.5, -1.5, 3),
        (1, 2, 3, 0),
        (0.7235, -1, -5, 0.3),
        (0.25, 1./3, 2, 0.999),
        (0.25, 1./3, 2, -1),
        (2, 3, 5, 0.99),
        (3./2, -0.5, 3, 0.99),
        (2, 2.5, -3.25, 0.999),
        (-8, 18.016500331508873, 10.805295997850628, 0.90875647507000001),
        (-10, 900, -10.5, 0.99),
        (-10, 900, 10.5, 0.99),
        (-1, 2, 1, 1.0),
        (-1, 2, 1, -1.0),
        (-3, 13, 5, 1.0),
        (-3, 13, 5, -1.0),
        (0.5, 1 - 270.5, 1.5, 0.999**2),  # from issue 1561
    ]
    dataset = [p + (float(mpmath.hyp2f1(*p)),) for p in pts]
    # np.float_ was removed in NumPy 2.0; np.float64 is equivalent everywhere.
    dataset = np.array(dataset, dtype=np.float64)

    # Some points hit poles/invalid regions; suppress the FP warnings.
    with np.errstate(invalid='ignore'):
        FuncData(sc.hyp2f1, dataset, (0,1,2,3), 4, rtol=1e-10).check()
@check_version(mpmath, '0.14')
def test_hyp2f1_some_points_2():
    """Points taken from mpmath's own unit tests (fixed after mpmath 0.13)."""
    # Taken from mpmath unit tests -- this point failed for mpmath 0.13 but
    # was fixed in their SVN since then
    pts = [
        (112, (51,10), (-9,10), -0.99999),
        (10,-900,10.5,0.99),
        (10,-900,-10.5,0.99),
    ]

    def fev(x):
        # Convert exact-rational tuples (num, den) into floats.
        if isinstance(x, tuple):
            return float(x[0]) / x[1]
        else:
            return x

    dataset = [tuple(map(fev, p)) + (float(mpmath.hyp2f1(*p)),) for p in pts]
    # np.float_ was removed in NumPy 2.0; np.float64 is equivalent everywhere.
    dataset = np.array(dataset, dtype=np.float64)

    FuncData(sc.hyp2f1, dataset, (0,1,2,3), 4, rtol=1e-10).check()
@check_version(mpmath, '0.13')
def test_hyp2f1_real_some():
    """Systematic sweep of hyp2f1 over a small real (a, b, c, z) grid;
    points where mpmath itself fails are silently dropped."""
    dataset = []
    for a in [-10, -5, -1.8, 1.8, 5, 10]:
        for b in [-2.5, -1, 1, 7.4]:
            for c in [-9, -1.8, 5, 20.4]:
                for z in [-10, -1.01, -0.99, 0, 0.6, 0.95, 1.5, 10]:
                    try:
                        v = float(mpmath.hyp2f1(a, b, c, z))
                    except Exception:
                        continue
                    dataset.append((a, b, c, z, v))
    # np.float_ was removed in NumPy 2.0; np.float64 is equivalent everywhere.
    dataset = np.array(dataset, dtype=np.float64)

    with np.errstate(invalid='ignore'):
        FuncData(sc.hyp2f1, dataset, (0,1,2,3), 4, rtol=1e-9,
                 ignore_inf_sign=True).check()
@check_version(mpmath, '0.12')
@pytest.mark.slow
def test_hyp2f1_real_random():
    """Randomized comparison of hyp2f1 against mpmath over heavy-tailed
    parameter draws with per-element random signs."""
    npoints = 500
    # np.float_ was removed in NumPy 2.0; np.float64 is equivalent everywhere.
    dataset = np.zeros((npoints, 5), np.float64)

    np.random.seed(1234)
    dataset[:, 0] = np.random.pareto(1.5, npoints)
    dataset[:, 1] = np.random.pareto(1.5, npoints)
    dataset[:, 2] = np.random.pareto(1.5, npoints)
    dataset[:, 3] = 2*np.random.rand(npoints) - 1

    # The original code called np.random.randint(2, npoints), which draws a
    # *single* integer in [2, npoints) and therefore flipped the sign of the
    # whole column at once; `size=npoints` gives the intended independent
    # per-element random signs.
    dataset[:, 0] *= (-1)**np.random.randint(2, size=npoints)
    dataset[:, 1] *= (-1)**np.random.randint(2, size=npoints)
    dataset[:, 2] *= (-1)**np.random.randint(2, size=npoints)

    for ds in dataset:
        if mpmath.__version__ < '0.14':
            # mpmath < 0.14 fails for c too much smaller than a, b
            if abs(ds[:2]).max() > abs(ds[2]):
                ds[2] = abs(ds[:2]).max()
        ds[4] = float(mpmath.hyp2f1(*tuple(ds[:4])))

    FuncData(sc.hyp2f1, dataset, (0, 1, 2, 3), 4, rtol=1e-9).check()
# ------------------------------------------------------------------------------
# erf (complex)
# ------------------------------------------------------------------------------
@check_version(mpmath, '0.14')
def test_erf_complex():
    """Compare complex erf/erfc with high-precision mpmath reference values."""
    # need to increase mpmath precision for this test
    saved_dps, saved_prec = mpmath.mp.dps, mpmath.mp.prec
    try:
        mpmath.mp.dps = 70
        re1, im1 = np.meshgrid(np.linspace(-10, 1, 31), np.linspace(-10, 1, 11))
        re2, im2 = np.meshgrid(np.logspace(-80, .8, 31), np.logspace(-80, .8, 11))
        points = (np.r_[re1.ravel(), re2.ravel()]
                  + 1j*np.r_[im1.ravel(), im2.ravel()])

        for ours, theirs in ((sc.erf, mpmath.erf), (sc.erfc, mpmath.erfc)):
            assert_func_equal(ours,
                              lambda x, _ref=theirs: complex(_ref(x)),
                              points, vectorized=False, rtol=1e-13)
    finally:
        # Restore the global mpmath precision no matter what happened.
        mpmath.mp.dps, mpmath.mp.prec = saved_dps, saved_prec
# ------------------------------------------------------------------------------
# lpmv
# ------------------------------------------------------------------------------
@check_version(mpmath, '0.15')
def test_lpmv():
    """Associated Legendre lpmv(mu, nu, x) against mpmath.legenp for a grid
    of integer orders mu, mixed degrees nu, and x values in [-1, 1]."""
    pts = []
    for x in [-0.99, -0.557, 1e-6, 0.132, 1]:
        pts.extend([
            (1, 1, x),
            (1, -1, x),
            (-1, 1, x),
            (-1, -2, x),
            (1, 1.7, x),
            (1, -1.7, x),
            (-1, 1.7, x),
            (-1, -2.7, x),
            (1, 10, x),
            (1, 11, x),
            (3, 8, x),
            (5, 11, x),
            (-3, 8, x),
            (-5, 11, x),
            (3, -8, x),
            (5, -11, x),
            (-3, -8, x),
            (-5, -11, x),
            (3, 8.3, x),
            (5, 11.3, x),
            (-3, 8.3, x),
            (-5, 11.3, x),
            (3, -8.3, x),
            (5, -11.3, x),
            (-3, -8.3, x),
            (-5, -11.3, x),
        ])

    def mplegenp(nu, mu, x):
        if mu == int(mu) and x == 1:
            # mpmath 0.17 gets this wrong
            if mu == 0:
                return 1
            else:
                return 0
        return mpmath.legenp(nu, mu, x)

    dataset = [p + (mplegenp(p[1], p[0], p[2]),) for p in pts]
    # np.float_ was removed in NumPy 2.0; np.float64 is equivalent everywhere.
    dataset = np.array(dataset, dtype=np.float64)

    def evf(mu, nu, x):
        # FuncData supplies mu as a float array; lpmv requires integer order.
        return sc.lpmv(mu.astype(int), nu, x)

    with np.errstate(invalid='ignore'):
        FuncData(evf, dataset, (0,1,2), 3, rtol=1e-10, atol=1e-14).check()
# ------------------------------------------------------------------------------
# beta
# ------------------------------------------------------------------------------
@check_version(mpmath, '0.15')
def test_beta():
    """Compare ``sc.beta`` and ``sc.betaln`` with mpmath over a grid that
    mixes huge/tiny magnitudes, integers, half-integers and negatives."""
    np.random.seed(1234)

    b = np.r_[np.logspace(-200, 200, 4),
              np.logspace(-10, 10, 4),
              np.logspace(-1, 1, 4),
              np.arange(-10, 11, 1),
              np.arange(-10, 11, 1) + 0.5,
              -1, -2.3, -3, -100.3, -10003.4]
    a = b

    # All (a, b) pairs from the Cartesian product of the two axes.
    ab = np.array(np.broadcast_arrays(a[:,None], b[None,:])).reshape(2, -1).T

    old_dps, old_prec = mpmath.mp.dps, mpmath.mp.prec
    try:
        # High working precision; beta is wildly ill-conditioned at some of
        # these points.
        mpmath.mp.dps = 400

        assert_func_equal(sc.beta,
                          lambda a, b: float(mpmath.beta(a, b)),
                          ab,
                          vectorized=False,
                          rtol=1e-10,
                          ignore_inf_sign=True)

        assert_func_equal(
            sc.betaln,
            lambda a, b: float(mpmath.log(abs(mpmath.beta(a, b)))),
            ab,
            vectorized=False,
            rtol=1e-10)
    finally:
        # Restore the global mpmath precision.
        mpmath.mp.dps, mpmath.mp.prec = old_dps, old_prec
# ------------------------------------------------------------------------------
# loggamma
# ------------------------------------------------------------------------------
# Radius around z = 1 and z = 2 within which the loggamma tests below probe
# the Taylor-series region (see test_loggamma_taylor*).
LOGGAMMA_TAYLOR_RADIUS = 0.2
@check_version(mpmath, '0.19')
def test_loggamma_taylor_transition():
    # Make sure there isn't a big jump in accuracy when we move from
    # using the Taylor series to using the recurrence relation.
    radii = LOGGAMMA_TAYLOR_RADIUS + np.array([-0.1, -0.01, 0, 0.01, 0.1])
    angles = np.linspace(0, 2*np.pi, 20)
    radii, angles = np.meshgrid(radii, angles)
    offsets = radii*np.exp(1j*angles)
    # Rings straddling the Taylor radius around both zeros (z = 1 and z = 2).
    z = np.r_[1 + offsets, 2 + offsets].flatten()

    dataset = np.array([(z0, complex(mpmath.loggamma(z0))) for z0 in z])

    FuncData(sc.loggamma, dataset, 0, 1, rtol=5e-14).check()
@check_version(mpmath, '0.19')
def test_loggamma_taylor():
    # Test around the zeros at z = 1, 2.
    radii = np.logspace(-16, np.log10(LOGGAMMA_TAYLOR_RADIUS), 10)
    angles = np.linspace(0, 2*np.pi, 20)
    radii, angles = np.meshgrid(radii, angles)
    offsets = radii*np.exp(1j*angles)
    z = np.r_[1 + offsets, 2 + offsets].flatten()

    dataset = np.array([(z0, complex(mpmath.loggamma(z0))) for z0 in z])

    FuncData(sc.loggamma, dataset, 0, 1, rtol=5e-14).check()
# ------------------------------------------------------------------------------
# rgamma
# ------------------------------------------------------------------------------
@check_version(mpmath, '0.19')
@pytest.mark.slow
def test_rgamma_zeros():
    """Check ``sc.rgamma`` in small complex disks around each of its zeros."""
    # Test around the zeros at z = 0, -1, -2, ..., -169. (After -169 we
    # get values that are out of floating point range even when we're
    # within 0.1 of the zero.)

    # Can't use too many points here or the test takes forever.
    dx = np.r_[-np.logspace(-1, -13, 3), 0, np.logspace(-13, -1, 3)]
    dy = dx.copy()
    dx, dy = np.meshgrid(dx, dy)
    dz = dx + 1j*dy
    # Broadcast every complex offset against every zero location.
    zeros = np.arange(0, -170, -1).reshape(1, 1, -1)
    z = (zeros + np.dstack((dz,)*zeros.size)).flatten()
    with mpmath.workdps(100):
        dataset = [(z0, complex(mpmath.rgamma(z0))) for z0 in z]

    dataset = np.array(dataset)
    FuncData(sc.rgamma, dataset, 0, 1, rtol=1e-12).check()
# ------------------------------------------------------------------------------
# digamma
# ------------------------------------------------------------------------------
@check_version(mpmath, '0.19')
@pytest.mark.slow
def test_digamma_roots():
    # Test the special-cased roots for digamma.
    # The two real roots nearest the origin (one positive, one in (-1, 0)),
    # located to full precision with mpmath.
    root = mpmath.findroot(mpmath.digamma, 1.5)
    roots = [float(root)]
    root = mpmath.findroot(mpmath.digamma, -0.5)
    roots.append(float(root))
    roots = np.array(roots)

    # If we test beyond a radius of 0.24 mpmath will take forever.
    dx = np.r_[-0.24, -np.logspace(-1, -15, 10), 0, np.logspace(-15, -1, 10), 0.24]
    dy = dx.copy()
    dx, dy = np.meshgrid(dx, dy)
    dz = dx + 1j*dy
    # Broadcast every complex offset against each root location.
    z = (roots + np.dstack((dz,)*roots.size)).flatten()
    with mpmath.workdps(30):
        dataset = [(z0, complex(mpmath.digamma(z0))) for z0 in z]

    dataset = np.array(dataset)
    FuncData(sc.digamma, dataset, 0, 1, rtol=1e-14).check()
@check_version(mpmath, '0.19')
def test_digamma_negreal():
    # Test digamma around the negative real axis. Don't do this in
    # TestSystematic because the points need some jiggering so that
    # mpmath doesn't take forever.
    digamma = exception_to_nan(mpmath.digamma)

    real = -np.logspace(300, -30, 100)
    imag = np.r_[-np.logspace(0, -3, 5), 0, np.logspace(-3, 0, 5)]
    real, imag = np.meshgrid(real, imag)
    z = (real + 1j*imag).flatten()

    with mpmath.workdps(40):
        dataset = np.asarray([(z0, complex(digamma(z0))) for z0 in z])

    FuncData(sc.digamma, dataset, 0, 1, rtol=1e-13).check()
@check_version(mpmath, '0.19')
def test_digamma_boundary():
    # Check that there isn't a jump in accuracy when we switch from
    # using the asymptotic series to the reflection formula.
    real = -np.logspace(300, -30, 100)
    imag = np.array([-6.1, -5.9, 5.9, 6.1])
    real, imag = np.meshgrid(real, imag)
    z = (real + 1j*imag).flatten()

    with mpmath.workdps(30):
        dataset = np.asarray([(z0, complex(mpmath.digamma(z0))) for z0 in z])

    FuncData(sc.digamma, dataset, 0, 1, rtol=1e-13).check()
# ------------------------------------------------------------------------------
# gammainc
# ------------------------------------------------------------------------------
@check_version(mpmath, '0.19')
@pytest.mark.slow
def test_gammainc_boundary():
    # Test the transition to the asymptotic series.
    threshold = 20
    a_axis = np.linspace(0.5*threshold, 2*threshold, 50)
    grid_a, grid_x = np.meshgrid(a_axis, a_axis.copy())
    grid_a = grid_a.flatten()
    grid_x = grid_x.flatten()

    rows = []
    with mpmath.workdps(100):
        for a0, x0 in zip(grid_a, grid_x):
            ref = float(mpmath.gammainc(a0, b=x0, regularized=True))
            rows.append((a0, x0, ref))

    FuncData(sc.gammainc, np.array(rows), (0, 1), 2, rtol=1e-12).check()
# ------------------------------------------------------------------------------
# spence
# ------------------------------------------------------------------------------
@check_version(mpmath, '0.19')
@pytest.mark.slow
def test_spence_circle():
    # The trickiest region for spence is around the circle |z - 1| = 1,
    # so test that region carefully.
    def reference(z):
        # mpmath has no direct spence; use Li_2(1 - z).
        return complex(mpmath.polylog(2, 1 - z))

    radii = np.linspace(0.5, 1.5)
    angles = np.linspace(0, 2*pi)
    points = (1 + np.outer(radii, np.exp(1j*angles))).flatten()
    dataset = np.asarray([(z0, reference(z0)) for z0 in points])

    FuncData(sc.spence, dataset, 0, 1, rtol=1e-14).check()
# ------------------------------------------------------------------------------
# sinpi and cospi
# ------------------------------------------------------------------------------
@check_version(mpmath, '0.19')
def test_sinpi_zeros():
    """_sinpi must stay fully accurate in tiny disks around each integer zero."""
    eps = np.finfo(float).eps
    offsets = np.r_[-np.logspace(0, -13, 3), 0, np.logspace(-13, 0, 3)]
    re_off, im_off = np.meshgrid(offsets, offsets)
    dz = re_off + 1j*im_off
    integer_zeros = np.arange(-100, 100, 1).reshape(1, 1, -1)
    z = (integer_zeros + np.dstack((dz,)*integer_zeros.size)).flatten()
    dataset = np.asarray([(z0, complex(mpmath.sinpi(z0)))
                          for z0 in z])
    FuncData(_sinpi, dataset, 0, 1, rtol=2*eps).check()
@check_version(mpmath, '0.19')
def test_cospi_zeros():
    """_cospi must stay fully accurate in tiny disks around each half-integer zero."""
    eps = np.finfo(float).eps
    offsets = np.r_[-np.logspace(0, -13, 3), 0, np.logspace(-13, 0, 3)]
    re_off, im_off = np.meshgrid(offsets, offsets)
    dz = re_off + 1j*im_off
    half_integer_zeros = (np.arange(-100, 100, 1) + 0.5).reshape(1, 1, -1)
    z = (half_integer_zeros + np.dstack((dz,)*half_integer_zeros.size)).flatten()
    dataset = np.asarray([(z0, complex(mpmath.cospi(z0)))
                          for z0 in z])
    FuncData(_cospi, dataset, 0, 1, rtol=2*eps).check()
# ------------------------------------------------------------------------------
# ellipj
# ------------------------------------------------------------------------------
@check_version(mpmath, '0.19')
def test_dn_quarter_period():
    """ellipj's dn must remain accurate near the quarter period u = K(m)."""
    def dn(u, m):
        # Third element of ellipj's (sn, cn, dn, ph) tuple.
        return sc.ellipj(u, m)[2]

    def mpmath_dn(u, m):
        return float(mpmath.ellipfun("dn", u=u, m=m))

    offsets = np.r_[-np.logspace(-1, -15, 10), 0, np.logspace(-15, -1, 10)]
    rows = []
    for m0 in np.linspace(0, 1, 20):
        quarter_period = float(mpmath.ellipk(m0))
        for du0 in offsets:
            u = quarter_period + du0
            rows.append((u, m0, mpmath_dn(u, m0)))

    FuncData(dn, np.asarray(rows), (0, 1), 2, rtol=1e-10).check()
# ------------------------------------------------------------------------------
# Wright Omega
# ------------------------------------------------------------------------------
def _mpmath_wrightomega(z, dps):
    """Reference Wright omega at ``dps`` decimal digits, computed as the
    Lambert W of exp(z) on the branch obtained by unwinding Im(z)."""
    with mpmath.workdps(dps):
        zc = mpmath.mpc(z)
        branch = mpmath.ceil((zc.imag - mpmath.pi)/(2*mpmath.pi))
        return mpmath.lambertw(mpmath.exp(zc), branch)
@pytest.mark.slow
@check_version(mpmath, '0.19')
def test_wrightomega_branch():
    """Check wrightomega just above and below the branch cuts at
    Im(z) = +/-pi for large negative Re(z)."""
    x = -np.logspace(10, 0, 25)
    # Build 51-step ladders of consecutive floats hugging each side of the
    # two cuts: above/below +pi, then above/below -pi (same order as before).
    ladders = []
    for edge, toward in ((np.pi, np.inf), (np.pi, -np.inf),
                         (-np.pi, np.inf), (-np.pi, -np.inf)):
        ladder = [np.nextafter(edge, toward)]
        for _ in range(50):
            ladder.append(np.nextafter(ladder[-1], toward))
        ladders.append(ladder)
    y = np.hstack(ladders)

    x, y = np.meshgrid(x, y)
    z = (x + 1j*y).flatten()

    dataset = np.asarray([(z0, complex(_mpmath_wrightomega(z0, 25)))
                          for z0 in z])

    FuncData(sc.wrightomega, dataset, 0, 1, rtol=1e-8).check()
@pytest.mark.slow
@check_version(mpmath, '0.19')
def test_wrightomega_region1():
    # This region gets less coverage in the TestSystematic test
    re_grid, im_grid = np.meshgrid(np.linspace(-2, 1), np.linspace(1, 2*np.pi))
    z = (re_grid + 1j*im_grid).flatten()

    dataset = np.asarray([(z0, complex(_mpmath_wrightomega(z0, 25)))
                          for z0 in z])

    FuncData(sc.wrightomega, dataset, 0, 1, rtol=1e-15).check()
@pytest.mark.slow
@check_version(mpmath, '0.19')
def test_wrightomega_region2():
    # This region gets less coverage in the TestSystematic test
    re_grid, im_grid = np.meshgrid(np.linspace(-2, 1), np.linspace(-2*np.pi, -1))
    z = (re_grid + 1j*im_grid).flatten()

    dataset = np.asarray([(z0, complex(_mpmath_wrightomega(z0, 25)))
                          for z0 in z])

    FuncData(sc.wrightomega, dataset, 0, 1, rtol=1e-15).check()
# ------------------------------------------------------------------------------
# lambertw
# ------------------------------------------------------------------------------
@pytest.mark.slow
@check_version(mpmath, '0.19')
def test_lambertw_smallz():
    """Principal-branch lambertw on a dense grid over the unit square."""
    re_grid, im_grid = np.meshgrid(np.linspace(-1, 1, 25), np.linspace(-1, 1, 25))
    z = (re_grid + 1j*im_grid).flatten()

    dataset = np.asarray([(z0, complex(mpmath.lambertw(z0)))
                          for z0 in z])

    FuncData(sc.lambertw, dataset, 0, 1, rtol=1e-13).check()
# ------------------------------------------------------------------------------
# Systematic tests
# ------------------------------------------------------------------------------
# Shared mpmath keyword arguments for hypergeometric reference values:
# cap the precision escalation and series length so evaluation terminates
# in reasonable time.
HYPERKW = dict(maxprec=200, maxterms=200)
@pytest.mark.slow
@check_version(mpmath, '0.17')
class TestSystematic:
def test_airyai(self):
# oscillating function, limit range
assert_mpmath_equal(lambda z: sc.airy(z)[0],
mpmath.airyai,
[Arg(-1e8, 1e8)],
rtol=1e-5)
assert_mpmath_equal(lambda z: sc.airy(z)[0],
mpmath.airyai,
[Arg(-1e3, 1e3)])
def test_airyai_complex(self):
assert_mpmath_equal(lambda z: sc.airy(z)[0],
mpmath.airyai,
[ComplexArg()])
def test_airyai_prime(self):
# oscillating function, limit range
assert_mpmath_equal(lambda z: sc.airy(z)[1], lambda z:
mpmath.airyai(z, derivative=1),
[Arg(-1e8, 1e8)],
rtol=1e-5)
assert_mpmath_equal(lambda z: sc.airy(z)[1], lambda z:
mpmath.airyai(z, derivative=1),
[Arg(-1e3, 1e3)])
def test_airyai_prime_complex(self):
assert_mpmath_equal(lambda z: sc.airy(z)[1], lambda z:
mpmath.airyai(z, derivative=1),
[ComplexArg()])
def test_airybi(self):
# oscillating function, limit range
assert_mpmath_equal(lambda z: sc.airy(z)[2], lambda z:
mpmath.airybi(z),
[Arg(-1e8, 1e8)],
rtol=1e-5)
assert_mpmath_equal(lambda z: sc.airy(z)[2], lambda z:
mpmath.airybi(z),
[Arg(-1e3, 1e3)])
def test_airybi_complex(self):
assert_mpmath_equal(lambda z: sc.airy(z)[2], lambda z:
mpmath.airybi(z),
[ComplexArg()])
def test_airybi_prime(self):
# oscillating function, limit range
assert_mpmath_equal(lambda z: sc.airy(z)[3], lambda z:
mpmath.airybi(z, derivative=1),
[Arg(-1e8, 1e8)],
rtol=1e-5)
assert_mpmath_equal(lambda z: sc.airy(z)[3], lambda z:
mpmath.airybi(z, derivative=1),
[Arg(-1e3, 1e3)])
def test_airybi_prime_complex(self):
assert_mpmath_equal(lambda z: sc.airy(z)[3], lambda z:
mpmath.airybi(z, derivative=1),
[ComplexArg()])
def test_bei(self):
assert_mpmath_equal(sc.bei,
exception_to_nan(lambda z: mpmath.bei(0, z, **HYPERKW)),
[Arg(-1e3, 1e3)])
def test_ber(self):
assert_mpmath_equal(sc.ber,
exception_to_nan(lambda z: mpmath.ber(0, z, **HYPERKW)),
[Arg(-1e3, 1e3)])
def test_bernoulli(self):
assert_mpmath_equal(lambda n: sc.bernoulli(int(n))[int(n)],
lambda n: float(mpmath.bernoulli(int(n))),
[IntArg(0, 13000)],
rtol=1e-9, n=13000)
def test_besseli(self):
assert_mpmath_equal(sc.iv,
exception_to_nan(lambda v, z: mpmath.besseli(v, z, **HYPERKW)),
[Arg(-1e100, 1e100), Arg()],
atol=1e-270)
def test_besseli_complex(self):
assert_mpmath_equal(lambda v, z: sc.iv(v.real, z),
exception_to_nan(lambda v, z: mpmath.besseli(v, z, **HYPERKW)),
[Arg(-1e100, 1e100), ComplexArg()])
def test_besselj(self):
assert_mpmath_equal(sc.jv,
exception_to_nan(lambda v, z: mpmath.besselj(v, z, **HYPERKW)),
[Arg(-1e100, 1e100), Arg(-1e3, 1e3)],
ignore_inf_sign=True)
# loss of precision at large arguments due to oscillation
assert_mpmath_equal(sc.jv,
exception_to_nan(lambda v, z: mpmath.besselj(v, z, **HYPERKW)),
[Arg(-1e100, 1e100), Arg(-1e8, 1e8)],
ignore_inf_sign=True,
rtol=1e-5)
def test_besselj_complex(self):
assert_mpmath_equal(lambda v, z: sc.jv(v.real, z),
exception_to_nan(lambda v, z: mpmath.besselj(v, z, **HYPERKW)),
[Arg(), ComplexArg()])
def test_besselk(self):
assert_mpmath_equal(sc.kv,
mpmath.besselk,
[Arg(-200, 200), Arg(0, np.inf)],
nan_ok=False, rtol=1e-12)
def test_besselk_int(self):
assert_mpmath_equal(sc.kn,
mpmath.besselk,
[IntArg(-200, 200), Arg(0, np.inf)],
nan_ok=False, rtol=1e-12)
def test_besselk_complex(self):
assert_mpmath_equal(lambda v, z: sc.kv(v.real, z),
exception_to_nan(lambda v, z: mpmath.besselk(v, z, **HYPERKW)),
[Arg(-1e100, 1e100), ComplexArg()])
def test_bessely(self):
def mpbessely(v, x):
r = float(mpmath.bessely(v, x, **HYPERKW))
if abs(r) > 1e305:
# overflowing to inf a bit earlier is OK
r = np.inf * np.sign(r)
if abs(r) == 0 and x == 0:
# invalid result from mpmath, point x=0 is a divergence
return np.nan
return r
assert_mpmath_equal(sc.yv,
exception_to_nan(mpbessely),
[Arg(-1e100, 1e100), Arg(-1e8, 1e8)],
n=5000)
def test_bessely_complex(self):
def mpbessely(v, x):
r = complex(mpmath.bessely(v, x, **HYPERKW))
if abs(r) > 1e305:
# overflowing to inf a bit earlier is OK
with np.errstate(invalid='ignore'):
r = np.inf * np.sign(r)
return r
assert_mpmath_equal(lambda v, z: sc.yv(v.real, z),
exception_to_nan(mpbessely),
[Arg(), ComplexArg()],
n=15000)
def test_bessely_int(self):
def mpbessely(v, x):
r = float(mpmath.bessely(v, x))
if abs(r) == 0 and x == 0:
# invalid result from mpmath, point x=0 is a divergence
return np.nan
return r
assert_mpmath_equal(lambda v, z: sc.yn(int(v), z),
exception_to_nan(mpbessely),
[IntArg(-1000, 1000), Arg(-1e8, 1e8)])
def test_beta(self):
bad_points = []
def beta(a, b, nonzero=False):
if a < -1e12 or b < -1e12:
# Function is defined here only at integers, but due
# to loss of precision this is numerically
# ill-defined. Don't compare values here.
return np.nan
if (a < 0 or b < 0) and (abs(float(a + b)) % 1) == 0:
# close to a zero of the function: mpmath and scipy
# will not round here the same, so the test needs to be
# run with an absolute tolerance
if nonzero:
bad_points.append((float(a), float(b)))
return np.nan
return mpmath.beta(a, b)
assert_mpmath_equal(sc.beta,
lambda a, b: beta(a, b, nonzero=True),
[Arg(), Arg()],
dps=400,
ignore_inf_sign=True)
assert_mpmath_equal(sc.beta,
beta,
np.array(bad_points),
dps=400,
ignore_inf_sign=True,
atol=1e-11)
def test_betainc(self):
assert_mpmath_equal(sc.betainc,
time_limited()(exception_to_nan(lambda a, b, x: mpmath.betainc(a, b, 0, x, regularized=True))),
[Arg(), Arg(), Arg()])
def test_binom(self):
bad_points = []
def binomial(n, k, nonzero=False):
if abs(k) > 1e8*(abs(n) + 1):
# The binomial is rapidly oscillating in this region,
# and the function is numerically ill-defined. Don't
# compare values here.
return np.nan
if n < k and abs(float(n-k) - np.round(float(n-k))) < 1e-15:
# close to a zero of the function: mpmath and scipy
# will not round here the same, so the test needs to be
# run with an absolute tolerance
if nonzero:
bad_points.append((float(n), float(k)))
return np.nan
return mpmath.binomial(n, k)
assert_mpmath_equal(sc.binom,
lambda n, k: binomial(n, k, nonzero=True),
[Arg(), Arg()],
dps=400)
assert_mpmath_equal(sc.binom,
binomial,
np.array(bad_points),
dps=400,
atol=1e-14)
def test_chebyt_int(self):
assert_mpmath_equal(lambda n, x: sc.eval_chebyt(int(n), x),
exception_to_nan(lambda n, x: mpmath.chebyt(n, x, **HYPERKW)),
[IntArg(), Arg()], dps=50)
@pytest.mark.xfail(run=False, reason="some cases in hyp2f1 not fully accurate")
def test_chebyt(self):
assert_mpmath_equal(sc.eval_chebyt,
lambda n, x: time_limited()(exception_to_nan(mpmath.chebyt))(n, x, **HYPERKW),
[Arg(-101, 101), Arg()], n=10000)
def test_chebyu_int(self):
assert_mpmath_equal(lambda n, x: sc.eval_chebyu(int(n), x),
exception_to_nan(lambda n, x: mpmath.chebyu(n, x, **HYPERKW)),
[IntArg(), Arg()], dps=50)
@pytest.mark.xfail(run=False, reason="some cases in hyp2f1 not fully accurate")
def test_chebyu(self):
assert_mpmath_equal(sc.eval_chebyu,
lambda n, x: time_limited()(exception_to_nan(mpmath.chebyu))(n, x, **HYPERKW),
[Arg(-101, 101), Arg()])
def test_chi(self):
def chi(x):
return sc.shichi(x)[1]
assert_mpmath_equal(chi, mpmath.chi, [Arg()])
# check asymptotic series cross-over
assert_mpmath_equal(chi, mpmath.chi, [FixedArg([88 - 1e-9, 88, 88 + 1e-9])])
def test_chi_complex(self):
def chi(z):
return sc.shichi(z)[1]
# chi oscillates as Im[z] -> +- inf, so limit range
assert_mpmath_equal(chi,
mpmath.chi,
[ComplexArg(complex(-np.inf, -1e8), complex(np.inf, 1e8))],
rtol=1e-12)
def test_ci(self):
def ci(x):
return sc.sici(x)[1]
# oscillating function: limit range
assert_mpmath_equal(ci,
mpmath.ci,
[Arg(-1e8, 1e8)])
def test_ci_complex(self):
def ci(z):
return sc.sici(z)[1]
# ci oscillates as Re[z] -> +- inf, so limit range
assert_mpmath_equal(ci,
mpmath.ci,
[ComplexArg(complex(-1e8, -np.inf), complex(1e8, np.inf))],
rtol=1e-8)
def test_cospi(self):
eps = np.finfo(float).eps
assert_mpmath_equal(_cospi,
mpmath.cospi,
[Arg()], nan_ok=False, rtol=2*eps)
def test_cospi_complex(self):
assert_mpmath_equal(_cospi,
mpmath.cospi,
[ComplexArg()], nan_ok=False, rtol=1e-13)
def test_digamma(self):
assert_mpmath_equal(sc.digamma,
exception_to_nan(mpmath.digamma),
[Arg()], rtol=1e-12, dps=50)
def test_digamma_complex(self):
# Test on a cut plane because mpmath will hang. See
# test_digamma_negreal for tests on the negative real axis.
def param_filter(z):
return np.where((z.real < 0) & (np.abs(z.imag) < 1.12), False, True)
assert_mpmath_equal(sc.digamma,
exception_to_nan(mpmath.digamma),
[ComplexArg()], rtol=1e-13, dps=40,
param_filter=param_filter)
def test_e1(self):
assert_mpmath_equal(sc.exp1,
mpmath.e1,
[Arg()], rtol=1e-14)
def test_e1_complex(self):
# E_1 oscillates as Im[z] -> +- inf, so limit range
assert_mpmath_equal(sc.exp1,
mpmath.e1,
[ComplexArg(complex(-np.inf, -1e8), complex(np.inf, 1e8))],
rtol=1e-11)
# Check cross-over region
assert_mpmath_equal(sc.exp1,
mpmath.e1,
(np.linspace(-50, 50, 171)[:, None] +
np.r_[0, np.logspace(-3, 2, 61),
-np.logspace(-3, 2, 11)]*1j).ravel(),
rtol=1e-11)
assert_mpmath_equal(sc.exp1,
mpmath.e1,
(np.linspace(-50, -35, 10000) + 0j),
rtol=1e-11)
def test_exprel(self):
assert_mpmath_equal(sc.exprel,
lambda x: mpmath.expm1(x)/x if x != 0 else mpmath.mpf('1.0'),
[Arg(a=-np.log(np.finfo(np.double).max), b=np.log(np.finfo(np.double).max))])
assert_mpmath_equal(sc.exprel,
lambda x: mpmath.expm1(x)/x if x != 0 else mpmath.mpf('1.0'),
np.array([1e-12, 1e-24, 0, 1e12, 1e24, np.inf]), rtol=1e-11)
assert_(np.isinf(sc.exprel(np.inf)))
assert_(sc.exprel(-np.inf) == 0)
def test_expm1_complex(self):
# Oscillates as a function of Im[z], so limit range to avoid loss of precision
assert_mpmath_equal(sc.expm1,
mpmath.expm1,
[ComplexArg(complex(-np.inf, -1e7), complex(np.inf, 1e7))])
def test_log1p_complex(self):
assert_mpmath_equal(sc.log1p,
lambda x: mpmath.log(x+1),
[ComplexArg()], dps=60)
def test_log1pmx(self):
assert_mpmath_equal(_log1pmx,
lambda x: mpmath.log(x + 1) - x,
[Arg()], dps=60, rtol=1e-14)
def test_ei(self):
assert_mpmath_equal(sc.expi,
mpmath.ei,
[Arg()],
rtol=1e-11)
def test_ei_complex(self):
# Ei oscillates as Im[z] -> +- inf, so limit range
assert_mpmath_equal(sc.expi,
mpmath.ei,
[ComplexArg(complex(-np.inf, -1e8), complex(np.inf, 1e8))],
rtol=1e-9)
def test_ellipe(self):
assert_mpmath_equal(sc.ellipe,
mpmath.ellipe,
[Arg(b=1.0)])
def test_ellipeinc(self):
assert_mpmath_equal(sc.ellipeinc,
mpmath.ellipe,
[Arg(-1e3, 1e3), Arg(b=1.0)])
def test_ellipeinc_largephi(self):
assert_mpmath_equal(sc.ellipeinc,
mpmath.ellipe,
[Arg(), Arg()])
def test_ellipf(self):
assert_mpmath_equal(sc.ellipkinc,
mpmath.ellipf,
[Arg(-1e3, 1e3), Arg()])
def test_ellipf_largephi(self):
assert_mpmath_equal(sc.ellipkinc,
mpmath.ellipf,
[Arg(), Arg()])
def test_ellipk(self):
assert_mpmath_equal(sc.ellipk,
mpmath.ellipk,
[Arg(b=1.0)])
assert_mpmath_equal(sc.ellipkm1,
lambda m: mpmath.ellipk(1 - m),
[Arg(a=0.0)],
dps=400)
def test_ellipkinc(self):
def ellipkinc(phi, m):
return mpmath.ellippi(0, phi, m)
assert_mpmath_equal(sc.ellipkinc,
ellipkinc,
[Arg(-1e3, 1e3), Arg(b=1.0)],
ignore_inf_sign=True)
def test_ellipkinc_largephi(self):
def ellipkinc(phi, m):
return mpmath.ellippi(0, phi, m)
assert_mpmath_equal(sc.ellipkinc,
ellipkinc,
[Arg(), Arg(b=1.0)],
ignore_inf_sign=True)
def test_ellipfun_sn(self):
def sn(u, m):
# mpmath doesn't get the zero at u = 0--fix that
if u == 0:
return 0
else:
return mpmath.ellipfun("sn", u=u, m=m)
# Oscillating function --- limit range of first argument; the
# loss of precision there is an expected numerical feature
# rather than an actual bug
assert_mpmath_equal(lambda u, m: sc.ellipj(u, m)[0],
sn,
[Arg(-1e6, 1e6), Arg(a=0, b=1)],
rtol=1e-8)
def test_ellipfun_cn(self):
# see comment in ellipfun_sn
assert_mpmath_equal(lambda u, m: sc.ellipj(u, m)[1],
lambda u, m: mpmath.ellipfun("cn", u=u, m=m),
[Arg(-1e6, 1e6), Arg(a=0, b=1)],
rtol=1e-8)
def test_ellipfun_dn(self):
# see comment in ellipfun_sn
assert_mpmath_equal(lambda u, m: sc.ellipj(u, m)[2],
lambda u, m: mpmath.ellipfun("dn", u=u, m=m),
[Arg(-1e6, 1e6), Arg(a=0, b=1)],
rtol=1e-8)
def test_erf(self):
assert_mpmath_equal(sc.erf,
lambda z: mpmath.erf(z),
[Arg()])
def test_erf_complex(self):
assert_mpmath_equal(sc.erf,
lambda z: mpmath.erf(z),
[ComplexArg()], n=200)
def test_erfc(self):
assert_mpmath_equal(sc.erfc,
exception_to_nan(lambda z: mpmath.erfc(z)),
[Arg()], rtol=1e-13)
def test_erfc_complex(self):
assert_mpmath_equal(sc.erfc,
exception_to_nan(lambda z: mpmath.erfc(z)),
[ComplexArg()], n=200)
def test_erfi(self):
assert_mpmath_equal(sc.erfi,
mpmath.erfi,
[Arg()], n=200)
def test_erfi_complex(self):
assert_mpmath_equal(sc.erfi,
mpmath.erfi,
[ComplexArg()], n=200)
def test_ndtr(self):
assert_mpmath_equal(sc.ndtr,
exception_to_nan(lambda z: mpmath.ncdf(z)),
[Arg()], n=200)
def test_ndtr_complex(self):
assert_mpmath_equal(sc.ndtr,
lambda z: mpmath.erfc(-z/np.sqrt(2.))/2.,
[ComplexArg(a=complex(-10000, -10000), b=complex(10000, 10000))], n=400)
def test_log_ndtr(self):
assert_mpmath_equal(sc.log_ndtr,
exception_to_nan(lambda z: mpmath.log(mpmath.ncdf(z))),
[Arg()], n=600, dps=300, rtol=1e-13)
def test_log_ndtr_complex(self):
assert_mpmath_equal(sc.log_ndtr,
exception_to_nan(lambda z: mpmath.log(mpmath.erfc(-z/np.sqrt(2.))/2.)),
[ComplexArg(a=complex(-10000, -100),
b=complex(10000, 100))], n=200, dps=300)
def test_eulernum(self):
assert_mpmath_equal(lambda n: sc.euler(n)[-1],
mpmath.eulernum,
[IntArg(1, 10000)], n=10000)
def test_expint(self):
assert_mpmath_equal(sc.expn,
mpmath.expint,
[IntArg(0, 200), Arg(0, np.inf)],
rtol=1e-13, dps=160)
def test_fresnels(self):
def fresnels(x):
return sc.fresnel(x)[0]
assert_mpmath_equal(fresnels,
mpmath.fresnels,
[Arg()])
def test_fresnelc(self):
def fresnelc(x):
return sc.fresnel(x)[1]
assert_mpmath_equal(fresnelc,
mpmath.fresnelc,
[Arg()])
def test_gamma(self):
assert_mpmath_equal(sc.gamma,
exception_to_nan(mpmath.gamma),
[Arg()])
def test_gamma_complex(self):
assert_mpmath_equal(sc.gamma,
exception_to_nan(mpmath.gamma),
[ComplexArg()], rtol=5e-13)
def test_gammainc(self):
# Larger arguments are tested in test_data.py:test_local
assert_mpmath_equal(sc.gammainc,
lambda z, b: mpmath.gammainc(z, b=b, regularized=True),
[Arg(0, 1e4, inclusive_a=False), Arg(0, 1e4)],
nan_ok=False, rtol=1e-11)
def test_gammaincc(self):
# Larger arguments are tested in test_data.py:test_local
assert_mpmath_equal(sc.gammaincc,
lambda z, a: mpmath.gammainc(z, a=a, regularized=True),
[Arg(0, 1e4, inclusive_a=False), Arg(0, 1e4)],
nan_ok=False, rtol=1e-11)
def test_gammaln(self):
# The real part of loggamma is log(|gamma(z)|).
def f(z):
return mpmath.loggamma(z).real
assert_mpmath_equal(sc.gammaln, exception_to_nan(f), [Arg()])
@pytest.mark.xfail(run=False)
def test_gegenbauer(self):
assert_mpmath_equal(sc.eval_gegenbauer,
exception_to_nan(mpmath.gegenbauer),
[Arg(-1e3, 1e3), Arg(), Arg()])
def test_gegenbauer_int(self):
# Redefine functions to deal with numerical + mpmath issues
def gegenbauer(n, a, x):
# Avoid overflow at large `a` (mpmath would need an even larger
# dps to handle this correctly, so just skip this region)
if abs(a) > 1e100:
return np.nan
# Deal with n=0, n=1 correctly; mpmath 0.17 doesn't do these
# always correctly
if n == 0:
r = 1.0
elif n == 1:
r = 2*a*x
else:
r = mpmath.gegenbauer(n, a, x)
# Mpmath 0.17 gives wrong results (spurious zero) in some cases, so
# compute the value by perturbing the result
if float(r) == 0 and a < -1 and float(a) == int(float(a)):
r = mpmath.gegenbauer(n, a + mpmath.mpf('1e-50'), x)
if abs(r) < mpmath.mpf('1e-50'):
r = mpmath.mpf('0.0')
# Differing overflow thresholds in scipy vs. mpmath
if abs(r) > 1e270:
return np.inf
return r
def sc_gegenbauer(n, a, x):
r = sc.eval_gegenbauer(int(n), a, x)
# Differing overflow thresholds in scipy vs. mpmath
if abs(r) > 1e270:
return np.inf
return r
assert_mpmath_equal(sc_gegenbauer,
exception_to_nan(gegenbauer),
[IntArg(0, 100), Arg(-1e9, 1e9), Arg()],
n=40000, dps=100,
ignore_inf_sign=True, rtol=1e-6)
# Check the small-x expansion
assert_mpmath_equal(sc_gegenbauer,
exception_to_nan(gegenbauer),
[IntArg(0, 100), Arg(), FixedArg(np.logspace(-30, -4, 30))],
dps=100,
ignore_inf_sign=True)
@pytest.mark.xfail(run=False)
def test_gegenbauer_complex(self):
assert_mpmath_equal(lambda n, a, x: sc.eval_gegenbauer(int(n), a.real, x),
exception_to_nan(mpmath.gegenbauer),
[IntArg(0, 100), Arg(), ComplexArg()])
@nonfunctional_tooslow
def test_gegenbauer_complex_general(self):
assert_mpmath_equal(lambda n, a, x: sc.eval_gegenbauer(n.real, a.real, x),
exception_to_nan(mpmath.gegenbauer),
[Arg(-1e3, 1e3), Arg(), ComplexArg()])
def test_hankel1(self):
assert_mpmath_equal(sc.hankel1,
exception_to_nan(lambda v, x: mpmath.hankel1(v, x,
**HYPERKW)),
[Arg(-1e20, 1e20), Arg()])
def test_hankel2(self):
assert_mpmath_equal(sc.hankel2,
exception_to_nan(lambda v, x: mpmath.hankel2(v, x, **HYPERKW)),
[Arg(-1e20, 1e20), Arg()])
@pytest.mark.xfail(run=False, reason="issues at intermediately large orders")
def test_hermite(self):
assert_mpmath_equal(lambda n, x: sc.eval_hermite(int(n), x),
exception_to_nan(mpmath.hermite),
[IntArg(0, 10000), Arg()])
# hurwitz: same as zeta
def test_hyp0f1(self):
# mpmath reports no convergence unless maxterms is large enough
KW = dict(maxprec=400, maxterms=1500)
# n=500 (non-xslow default) fails for one bad point
assert_mpmath_equal(sc.hyp0f1,
lambda a, x: mpmath.hyp0f1(a, x, **KW),
[Arg(-1e7, 1e7), Arg(0, 1e5)],
n=5000)
# NB: The range of the second parameter ("z") is limited from below
# because of an overflow in the intermediate calculations. The way
# for fix it is to implement an asymptotic expansion for Bessel J
# (similar to what is implemented for Bessel I here).
def test_hyp0f1_complex(self):
assert_mpmath_equal(lambda a, z: sc.hyp0f1(a.real, z),
exception_to_nan(lambda a, x: mpmath.hyp0f1(a, x, **HYPERKW)),
[Arg(-10, 10), ComplexArg(complex(-120, -120), complex(120, 120))])
# NB: The range of the first parameter ("v") are limited by an overflow
# in the intermediate calculations. Can be fixed by implementing an
# asymptotic expansion for Bessel functions for large order.
def test_hyp1f1(self):
def mpmath_hyp1f1(a, b, x):
try:
return mpmath.hyp1f1(a, b, x)
except ZeroDivisionError:
return np.inf
assert_mpmath_equal(
sc.hyp1f1,
mpmath_hyp1f1,
[Arg(-50, 50), Arg(1, 50, inclusive_a=False), Arg(-50, 50)],
n=500,
nan_ok=False
)
@pytest.mark.xfail(run=False)
def test_hyp1f1_complex(self):
assert_mpmath_equal(inf_to_nan(lambda a, b, x: sc.hyp1f1(a.real, b.real, x)),
exception_to_nan(lambda a, b, x: mpmath.hyp1f1(a, b, x, **HYPERKW)),
[Arg(-1e3, 1e3), Arg(-1e3, 1e3), ComplexArg()],
n=2000)
@nonfunctional_tooslow
def test_hyp2f1_complex(self):
# SciPy's hyp2f1 seems to have performance and accuracy problems
assert_mpmath_equal(lambda a, b, c, x: sc.hyp2f1(a.real, b.real, c.real, x),
exception_to_nan(lambda a, b, c, x: mpmath.hyp2f1(a, b, c, x, **HYPERKW)),
[Arg(-1e2, 1e2), Arg(-1e2, 1e2), Arg(-1e2, 1e2), ComplexArg()],
n=10)
@pytest.mark.xfail(run=False)
def test_hyperu(self):
assert_mpmath_equal(sc.hyperu,
exception_to_nan(lambda a, b, x: mpmath.hyperu(a, b, x, **HYPERKW)),
[Arg(), Arg(), Arg()])
@pytest.mark.xfail_on_32bit("mpmath issue gh-342: unsupported operand mpz, long for pow")
def test_igam_fac(self):
def mp_igam_fac(a, x):
return mpmath.power(x, a)*mpmath.exp(-x)/mpmath.gamma(a)
assert_mpmath_equal(_igam_fac,
mp_igam_fac,
[Arg(0, 1e14, inclusive_a=False), Arg(0, 1e14)],
rtol=1e-10)
def test_j0(self):
# The Bessel function at large arguments is j0(x) ~ cos(x + phi)/sqrt(x)
# and at large arguments the phase of the cosine loses precision.
#
# This is numerically expected behavior, so we compare only up to
# 1e8 = 1e15 * 1e-7
assert_mpmath_equal(sc.j0,
mpmath.j0,
[Arg(-1e3, 1e3)])
assert_mpmath_equal(sc.j0,
mpmath.j0,
[Arg(-1e8, 1e8)],
rtol=1e-5)
def test_j1(self):
# See comment in test_j0
assert_mpmath_equal(sc.j1,
mpmath.j1,
[Arg(-1e3, 1e3)])
assert_mpmath_equal(sc.j1,
mpmath.j1,
[Arg(-1e8, 1e8)],
rtol=1e-5)
@pytest.mark.xfail(run=False)
def test_jacobi(self):
assert_mpmath_equal(sc.eval_jacobi,
exception_to_nan(lambda a, b, c, x: mpmath.jacobi(a, b, c, x, **HYPERKW)),
[Arg(), Arg(), Arg(), Arg()])
assert_mpmath_equal(lambda n, b, c, x: sc.eval_jacobi(int(n), b, c, x),
exception_to_nan(lambda a, b, c, x: mpmath.jacobi(a, b, c, x, **HYPERKW)),
[IntArg(), Arg(), Arg(), Arg()])
def test_jacobi_int(self):
# Redefine functions to deal with numerical + mpmath issues
def jacobi(n, a, b, x):
# Mpmath does not handle n=0 case always correctly
if n == 0:
return 1.0
return mpmath.jacobi(n, a, b, x)
assert_mpmath_equal(lambda n, a, b, x: sc.eval_jacobi(int(n), a, b, x),
lambda n, a, b, x: exception_to_nan(jacobi)(n, a, b, x, **HYPERKW),
[IntArg(), Arg(), Arg(), Arg()],
n=20000, dps=50)
def test_kei(self):
def kei(x):
if x == 0:
# work around mpmath issue at x=0
return -pi/4
return exception_to_nan(mpmath.kei)(0, x, **HYPERKW)
assert_mpmath_equal(sc.kei,
kei,
[Arg(-1e30, 1e30)], n=1000)
def test_ker(self):
assert_mpmath_equal(sc.ker,
exception_to_nan(lambda x: mpmath.ker(0, x, **HYPERKW)),
[Arg(-1e30, 1e30)], n=1000)
@nonfunctional_tooslow
def test_laguerre(self):
assert_mpmath_equal(trace_args(sc.eval_laguerre),
lambda n, x: exception_to_nan(mpmath.laguerre)(n, x, **HYPERKW),
[Arg(), Arg()])
def test_laguerre_int(self):
assert_mpmath_equal(lambda n, x: sc.eval_laguerre(int(n), x),
lambda n, x: exception_to_nan(mpmath.laguerre)(n, x, **HYPERKW),
[IntArg(), Arg()], n=20000)
@pytest.mark.xfail_on_32bit("see gh-3551 for bad points")
def test_lambertw_real(self):
assert_mpmath_equal(lambda x, k: sc.lambertw(x, int(k.real)),
lambda x, k: mpmath.lambertw(x, int(k.real)),
[ComplexArg(-np.inf, np.inf), IntArg(0, 10)],
rtol=1e-13, nan_ok=False)
def test_lanczos_sum_expg_scaled(self):
maxgamma = 171.624376956302725
e = np.exp(1)
g = 6.024680040776729583740234375
def gamma(x):
with np.errstate(over='ignore'):
fac = ((x + g - 0.5)/e)**(x - 0.5)
if fac != np.inf:
res = fac*_lanczos_sum_expg_scaled(x)
else:
fac = ((x + g - 0.5)/e)**(0.5*(x - 0.5))
res = fac*_lanczos_sum_expg_scaled(x)
res *= fac
return res
assert_mpmath_equal(gamma,
mpmath.gamma,
[Arg(0, maxgamma, inclusive_a=False)],
rtol=1e-13)
@nonfunctional_tooslow
def test_legendre(self):
assert_mpmath_equal(sc.eval_legendre,
mpmath.legendre,
[Arg(), Arg()])
def test_legendre_int(self):
assert_mpmath_equal(lambda n, x: sc.eval_legendre(int(n), x),
lambda n, x: exception_to_nan(mpmath.legendre)(n, x, **HYPERKW),
[IntArg(), Arg()],
n=20000)
# Check the small-x expansion
assert_mpmath_equal(lambda n, x: sc.eval_legendre(int(n), x),
lambda n, x: exception_to_nan(mpmath.legendre)(n, x, **HYPERKW),
[IntArg(), FixedArg(np.logspace(-30, -4, 20))])
def test_legenp(self):
def lpnm(n, m, z):
try:
v = sc.lpmn(m, n, z)[0][-1,-1]
except ValueError:
return np.nan
if abs(v) > 1e306:
# harmonize overflow to inf
v = np.inf * np.sign(v.real)
return v
def lpnm_2(n, m, z):
v = sc.lpmv(m, n, z)
if abs(v) > 1e306:
# harmonize overflow to inf
v = np.inf * np.sign(v.real)
return v
def legenp(n, m, z):
if (z == 1 or z == -1) and int(n) == n:
# Special case (mpmath may give inf, we take the limit by
# continuity)
if m == 0:
if n < 0:
n = -n - 1
return mpmath.power(mpmath.sign(z), n)
else:
return 0
if abs(z) < 1e-15:
# mpmath has bad performance here
return np.nan
typ = 2 if abs(z) < 1 else 3
v = exception_to_nan(mpmath.legenp)(n, m, z, type=typ)
if abs(v) > 1e306:
# harmonize overflow to inf
v = mpmath.inf * mpmath.sign(v.real)
return v
assert_mpmath_equal(lpnm,
legenp,
[IntArg(-100, 100), IntArg(-100, 100), Arg()])
assert_mpmath_equal(lpnm_2,
legenp,
[IntArg(-100, 100), Arg(-100, 100), Arg(-1, 1)],
atol=1e-10)
def test_legenp_complex_2(self):
def clpnm(n, m, z):
try:
return sc.clpmn(m.real, n.real, z, type=2)[0][-1,-1]
except ValueError:
return np.nan
def legenp(n, m, z):
if abs(z) < 1e-15:
# mpmath has bad performance here
return np.nan
return exception_to_nan(mpmath.legenp)(int(n.real), int(m.real), z, type=2)
# mpmath is quite slow here
x = np.array([-2, -0.99, -0.5, 0, 1e-5, 0.5, 0.99, 20, 2e3])
y = np.array([-1e3, -0.5, 0.5, 1.3])
z = (x[:,None] + 1j*y[None,:]).ravel()
assert_mpmath_equal(clpnm,
legenp,
[FixedArg([-2, -1, 0, 1, 2, 10]), FixedArg([-2, -1, 0, 1, 2, 10]), FixedArg(z)],
rtol=1e-6,
n=500)
def test_legenp_complex_3(self):
def clpnm(n, m, z):
try:
return sc.clpmn(m.real, n.real, z, type=3)[0][-1,-1]
except ValueError:
return np.nan
def legenp(n, m, z):
if abs(z) < 1e-15:
# mpmath has bad performance here
return np.nan
return exception_to_nan(mpmath.legenp)(int(n.real), int(m.real), z, type=3)
# mpmath is quite slow here
x = np.array([-2, -0.99, -0.5, 0, 1e-5, 0.5, 0.99, 20, 2e3])
y = np.array([-1e3, -0.5, 0.5, 1.3])
z = (x[:,None] + 1j*y[None,:]).ravel()
assert_mpmath_equal(clpnm,
legenp,
[FixedArg([-2, -1, 0, 1, 2, 10]), FixedArg([-2, -1, 0, 1, 2, 10]), FixedArg(z)],
rtol=1e-6,
n=500)
@pytest.mark.xfail(run=False, reason="apparently picks wrong function at |z| > 1")
def test_legenq(self):
def lqnm(n, m, z):
return sc.lqmn(m, n, z)[0][-1,-1]
def legenq(n, m, z):
if abs(z) < 1e-15:
# mpmath has bad performance here
return np.nan
return exception_to_nan(mpmath.legenq)(n, m, z, type=2)
assert_mpmath_equal(lqnm,
legenq,
[IntArg(0, 100), IntArg(0, 100), Arg()])
@nonfunctional_tooslow
def test_legenq_complex(self):
def lqnm(n, m, z):
return sc.lqmn(int(m.real), int(n.real), z)[0][-1,-1]
def legenq(n, m, z):
if abs(z) < 1e-15:
# mpmath has bad performance here
return np.nan
return exception_to_nan(mpmath.legenq)(int(n.real), int(m.real), z, type=2)
assert_mpmath_equal(lqnm,
legenq,
[IntArg(0, 100), IntArg(0, 100), ComplexArg()],
n=100)
def test_lgam1p(self):
def param_filter(x):
# Filter the poles
return np.where((np.floor(x) == x) & (x <= 0), False, True)
def mp_lgam1p(z):
# The real part of loggamma is log(|gamma(z)|)
return mpmath.loggamma(1 + z).real
assert_mpmath_equal(_lgam1p,
mp_lgam1p,
[Arg()], rtol=1e-13, dps=100,
param_filter=param_filter)
def test_loggamma(self):
def mpmath_loggamma(z):
try:
res = mpmath.loggamma(z)
except ValueError:
res = complex(np.nan, np.nan)
return res
assert_mpmath_equal(sc.loggamma,
mpmath_loggamma,
[ComplexArg()], nan_ok=False,
distinguish_nan_and_inf=False, rtol=5e-14)
@pytest.mark.xfail(run=False)
def test_pcfd(self):
def pcfd(v, x):
return sc.pbdv(v, x)[0]
assert_mpmath_equal(pcfd,
exception_to_nan(lambda v, x: mpmath.pcfd(v, x, **HYPERKW)),
[Arg(), Arg()])
@pytest.mark.xfail(run=False, reason="it's not the same as the mpmath function --- maybe different definition?")
def test_pcfv(self):
def pcfv(v, x):
return sc.pbvv(v, x)[0]
assert_mpmath_equal(pcfv,
lambda v, x: time_limited()(exception_to_nan(mpmath.pcfv))(v, x, **HYPERKW),
[Arg(), Arg()], n=1000)
def test_pcfw(self):
def pcfw(a, x):
return sc.pbwa(a, x)[0]
def dpcfw(a, x):
return sc.pbwa(a, x)[1]
def mpmath_dpcfw(a, x):
return mpmath.diff(mpmath.pcfw, (a, x), (0, 1))
# The Zhang and Jin implementation only uses Taylor series and
# is thus accurate in only a very small range.
assert_mpmath_equal(pcfw,
mpmath.pcfw,
[Arg(-5, 5), Arg(-5, 5)], rtol=2e-8, n=100)
assert_mpmath_equal(dpcfw,
mpmath_dpcfw,
[Arg(-5, 5), Arg(-5, 5)], rtol=2e-9, n=100)
@pytest.mark.xfail(run=False, reason="issues at large arguments (atol OK, rtol not) and <eps-close to z=0")
def test_polygamma(self):
assert_mpmath_equal(sc.polygamma,
time_limited()(exception_to_nan(mpmath.polygamma)),
[IntArg(0, 1000), Arg()])
def test_rgamma(self):
assert_mpmath_equal(
sc.rgamma,
mpmath.rgamma,
[Arg(-8000, np.inf)],
n=5000,
nan_ok=False,
ignore_inf_sign=True,
)
def test_rgamma_complex(self):
assert_mpmath_equal(sc.rgamma,
exception_to_nan(mpmath.rgamma),
[ComplexArg()], rtol=5e-13)
@pytest.mark.xfail(reason=("see gh-3551 for bad points on 32 bit "
"systems and gh-8095 for another bad "
"point"))
def test_rf(self):
if _pep440.parse(mpmath.__version__) >= _pep440.Version("1.0.0"):
# no workarounds needed
mppoch = mpmath.rf
else:
def mppoch(a, m):
# deal with cases where the result in double precision
# hits exactly a non-positive integer, but the
# corresponding extended-precision mpf floats don't
if float(a + m) == int(a + m) and float(a + m) <= 0:
a = mpmath.mpf(a)
m = int(a + m) - a
return mpmath.rf(a, m)
assert_mpmath_equal(sc.poch,
mppoch,
[Arg(), Arg()],
dps=400)
def test_sinpi(self):
eps = np.finfo(float).eps
assert_mpmath_equal(_sinpi, mpmath.sinpi,
[Arg()], nan_ok=False, rtol=2*eps)
def test_sinpi_complex(self):
assert_mpmath_equal(_sinpi, mpmath.sinpi,
[ComplexArg()], nan_ok=False, rtol=2e-14)
def test_shi(self):
def shi(x):
return sc.shichi(x)[0]
assert_mpmath_equal(shi, mpmath.shi, [Arg()])
# check asymptotic series cross-over
assert_mpmath_equal(shi, mpmath.shi, [FixedArg([88 - 1e-9, 88, 88 + 1e-9])])
def test_shi_complex(self):
def shi(z):
return sc.shichi(z)[0]
# shi oscillates as Im[z] -> +- inf, so limit range
assert_mpmath_equal(shi,
mpmath.shi,
[ComplexArg(complex(-np.inf, -1e8), complex(np.inf, 1e8))],
rtol=1e-12)
def test_si(self):
def si(x):
return sc.sici(x)[0]
assert_mpmath_equal(si, mpmath.si, [Arg()])
def test_si_complex(self):
def si(z):
return sc.sici(z)[0]
# si oscillates as Re[z] -> +- inf, so limit range
assert_mpmath_equal(si,
mpmath.si,
[ComplexArg(complex(-1e8, -np.inf), complex(1e8, np.inf))],
rtol=1e-12)
def test_spence(self):
# mpmath uses a different convention for the dilogarithm
def dilog(x):
return mpmath.polylog(2, 1 - x)
# Spence has a branch cut on the negative real axis
assert_mpmath_equal(sc.spence,
exception_to_nan(dilog),
[Arg(0, np.inf)], rtol=1e-14)
def test_spence_complex(self):
def dilog(z):
return mpmath.polylog(2, 1 - z)
assert_mpmath_equal(sc.spence,
exception_to_nan(dilog),
[ComplexArg()], rtol=1e-14)
def test_spherharm(self):
def spherharm(l, m, theta, phi):
if m > l:
return np.nan
return sc.sph_harm(m, l, phi, theta)
assert_mpmath_equal(spherharm,
mpmath.spherharm,
[IntArg(0, 100), IntArg(0, 100),
Arg(a=0, b=pi), Arg(a=0, b=2*pi)],
atol=1e-8, n=6000,
dps=150)
def test_struveh(self):
assert_mpmath_equal(sc.struve,
exception_to_nan(mpmath.struveh),
[Arg(-1e4, 1e4), Arg(0, 1e4)],
rtol=5e-10)
def test_struvel(self):
def mp_struvel(v, z):
if v < 0 and z < -v and abs(v) > 1000:
# larger DPS needed for correct results
old_dps = mpmath.mp.dps
try:
mpmath.mp.dps = 300
return mpmath.struvel(v, z)
finally:
mpmath.mp.dps = old_dps
return mpmath.struvel(v, z)
assert_mpmath_equal(sc.modstruve,
exception_to_nan(mp_struvel),
[Arg(-1e4, 1e4), Arg(0, 1e4)],
rtol=5e-10,
ignore_inf_sign=True)
def test_wrightomega_real(self):
def mpmath_wrightomega_real(x):
return mpmath.lambertw(mpmath.exp(x), mpmath.mpf('-0.5'))
# For x < -1000 the Wright Omega function is just 0 to double
# precision, and for x > 1e21 it is just x to double
# precision.
assert_mpmath_equal(
sc.wrightomega,
mpmath_wrightomega_real,
[Arg(-1000, 1e21)],
rtol=5e-15,
atol=0,
nan_ok=False,
)
def test_wrightomega(self):
assert_mpmath_equal(sc.wrightomega,
lambda z: _mpmath_wrightomega(z, 25),
[ComplexArg()], rtol=1e-14, nan_ok=False)
def test_hurwitz_zeta(self):
assert_mpmath_equal(sc.zeta,
exception_to_nan(mpmath.zeta),
[Arg(a=1, b=1e10, inclusive_a=False),
Arg(a=0, inclusive_a=False)])
def test_riemann_zeta(self):
assert_mpmath_equal(
sc.zeta,
lambda x: mpmath.zeta(x) if x != 1 else mpmath.inf,
[Arg(-100, 100)],
nan_ok=False,
rtol=5e-13,
)
def test_zetac(self):
assert_mpmath_equal(sc.zetac,
lambda x: (mpmath.zeta(x) - 1
if x != 1 else mpmath.inf),
[Arg(-100, 100)],
nan_ok=False, dps=45, rtol=5e-13)
def test_boxcox(self):
def mp_boxcox(x, lmbda):
x = mpmath.mp.mpf(x)
lmbda = mpmath.mp.mpf(lmbda)
if lmbda == 0:
return mpmath.mp.log(x)
else:
return mpmath.mp.powm1(x, lmbda) / lmbda
assert_mpmath_equal(sc.boxcox,
exception_to_nan(mp_boxcox),
[Arg(a=0, inclusive_a=False), Arg()],
n=200,
dps=60,
rtol=1e-13)
def test_boxcox1p(self):
def mp_boxcox1p(x, lmbda):
x = mpmath.mp.mpf(x)
lmbda = mpmath.mp.mpf(lmbda)
one = mpmath.mp.mpf(1)
if lmbda == 0:
return mpmath.mp.log(one + x)
else:
return mpmath.mp.powm1(one + x, lmbda) / lmbda
assert_mpmath_equal(sc.boxcox1p,
exception_to_nan(mp_boxcox1p),
[Arg(a=-1, inclusive_a=False), Arg()],
n=200,
dps=60,
rtol=1e-13)
def test_spherical_jn(self):
def mp_spherical_jn(n, z):
arg = mpmath.mpmathify(z)
out = (mpmath.besselj(n + mpmath.mpf(1)/2, arg) /
mpmath.sqrt(2*arg/mpmath.pi))
if arg.imag == 0:
return out.real
else:
return out
assert_mpmath_equal(lambda n, z: sc.spherical_jn(int(n), z),
exception_to_nan(mp_spherical_jn),
[IntArg(0, 200), Arg(-1e8, 1e8)],
dps=300)
def test_spherical_jn_complex(self):
def mp_spherical_jn(n, z):
arg = mpmath.mpmathify(z)
out = (mpmath.besselj(n + mpmath.mpf(1)/2, arg) /
mpmath.sqrt(2*arg/mpmath.pi))
if arg.imag == 0:
return out.real
else:
return out
assert_mpmath_equal(lambda n, z: sc.spherical_jn(int(n.real), z),
exception_to_nan(mp_spherical_jn),
[IntArg(0, 200), ComplexArg()])
def test_spherical_yn(self):
def mp_spherical_yn(n, z):
arg = mpmath.mpmathify(z)
out = (mpmath.bessely(n + mpmath.mpf(1)/2, arg) /
mpmath.sqrt(2*arg/mpmath.pi))
if arg.imag == 0:
return out.real
else:
return out
assert_mpmath_equal(lambda n, z: sc.spherical_yn(int(n), z),
exception_to_nan(mp_spherical_yn),
[IntArg(0, 200), Arg(-1e10, 1e10)],
dps=100)
def test_spherical_yn_complex(self):
def mp_spherical_yn(n, z):
arg = mpmath.mpmathify(z)
out = (mpmath.bessely(n + mpmath.mpf(1)/2, arg) /
mpmath.sqrt(2*arg/mpmath.pi))
if arg.imag == 0:
return out.real
else:
return out
assert_mpmath_equal(lambda n, z: sc.spherical_yn(int(n.real), z),
exception_to_nan(mp_spherical_yn),
[IntArg(0, 200), ComplexArg()])
def test_spherical_in(self):
def mp_spherical_in(n, z):
arg = mpmath.mpmathify(z)
out = (mpmath.besseli(n + mpmath.mpf(1)/2, arg) /
mpmath.sqrt(2*arg/mpmath.pi))
if arg.imag == 0:
return out.real
else:
return out
assert_mpmath_equal(lambda n, z: sc.spherical_in(int(n), z),
exception_to_nan(mp_spherical_in),
[IntArg(0, 200), Arg()],
dps=200, atol=10**(-278))
def test_spherical_in_complex(self):
def mp_spherical_in(n, z):
arg = mpmath.mpmathify(z)
out = (mpmath.besseli(n + mpmath.mpf(1)/2, arg) /
mpmath.sqrt(2*arg/mpmath.pi))
if arg.imag == 0:
return out.real
else:
return out
assert_mpmath_equal(lambda n, z: sc.spherical_in(int(n.real), z),
exception_to_nan(mp_spherical_in),
[IntArg(0, 200), ComplexArg()])
def test_spherical_kn(self):
def mp_spherical_kn(n, z):
out = (mpmath.besselk(n + mpmath.mpf(1)/2, z) *
mpmath.sqrt(mpmath.pi/(2*mpmath.mpmathify(z))))
if mpmath.mpmathify(z).imag == 0:
return out.real
else:
return out
assert_mpmath_equal(lambda n, z: sc.spherical_kn(int(n), z),
exception_to_nan(mp_spherical_kn),
[IntArg(0, 150), Arg()],
dps=100)
@pytest.mark.xfail(run=False, reason="Accuracy issues near z = -1 inherited from kv.")
def test_spherical_kn_complex(self):
def mp_spherical_kn(n, z):
arg = mpmath.mpmathify(z)
out = (mpmath.besselk(n + mpmath.mpf(1)/2, arg) /
mpmath.sqrt(2*arg/mpmath.pi))
if arg.imag == 0:
return out.real
else:
return out
assert_mpmath_equal(lambda n, z: sc.spherical_kn(int(n.real), z),
exception_to_nan(mp_spherical_kn),
[IntArg(0, 200), ComplexArg()],
dps=200)
| 75,189
| 36.075937
| 123
|
py
|
scipy
|
scipy-main/scipy/special/tests/test_spfun_stats.py
|
import numpy as np
from numpy.testing import (assert_array_equal,
assert_array_almost_equal_nulp, assert_almost_equal)
from pytest import raises as assert_raises
from scipy.special import gammaln, multigammaln
class TestMultiGammaLn:
def test1(self):
# A test of the identity
# Gamma_1(a) = Gamma(a)
np.random.seed(1234)
a = np.abs(np.random.randn())
assert_array_equal(multigammaln(a, 1), gammaln(a))
def test2(self):
# A test of the identity
# Gamma_2(a) = sqrt(pi) * Gamma(a) * Gamma(a - 0.5)
a = np.array([2.5, 10.0])
result = multigammaln(a, 2)
expected = np.log(np.sqrt(np.pi)) + gammaln(a) + gammaln(a - 0.5)
assert_almost_equal(result, expected)
def test_bararg(self):
assert_raises(ValueError, multigammaln, 0.5, 1.2)
def _check_multigammaln_array_result(a, d):
# Test that the shape of the array returned by multigammaln
# matches the input shape, and that all the values match
# the value computed when multigammaln is called with a scalar.
result = multigammaln(a, d)
assert_array_equal(a.shape, result.shape)
a1 = a.ravel()
result1 = result.ravel()
for i in range(a.size):
assert_array_almost_equal_nulp(result1[i], multigammaln(a1[i], d))
def test_multigammaln_array_arg():
# Check that the array returned by multigammaln has the correct
# shape and contains the correct values. The cases have arrays
# with several differnent shapes.
# The cases include a regression test for ticket #1849
# (a = np.array([2.0]), an array with a single element).
np.random.seed(1234)
cases = [
# a, d
(np.abs(np.random.randn(3, 2)) + 5, 5),
(np.abs(np.random.randn(1, 2)) + 5, 5),
(np.arange(10.0, 18.0).reshape(2, 2, 2), 3),
(np.array([2.0]), 3),
(np.float64(2.0), 3),
]
for a, d in cases:
_check_multigammaln_array_result(a, d)
| 1,998
| 31.241935
| 74
|
py
|
scipy
|
scipy-main/scipy/special/tests/test_loggamma.py
|
import numpy as np
from numpy.testing import assert_allclose, assert_
from scipy.special._testutils import FuncData
from scipy.special import gamma, gammaln, loggamma
def test_identities1():
# test the identity exp(loggamma(z)) = gamma(z)
x = np.array([-99.5, -9.5, -0.5, 0.5, 9.5, 99.5])
y = x.copy()
x, y = np.meshgrid(x, y)
z = (x + 1J*y).flatten()
dataset = np.vstack((z, gamma(z))).T
def f(z):
return np.exp(loggamma(z))
FuncData(f, dataset, 0, 1, rtol=1e-14, atol=1e-14).check()
def test_identities2():
# test the identity loggamma(z + 1) = log(z) + loggamma(z)
x = np.array([-99.5, -9.5, -0.5, 0.5, 9.5, 99.5])
y = x.copy()
x, y = np.meshgrid(x, y)
z = (x + 1J*y).flatten()
dataset = np.vstack((z, np.log(z) + loggamma(z))).T
def f(z):
return loggamma(z + 1)
FuncData(f, dataset, 0, 1, rtol=1e-14, atol=1e-14).check()
def test_complex_dispatch_realpart():
# Test that the real parts of loggamma and gammaln agree on the
# real axis.
x = np.r_[-np.logspace(10, -10), np.logspace(-10, 10)] + 0.5
dataset = np.vstack((x, gammaln(x))).T
def f(z):
z = np.array(z, dtype='complex128')
return loggamma(z).real
FuncData(f, dataset, 0, 1, rtol=1e-14, atol=1e-14).check()
def test_real_dispatch():
x = np.logspace(-10, 10) + 0.5
dataset = np.vstack((x, gammaln(x))).T
FuncData(loggamma, dataset, 0, 1, rtol=1e-14, atol=1e-14).check()
assert_(loggamma(0) == np.inf)
assert_(np.isnan(loggamma(-1)))
def test_gh_6536():
z = loggamma(complex(-3.4, +0.0))
zbar = loggamma(complex(-3.4, -0.0))
assert_allclose(z, zbar.conjugate(), rtol=1e-15, atol=0)
def test_branch_cut():
# Make sure negative zero is treated correctly
x = -np.logspace(300, -30, 100)
z = np.asarray([complex(x0, 0.0) for x0 in x])
zbar = np.asarray([complex(x0, -0.0) for x0 in x])
assert_allclose(z, zbar.conjugate(), rtol=1e-15, atol=0)
| 1,992
| 27.070423
| 69
|
py
|
scipy
|
scipy-main/scipy/special/tests/test_spence.py
|
import numpy as np
from numpy import sqrt, log, pi
from scipy.special._testutils import FuncData
from scipy.special import spence
def test_consistency():
# Make sure the implementation of spence for real arguments
# agrees with the implementation of spence for imaginary arguments.
x = np.logspace(-30, 300, 200)
dataset = np.vstack((x + 0j, spence(x))).T
FuncData(spence, dataset, 0, 1, rtol=1e-14).check()
def test_special_points():
# Check against known values of Spence's function.
phi = (1 + sqrt(5))/2
dataset = [(1, 0),
(2, -pi**2/12),
(0.5, pi**2/12 - log(2)**2/2),
(0, pi**2/6),
(-1, pi**2/4 - 1j*pi*log(2)),
((-1 + sqrt(5))/2, pi**2/15 - log(phi)**2),
((3 - sqrt(5))/2, pi**2/10 - log(phi)**2),
(phi, -pi**2/15 + log(phi)**2/2),
# Corrected from Zagier, "The Dilogarithm Function"
((3 + sqrt(5))/2, -pi**2/10 - log(phi)**2)]
dataset = np.asarray(dataset)
FuncData(spence, dataset, 0, 1, rtol=1e-14).check()
| 1,099
| 32.333333
| 71
|
py
|
scipy
|
scipy-main/scipy/special/tests/test_precompute_expn_asy.py
|
from numpy.testing import assert_equal
from scipy.special._testutils import check_version, MissingModule
from scipy.special._precompute.expn_asy import generate_A
try:
import sympy
from sympy import Poly
except ImportError:
sympy = MissingModule("sympy")
@check_version(sympy, "1.0")
def test_generate_A():
# Data from DLMF 8.20.5
x = sympy.symbols('x')
Astd = [Poly(1, x),
Poly(1, x),
Poly(1 - 2*x),
Poly(1 - 8*x + 6*x**2)]
Ares = generate_A(len(Astd))
for p, q in zip(Astd, Ares):
assert_equal(p, q)
| 583
| 22.36
| 65
|
py
|
scipy
|
scipy-main/scipy/special/tests/test_pcf.py
|
"""Tests for parabolic cylinder functions.
"""
import numpy as np
from numpy.testing import assert_allclose, assert_equal
import scipy.special as sc
def test_pbwa_segfault():
    # Regression test for https://github.com/scipy/scipy/issues/6208.
    #
    # Reference values generated by mpmath.
    expected = (1.02276567211316867161, -0.48887053372346189882)
    assert_allclose(sc.pbwa(0, 0), expected, rtol=1e-13, atol=0)
def test_pbwa_nan():
    # Outside the region where the implementation is accurate it must
    # report NaN for both the function value and its derivative.
    for point in [(-6, -6), (-6, 6), (6, -6), (6, 6)]:
        assert_equal(sc.pbwa(*point), (np.nan, np.nan))
| 664
| 25.6
| 69
|
py
|
scipy
|
scipy-main/scipy/special/tests/test_zeta.py
|
import scipy.special as sc
import numpy as np
from numpy.testing import assert_equal, assert_allclose
def test_zeta():
    # Hurwitz zeta: zeta(s, 2) = zeta(s) - 1, so zeta(2, 2) = pi**2/6 - 1.
    expected = np.pi ** 2 / 6 - 1
    assert_allclose(sc.zeta(2, 2), expected, rtol=1e-12)
def test_zetac():
    # Reference values computed with Wolfram Alpha's `Zeta[x] - 1`.
    points = [
        (-2.1, -0.9972705002153750),
        (0.8, -5.437538415895550),
        (0.9999, -10000.42279161673),
        (9, 0.002008392826082214),
        (50, 8.881784210930816e-16),
        (75, 2.646977960169853e-23),
    ]
    x, desired = zip(*points)
    assert_allclose(sc.zetac(list(x)), list(desired), rtol=1e-12)
def test_zetac_special_cases():
    # Limits at the ends of the real line and the pole at s = 1.
    assert np.isnan(sc.zetac(-np.inf))
    assert sc.zetac(np.inf) == 0
    assert sc.zetac(0) == -1.5
    assert sc.zetac(1.0) == np.inf
    # zeta vanishes at negative even integers, so zetac is exactly -1 there.
    assert_equal(sc.zetac([-2, -50, -100]), -1)
def test_riemann_zeta_special_cases():
    # Riemann zeta is zero at the trivial zeros (negative even integers).
    assert_equal(sc.zeta([-2, -4, -6, -8, -10]), 0)
    # Edge arguments.
    assert np.isnan(sc.zeta(np.nan))
    assert sc.zeta(np.inf) == 1
    assert sc.zeta(0) == -0.5
    # Classical closed forms at the even integers.
    assert_allclose(sc.zeta(2), np.pi**2/6, rtol=1e-12)
    assert_allclose(sc.zeta(4), np.pi**4/90, rtol=1e-12)
def test_riemann_zeta_avoid_overflow():
    # A large negative argument whose value is near the double-precision
    # limit; intermediate terms must not overflow.
    argument = -260.00000000001
    reference = -5.6966307844402683127e+297  # computed with mpmath
    assert_allclose(sc.zeta(argument), reference, atol=0, rtol=5e-14)
| 1,367
| 26.36
| 66
|
py
|
scipy
|
scipy-main/scipy/special/tests/test_spherical_bessel.py
|
#
# Tests of spherical Bessel functions.
#
import numpy as np
from numpy.testing import (assert_almost_equal, assert_allclose,
assert_array_almost_equal, suppress_warnings)
import pytest
from numpy import sin, cos, sinh, cosh, exp, inf, nan, r_, pi
from scipy.special import spherical_jn, spherical_yn, spherical_in, spherical_kn
from scipy.integrate import quad
class TestSphericalJn:
    """Tests for spherical_jn: closed forms, recurrences, and limits.

    Reference identities are cited inline from the NIST DLMF.
    """
    def test_spherical_jn_exact(self):
        # https://dlmf.nist.gov/10.49.E3
        # Note: exact expression is numerically stable only for small
        # n or z >> n.
        x = np.array([0.12, 1.23, 12.34, 123.45, 1234.5])
        assert_allclose(spherical_jn(2, x),
                        (-1/x + 3/x**3)*sin(x) - 3/x**2*cos(x))
    def test_spherical_jn_recurrence_complex(self):
        # https://dlmf.nist.gov/10.51.E1
        n = np.array([1, 2, 3, 7, 12])
        x = 1.1 + 1.5j
        assert_allclose(spherical_jn(n - 1, x) + spherical_jn(n + 1, x),
                        (2*n + 1)/x*spherical_jn(n, x))
    def test_spherical_jn_recurrence_real(self):
        # https://dlmf.nist.gov/10.51.E1
        n = np.array([1, 2, 3, 7, 12])
        x = 0.12
        assert_allclose(spherical_jn(n - 1, x) + spherical_jn(n + 1,x),
                        (2*n + 1)/x*spherical_jn(n, x))
    def test_spherical_jn_inf_real(self):
        # https://dlmf.nist.gov/10.52.E3
        n = 6
        x = np.array([-inf, inf])
        assert_allclose(spherical_jn(n, x), np.array([0, 0]))
    def test_spherical_jn_inf_complex(self):
        # https://dlmf.nist.gov/10.52.E3
        n = 7
        x = np.array([-inf + 0j, inf + 0j, inf*(1+1j)])
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "invalid value encountered in multiply")
            assert_allclose(spherical_jn(n, x), np.array([0, 0, inf*(1+1j)]))
    def test_spherical_jn_large_arg_1(self):
        # https://github.com/scipy/scipy/issues/2165
        # Reference value computed using mpmath, via
        # besselj(n + mpf(1)/2, z)*sqrt(pi/(2*z))
        assert_allclose(spherical_jn(2, 3350.507), -0.00029846226538040747)
    def test_spherical_jn_large_arg_2(self):
        # https://github.com/scipy/scipy/issues/1641
        # Reference value computed using mpmath, via
        # besselj(n + mpf(1)/2, z)*sqrt(pi/(2*z))
        assert_allclose(spherical_jn(2, 10000), 3.0590002633029811e-05)
    def test_spherical_jn_at_zero(self):
        # https://dlmf.nist.gov/10.52.E1
        # But note that n = 0 is a special case: j0 = sin(x)/x -> 1
        n = np.array([0, 1, 2, 5, 10, 100])
        x = 0
        assert_allclose(spherical_jn(n, x), np.array([1, 0, 0, 0, 0, 0]))
class TestSphericalYn:
    """Tests for spherical_yn: closed forms, recurrences, and limits.

    Reference identities are cited inline from the NIST DLMF.
    """
    def test_spherical_yn_exact(self):
        # https://dlmf.nist.gov/10.49.E5
        # Note: exact expression is numerically stable only for small
        # n or z >> n.
        x = np.array([0.12, 1.23, 12.34, 123.45, 1234.5])
        assert_allclose(spherical_yn(2, x),
                        (1/x - 3/x**3)*cos(x) - 3/x**2*sin(x))
    def test_spherical_yn_recurrence_real(self):
        # https://dlmf.nist.gov/10.51.E1
        n = np.array([1, 2, 3, 7, 12])
        x = 0.12
        assert_allclose(spherical_yn(n - 1, x) + spherical_yn(n + 1,x),
                        (2*n + 1)/x*spherical_yn(n, x))
    def test_spherical_yn_recurrence_complex(self):
        # https://dlmf.nist.gov/10.51.E1
        n = np.array([1, 2, 3, 7, 12])
        x = 1.1 + 1.5j
        assert_allclose(spherical_yn(n - 1, x) + spherical_yn(n + 1, x),
                        (2*n + 1)/x*spherical_yn(n, x))
    def test_spherical_yn_inf_real(self):
        # https://dlmf.nist.gov/10.52.E3
        n = 6
        x = np.array([-inf, inf])
        assert_allclose(spherical_yn(n, x), np.array([0, 0]))
    def test_spherical_yn_inf_complex(self):
        # https://dlmf.nist.gov/10.52.E3
        n = 7
        x = np.array([-inf + 0j, inf + 0j, inf*(1+1j)])
        with suppress_warnings() as sup:
            sup.filter(RuntimeWarning, "invalid value encountered in multiply")
            assert_allclose(spherical_yn(n, x), np.array([0, 0, inf*(1+1j)]))
    def test_spherical_yn_at_zero(self):
        # https://dlmf.nist.gov/10.52.E2
        n = np.array([0, 1, 2, 5, 10, 100])
        x = 0
        assert_allclose(spherical_yn(n, x), np.full(n.shape, -inf))
    def test_spherical_yn_at_zero_complex(self):
        # Consistently with numpy:
        # >>> -np.cos(0)/0
        # -inf
        # >>> -np.cos(0+0j)/(0+0j)
        # (-inf + nan*j)
        n = np.array([0, 1, 2, 5, 10, 100])
        x = 0 + 0j
        assert_allclose(spherical_yn(n, x), np.full(n.shape, nan))
class TestSphericalJnYnCrossProduct:
    """Cross-product (Wronskian-like) identities linking j_n and y_n."""
    def test_spherical_jn_yn_cross_product_1(self):
        # https://dlmf.nist.gov/10.50.E3
        n = np.array([1, 5, 8])
        x = np.array([0.1, 1, 10])
        left = (spherical_jn(n + 1, x) * spherical_yn(n, x) -
                spherical_jn(n, x) * spherical_yn(n + 1, x))
        right = 1/x**2
        assert_allclose(left, right)
    def test_spherical_jn_yn_cross_product_2(self):
        # https://dlmf.nist.gov/10.50.E3
        n = np.array([1, 5, 8])
        x = np.array([0.1, 1, 10])
        left = (spherical_jn(n + 2, x) * spherical_yn(n, x) -
                spherical_jn(n, x) * spherical_yn(n + 2, x))
        right = (2*n + 3)/x**3
        assert_allclose(left, right)
class TestSphericalIn:
    """Tests for spherical_in (modified, first kind): closed forms,
    recurrences, and limits, with DLMF citations inline.
    """
    def test_spherical_in_exact(self):
        # https://dlmf.nist.gov/10.49.E9
        x = np.array([0.12, 1.23, 12.34, 123.45])
        assert_allclose(spherical_in(2, x),
                        (1/x + 3/x**3)*sinh(x) - 3/x**2*cosh(x))
    def test_spherical_in_recurrence_real(self):
        # https://dlmf.nist.gov/10.51.E4
        n = np.array([1, 2, 3, 7, 12])
        x = 0.12
        assert_allclose(spherical_in(n - 1, x) - spherical_in(n + 1,x),
                        (2*n + 1)/x*spherical_in(n, x))
    def test_spherical_in_recurrence_complex(self):
        # https://dlmf.nist.gov/10.51.E1
        n = np.array([1, 2, 3, 7, 12])
        x = 1.1 + 1.5j
        assert_allclose(spherical_in(n - 1, x) - spherical_in(n + 1,x),
                        (2*n + 1)/x*spherical_in(n, x))
    def test_spherical_in_inf_real(self):
        # https://dlmf.nist.gov/10.52.E3
        n = 5
        x = np.array([-inf, inf])
        assert_allclose(spherical_in(n, x), np.array([-inf, inf]))
    def test_spherical_in_inf_complex(self):
        # https://dlmf.nist.gov/10.52.E5
        # Ideally, i1n(n, 1j*inf) = 0 and i1n(n, (1+1j)*inf) = (1+1j)*inf, but
        # this appears impossible to achieve because C99 regards any complex
        # value with at least one infinite part as a complex infinity, so
        # 1j*inf cannot be distinguished from (1+1j)*inf. Therefore, nan is
        # the correct return value.
        n = 7
        x = np.array([-inf + 0j, inf + 0j, inf*(1+1j)])
        assert_allclose(spherical_in(n, x), np.array([-inf, inf, nan]))
    def test_spherical_in_at_zero(self):
        # https://dlmf.nist.gov/10.52.E1
        # But note that n = 0 is a special case: i0 = sinh(x)/x -> 1
        n = np.array([0, 1, 2, 5, 10, 100])
        x = 0
        assert_allclose(spherical_in(n, x), np.array([1, 0, 0, 0, 0, 0]))
class TestSphericalKn:
    """Tests for spherical_kn (modified, second kind): closed forms,
    recurrences, and limits, with DLMF citations inline.
    """
    def test_spherical_kn_exact(self):
        # https://dlmf.nist.gov/10.49.E13
        x = np.array([0.12, 1.23, 12.34, 123.45])
        assert_allclose(spherical_kn(2, x),
                        pi/2*exp(-x)*(1/x + 3/x**2 + 3/x**3))
    def test_spherical_kn_recurrence_real(self):
        # https://dlmf.nist.gov/10.51.E4
        # The (-1)**n factors adapt the i_n recurrence to k_n.
        n = np.array([1, 2, 3, 7, 12])
        x = 0.12
        assert_allclose((-1)**(n - 1)*spherical_kn(n - 1, x) - (-1)**(n + 1)*spherical_kn(n + 1,x),
                        (-1)**n*(2*n + 1)/x*spherical_kn(n, x))
    def test_spherical_kn_recurrence_complex(self):
        # https://dlmf.nist.gov/10.51.E4
        n = np.array([1, 2, 3, 7, 12])
        x = 1.1 + 1.5j
        assert_allclose((-1)**(n - 1)*spherical_kn(n - 1, x) - (-1)**(n + 1)*spherical_kn(n + 1,x),
                        (-1)**n*(2*n + 1)/x*spherical_kn(n, x))
    def test_spherical_kn_inf_real(self):
        # https://dlmf.nist.gov/10.52.E6
        n = 5
        x = np.array([-inf, inf])
        assert_allclose(spherical_kn(n, x), np.array([-inf, 0]))
    def test_spherical_kn_inf_complex(self):
        # https://dlmf.nist.gov/10.52.E6
        # The behavior at complex infinity depends on the sign of the real
        # part: if Re(z) >= 0, then the limit is 0; if Re(z) < 0, then it's
        # z*inf. This distinction cannot be captured, so we return nan.
        n = 7
        x = np.array([-inf + 0j, inf + 0j, inf*(1+1j)])
        assert_allclose(spherical_kn(n, x), np.array([-inf, 0, nan]))
    def test_spherical_kn_at_zero(self):
        # https://dlmf.nist.gov/10.52.E2
        n = np.array([0, 1, 2, 5, 10, 100])
        x = 0
        assert_allclose(spherical_kn(n, x), np.full(n.shape, inf))
    def test_spherical_kn_at_zero_complex(self):
        # https://dlmf.nist.gov/10.52.E2
        n = np.array([0, 1, 2, 5, 10, 100])
        x = 0 + 0j
        assert_allclose(spherical_kn(n, x), np.full(n.shape, nan))
class SphericalDerivativesTestCase:
    """Template for derivative tests.

    Subclasses supply ``f`` (the function) and ``df`` (its claimed
    derivative); the checks verify the fundamental theorem of calculus:
    the integral of ``df`` over [a, b] equals ``f(b) - f(a)``.
    """
    def fundamental_theorem(self, n, a, b):
        integral, tolerance = quad(lambda z: self.df(n, z), a, b)
        # Compare within the tolerance reported by the quadrature itself.
        assert_allclose(integral,
                        self.f(n, b) - self.f(n, a),
                        atol=tolerance)
    @pytest.mark.slow
    def test_fundamental_theorem_0(self):
        self.fundamental_theorem(0, 3.0, 15.0)
    @pytest.mark.slow
    def test_fundamental_theorem_7(self):
        self.fundamental_theorem(7, 0.5, 1.2)
class TestSphericalJnDerivatives(SphericalDerivativesTestCase):
    """Derivative checks for spherical_jn via the template base class."""
    def f(self, n, z):
        return spherical_jn(n, z)
    def df(self, n, z):
        return spherical_jn(n, z, derivative=True)
    def test_spherical_jn_d_zero(self):
        # At x = 0, only j1'(0) = 1/3 is nonzero among these orders.
        n = np.array([0, 1, 2, 3, 7, 15])
        assert_allclose(spherical_jn(n, 0, derivative=True),
                        np.array([0, 1/3, 0, 0, 0, 0]))
class TestSphericalYnDerivatives(SphericalDerivativesTestCase):
    """Derivative checks for spherical_yn via the template base class."""
    def f(self, n, z):
        return spherical_yn(n, z)
    def df(self, n, z):
        return spherical_yn(n, z, derivative=True)
class TestSphericalInDerivatives(SphericalDerivativesTestCase):
    """Derivative checks for spherical_in via the template base class."""
    def f(self, n, z):
        return spherical_in(n, z)
    def df(self, n, z):
        return spherical_in(n, z, derivative=True)
    def test_spherical_in_d_zero(self):
        # For n >= 1 the derivative of i_n vanishes at the origin.
        n = np.array([1, 2, 3, 7, 15])
        assert_allclose(spherical_in(n, 0, derivative=True),
                        np.zeros(5))
class TestSphericalKnDerivatives(SphericalDerivativesTestCase):
    """Derivative checks for spherical_kn via the template base class."""
    def f(self, n, z):
        return spherical_kn(n, z)
    def df(self, n, z):
        return spherical_kn(n, z, derivative=True)
class TestSphericalOld:
    # These are tests from the TestSpherical class of test_basic.py,
    # rewritten to use spherical_* instead of sph_* but otherwise unchanged.
    def test_sph_in(self):
        # This test reproduces test_basic.TestSpherical.test_sph_in.
        # Row 0 holds values, row 1 the derivatives, for orders 0 and 1.
        i1n = np.empty((2,2))
        x = 0.2
        i1n[0][0] = spherical_in(0, x)
        i1n[0][1] = spherical_in(1, x)
        i1n[1][0] = spherical_in(0, x, derivative=True)
        i1n[1][1] = spherical_in(1, x, derivative=True)
        # Expected derivatives from the recurrences i0' = i1 and
        # i1' = i0 - (2/x) i1.
        inp0 = (i1n[0][1])
        inp1 = (i1n[0][0] - 2.0/0.2 * i1n[0][1])
        assert_array_almost_equal(i1n[0],np.array([1.0066800127054699381,
                                                   0.066933714568029540839]),12)
        assert_array_almost_equal(i1n[1],[inp0,inp1],12)
    def test_sph_in_kn_order0(self):
        # Order-0 values and derivatives against elementary closed forms.
        x = 1.
        sph_i0 = np.empty((2,))
        sph_i0[0] = spherical_in(0, x)
        sph_i0[1] = spherical_in(0, x, derivative=True)
        sph_i0_expected = np.array([np.sinh(x)/x,
                                    np.cosh(x)/x-np.sinh(x)/x**2])
        assert_array_almost_equal(r_[sph_i0], sph_i0_expected)
        sph_k0 = np.empty((2,))
        sph_k0[0] = spherical_kn(0, x)
        sph_k0[1] = spherical_kn(0, x, derivative=True)
        sph_k0_expected = np.array([0.5*pi*exp(-x)/x,
                                    -0.5*pi*exp(-x)*(1/x+1/x**2)])
        assert_array_almost_equal(r_[sph_k0], sph_k0_expected)
    def test_sph_jn(self):
        # Values and derivatives of j0..j2 at x = 0.2; derivatives checked
        # against the recurrence j_n' = j_{n-1} - ((n+1)/x) j_n.
        s1 = np.empty((2,3))
        x = 0.2
        s1[0][0] = spherical_jn(0, x)
        s1[0][1] = spherical_jn(1, x)
        s1[0][2] = spherical_jn(2, x)
        s1[1][0] = spherical_jn(0, x, derivative=True)
        s1[1][1] = spherical_jn(1, x, derivative=True)
        s1[1][2] = spherical_jn(2, x, derivative=True)
        s10 = -s1[0][1]
        s11 = s1[0][0]-2.0/0.2*s1[0][1]
        s12 = s1[0][1]-3.0/0.2*s1[0][2]
        assert_array_almost_equal(s1[0],[0.99334665397530607731,
                                         0.066400380670322230863,
                                         0.0026590560795273856680],12)
        assert_array_almost_equal(s1[1],[s10,s11,s12],12)
    def test_sph_kn(self):
        # Values and derivatives of k0..k2 at x = 0.2; derivatives checked
        # against the recurrence k_n' = -k_{n-1} - ((n+1)/x) k_n.
        kn = np.empty((2,3))
        x = 0.2
        kn[0][0] = spherical_kn(0, x)
        kn[0][1] = spherical_kn(1, x)
        kn[0][2] = spherical_kn(2, x)
        kn[1][0] = spherical_kn(0, x, derivative=True)
        kn[1][1] = spherical_kn(1, x, derivative=True)
        kn[1][2] = spherical_kn(2, x, derivative=True)
        kn0 = -kn[0][1]
        kn1 = -kn[0][0]-2.0/0.2*kn[0][1]
        kn2 = -kn[0][1]-3.0/0.2*kn[0][2]
        assert_array_almost_equal(kn[0],[6.4302962978445670140,
                                         38.581777787067402086,
                                         585.15696310385559829],12)
        assert_array_almost_equal(kn[1],[kn0,kn1,kn2],9)
    def test_sph_yn(self):
        sy1 = spherical_yn(2, 0.2)
        sy2 = spherical_yn(0, 0.2)
        assert_almost_equal(sy1,-377.52483,5)  # previous values in the system
        assert_almost_equal(sy2,-4.9003329,5)
        # y1' from the recurrence (y0 - 2*y2)/3.
        sphpy = (spherical_yn(0, 0.2) - 2*spherical_yn(2, 0.2))/3
        sy3 = spherical_yn(1, 0.2, derivative=True)
        assert_almost_equal(sy3,sphpy,4)  # compare correct derivative val. (correct =-system val).
| 14,284
| 36.592105
| 99
|
py
|
scipy
|
scipy-main/scipy/special/tests/test_hypergeometric.py
|
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_equal
import scipy.special as sc
class TestHyperu:
    """Tests for the confluent hypergeometric function hyperu."""
    def test_negative_x(self):
        # hyperu is real-valued only for x >= 0; negative x yields nan.
        a, b, x = np.meshgrid(
            [-1, -0.5, 0, 0.5, 1],
            [-1, -0.5, 0, 0.5, 1],
            np.linspace(-100, -1, 10),
        )
        assert np.all(np.isnan(sc.hyperu(a, b, x)))
    def test_special_cases(self):
        # U(0, b, x) = 1 identically.
        assert sc.hyperu(0, 1, 1) == 1.0
    @pytest.mark.parametrize('a', [0.5, 1, np.nan])
    @pytest.mark.parametrize('b', [1, 2, np.nan])
    @pytest.mark.parametrize('x', [0.25, 3, np.nan])
    def test_nan_inputs(self, a, b, x):
        # nan propagates iff at least one argument is nan.
        assert np.isnan(sc.hyperu(a, b, x)) == np.any(np.isnan([a, b, x]))
class TestHyp1f1:
    """Tests for hyp1f1 (Kummer's confluent hypergeometric function).

    Reference values were computed with mpmath unless noted otherwise.
    The "legacy" tests near the bottom pin SciPy-specific behavior for
    nonpositive integer b that differs from Boost/mpmath/Mathematica.
    """
    @pytest.mark.parametrize('a, b, x', [
        (np.nan, 1, 1),
        (1, np.nan, 1),
        (1, 1, np.nan)
    ])
    def test_nan_inputs(self, a, b, x):
        assert np.isnan(sc.hyp1f1(a, b, x))
    def test_poles(self):
        # b at a nonpositive integer is a pole (for positive integer a).
        assert_equal(sc.hyp1f1(1, [0, -1, -2, -3, -4], 0.5), np.inf)
    @pytest.mark.parametrize('a, b, x, result', [
        (-1, 1, 0.5, 0.5),
        (1, 1, 0.5, 1.6487212707001281468),
        (2, 1, 0.5, 2.4730819060501922203),
        (1, 2, 0.5, 1.2974425414002562937),
        (-10, 1, 0.5, -0.38937441413785204475)
    ])
    def test_special_cases(self, a, b, x, result):
        # Hit all the special case branches at the beginning of the
        # function. Desired answers computed using Mpmath.
        assert_allclose(sc.hyp1f1(a, b, x), result, atol=0, rtol=1e-15)
    @pytest.mark.parametrize('a, b, x, result', [
        (1, 1, 0.44, 1.5527072185113360455),
        (-1, 1, 0.44, 0.55999999999999999778),
        (100, 100, 0.89, 2.4351296512898745592),
        (-100, 100, 0.89, 0.40739062490768104667),
        (1.5, 100, 59.99, 3.8073513625965598107),
        (-1.5, 100, 59.99, 0.25099240047125826943)
    ])
    def test_geometric_convergence(self, a, b, x, result):
        # Test the region where we are relying on the ratio of
        #
        # (|a| + 1) * |x| / |b|
        #
        # being small. Desired answers computed using Mpmath
        assert_allclose(sc.hyp1f1(a, b, x), result, atol=0, rtol=1e-15)
    @pytest.mark.parametrize('a, b, x, result', [
        (-1, 1, 1.5, -0.5),
        (-10, 1, 1.5, 0.41801777430943080357),
        (-25, 1, 1.5, 0.25114491646037839809),
        (-50, 1, 1.5, -0.25683643975194756115),
        (-80, 1, 1.5, -0.24554329325751503601),
        (-150, 1, 1.5, -0.173364795515420454496),
    ])
    def test_a_negative_integer(self, a, b, x, result):
        # Negative integer a makes the series a finite polynomial.
        # Desired answers computed using Mpmath.
        assert_allclose(sc.hyp1f1(a, b, x), result, atol=0, rtol=1.5e-14)
    @pytest.mark.parametrize('a, b, x, expected', [
        (0.01, 150, -4, 0.99973683897677527773),  # gh-3492
        (1, 5, 0.01, 1.0020033381011970966),  # gh-3593
        (50, 100, 0.01, 1.0050126452421463411),  # gh-3593
        (1, 0.3, -1e3, -7.011932249442947651455e-04),  # gh-14149
        (1, 0.3, -1e4, -7.001190321418937164734e-05),  # gh-14149
        (9, 8.5, -350, -5.224090831922378361082e-20),  # gh-17120
        (9, 8.5, -355, -4.595407159813368193322e-20),  # gh-17120
        (75, -123.5, 15, 3.425753920814889017493e+06),
    ])
    def test_assorted_cases(self, a, b, x, expected):
        # Expected values were computed with mpmath.hyp1f1(a, b, x).
        assert_allclose(sc.hyp1f1(a, b, x), expected, atol=0, rtol=1e-14)
    def test_a_neg_int_and_b_equal_x(self):
        # This is a case where the Boost wrapper will call hypergeometric_pFq
        # instead of hypergeometric_1F1. When we use a version of Boost in
        # which https://github.com/boostorg/math/issues/833 is fixed, this
        # test case can probably be moved into test_assorted_cases.
        # The expected value was computed with mpmath.hyp1f1(a, b, x).
        a = -10.0
        b = 2.5
        x = 2.5
        expected = 0.0365323664364104338721
        computed = sc.hyp1f1(a, b, x)
        assert_allclose(computed, expected, atol=0, rtol=1e-13)
    @pytest.mark.parametrize('a, b, x, desired', [
        (-1, -2, 2, 2),
        (-1, -4, 10, 3.5),
        (-2, -2, 1, 2.5)
    ])
    def test_gh_11099(self, a, b, x, desired):
        # All desired results computed using Mpmath
        assert sc.hyp1f1(a, b, x) == desired
    @pytest.mark.parametrize('a', [-3, -2])
    def test_x_zero_a_and_b_neg_ints_and_a_ge_b(self, a):
        assert sc.hyp1f1(a, -3, 0) == 1
    # The "legacy edge cases" mentioned in the comments in the following
    # tests refers to the behavior of hyp1f1(a, b, x) when b is a nonpositive
    # integer. In some subcases, the behavior of SciPy does not match that
    # of Boost (1.81+), mpmath and Mathematica (via Wolfram Alpha online).
    # If the handling of these edges cases is changed to agree with those
    # libraries, these test will have to be updated.
    @pytest.mark.parametrize('b', [0, -1, -5])
    def test_legacy_case1(self, b):
        # Test results of hyp1f1(0, n, x) for n <= 0.
        # This is a legacy edge case.
        # Boost (versions greater than 1.80), Mathematica (via Wolfram Alpha
        # online) and mpmath all return 1 in this case, but SciPy's hyp1f1
        # returns inf.
        assert_equal(sc.hyp1f1(0, b, [-1.5, 0, 1.5]), [np.inf, np.inf, np.inf])
    def test_legacy_case2(self):
        # This is a legacy edge case.
        # In software such as boost (1.81+), mpmath and Mathematica,
        # the value is 1.
        assert sc.hyp1f1(-4, -3, 0) == np.inf
| 5,598
| 38.70922
| 79
|
py
|
scipy
|
scipy-main/scipy/special/tests/test_bdtr.py
|
import numpy as np
import scipy.special as sc
import pytest
from numpy.testing import assert_allclose, assert_array_equal, suppress_warnings
class TestBdtr:
    """Tests for bdtr, the binomial distribution CDF."""
    def test(self):
        # P(X <= 0) for Binomial(1, 0.5) is 0.5.
        val = sc.bdtr(0, 1, 0.5)
        assert_allclose(val, 0.5)
    def test_sum_is_one(self):
        # CDF over the full support ends at exactly 1.
        val = sc.bdtr([0, 1, 2], 2, 0.5)
        assert_array_equal(val, [0.25, 0.75, 1.0])
    def test_rounding(self):
        # Non-integer k is floored to the integer below.
        double_val = sc.bdtr([0.1, 1.1, 2.1], 2, 0.5)
        int_val = sc.bdtr([0, 1, 2], 2, 0.5)
        assert_array_equal(double_val, int_val)
    @pytest.mark.parametrize('k, n, p', [
        (np.inf, 2, 0.5),
        (1.0, np.inf, 0.5),
        (1.0, 2, np.inf)
    ])
    def test_inf(self, k, n, p):
        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning)
            val = sc.bdtr(k, n, p)
        assert np.isnan(val)
    def test_domain(self):
        # k below the support returns nan.
        val = sc.bdtr(-1.1, 1, 0.5)
        assert np.isnan(val)
class TestBdtrc:
    """Tests for bdtrc, the binomial distribution survival function."""
    def test_value(self):
        val = sc.bdtrc(0, 1, 0.5)
        assert_allclose(val, 0.5)
    def test_sum_is_one(self):
        val = sc.bdtrc([0, 1, 2], 2, 0.5)
        assert_array_equal(val, [0.75, 0.25, 0.0])
    def test_rounding(self):
        # Non-integer k is floored to the integer below.
        double_val = sc.bdtrc([0.1, 1.1, 2.1], 2, 0.5)
        int_val = sc.bdtrc([0, 1, 2], 2, 0.5)
        assert_array_equal(double_val, int_val)
    @pytest.mark.parametrize('k, n, p', [
        (np.inf, 2, 0.5),
        (1.0, np.inf, 0.5),
        (1.0, 2, np.inf)
    ])
    def test_inf(self, k, n, p):
        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning)
            val = sc.bdtrc(k, n, p)
        assert np.isnan(val)
    def test_domain(self):
        # k below the support clamps to 1; k above it returns nan.
        val = sc.bdtrc(-1.1, 1, 0.5)
        val2 = sc.bdtrc(2.1, 1, 0.5)
        assert np.isnan(val2)
        assert_allclose(val, 1.0)
    def test_bdtr_bdtrc_sum_to_one(self):
        # CDF + SF must sum to exactly 1 at every k.
        bdtr_vals = sc.bdtr([0, 1, 2], 2, 0.5)
        bdtrc_vals = sc.bdtrc([0, 1, 2], 2, 0.5)
        vals = bdtr_vals + bdtrc_vals
        assert_allclose(vals, [1.0, 1.0, 1.0])
class TestBdtri:
    """Tests for bdtri, the inverse of bdtr with respect to p."""
    def test_value(self):
        val = sc.bdtri(0, 1, 0.5)
        assert_allclose(val, 0.5)
    def test_sum_is_one(self):
        # NOTE(review): despite the name this checks known inverse values,
        # not a sum; kept for parity with TestBdtr/TestBdtrc.
        val = sc.bdtri([0, 1], 2, 0.5)
        actual = np.asarray([1 - 1/np.sqrt(2), 1/np.sqrt(2)])
        assert_allclose(val, actual)
    def test_rounding(self):
        # Non-integer k is floored to the integer below.
        double_val = sc.bdtri([0.1, 1.1], 2, 0.5)
        int_val = sc.bdtri([0, 1], 2, 0.5)
        assert_allclose(double_val, int_val)
    @pytest.mark.parametrize('k, n, p', [
        (np.inf, 2, 0.5),
        (1.0, np.inf, 0.5),
        (1.0, 2, np.inf)
    ])
    def test_inf(self, k, n, p):
        with suppress_warnings() as sup:
            sup.filter(DeprecationWarning)
            val = sc.bdtri(k, n, p)
        assert np.isnan(val)
    @pytest.mark.parametrize('k, n, p', [
        (-1.1, 1, 0.5),
        (2.1, 1, 0.5)
    ])
    def test_domain(self, k, n, p):
        val = sc.bdtri(k, n, p)
        assert np.isnan(val)
    def test_bdtr_bdtri_roundtrip(self):
        # bdtri inverts bdtr except at k = n where the CDF is flat (nan).
        bdtr_vals = sc.bdtr([0, 1, 2], 2, 0.5)
        roundtrip_vals = sc.bdtri([0, 1, 2], 2, bdtr_vals)
        assert_allclose(roundtrip_vals, [0.5, 0.5, np.nan])
| 3,231
| 27.60177
| 80
|
py
|
scipy
|
scipy-main/scipy/special/tests/test_hyp2f1.py
|
"""Tests for hyp2f1 for complex values.
Author: Albert Steppi, with credit to Adam Kullberg (FormerPhycisist) for
the implementation of mp_hyp2f1 below, which modifies mpmath's hyp2f1 to
return the same branch as scipy's on the standard branch cut.
"""
import sys
import pytest
import numpy as np
from typing import NamedTuple
from numpy.testing import assert_allclose
from scipy.special import hyp2f1
from scipy.special._testutils import check_version, MissingModule
try:
import mpmath
except ImportError:
mpmath = MissingModule("mpmath")
def mp_hyp2f1(a, b, c, z):
    """Return mpmath hyp2f1 calculated on same branch as scipy hyp2f1.
    For most values of a,b,c mpmath returns the x - 0j branch of hyp2f1 on the
    branch cut x=(1,inf) whereas scipy's hyp2f1 calculates the x + 0j branch.
    Thus, to generate the right comparison values on the branch cut, we
    evaluate mpmath.hyp2f1 at x + 1e-15*j.
    The exception to this occurs when c-a=-m in which case both mpmath and
    scipy calculate the x + 0j branch on the branch cut. When this happens
    mpmath.hyp2f1 will be evaluated at the original z point.
    """
    # z lies on the standard branch cut [1, inf) of hyp2f1.
    on_branch_cut = z.real > 1.0 and abs(z.imag) < 1.0e-15
    # cond1/cond2: c - a (resp. c - b) is a nonpositive integer, the
    # degenerate case in which both libraries already agree on the branch.
    cond1 = abs(c - a - round(c - a)) < 1.0e-15 and round(c - a) <= 0
    cond2 = abs(c - b - round(c - b)) < 1.0e-15 and round(c - b) <= 0
    # Make sure imaginary part is *exactly* zero
    if on_branch_cut:
        z = z.real + 0.0j
    if on_branch_cut and not (cond1 or cond2):
        # Nudge into the upper half-plane to select the x + 0j branch.
        z_mpmath = z.real + 1.0e-15j
    else:
        z_mpmath = z
    return complex(mpmath.hyp2f1(a, b, c, z_mpmath))
class Hyp2f1TestCase(NamedTuple):
    """One hyp2f1 test point: parameters (a, b, c), argument z, the
    mpmath-computed reference value, and the relative tolerance."""
    a: float
    b: float
    c: float
    z: complex
    expected: complex
    rtol: float
class TestHyp2f1:
"""Tests for hyp2f1 for complex values.
Expected values for test cases were computed using mpmath. See
`scipy.special._precompute.hyp2f1_data`. The verbose style of specifying
test cases is used for readability and to make it easier to mark individual
cases as expected to fail. Expected failures are used to highlight cases
where improvements are needed. See
`scipy.special._precompute.hyp2f1_data.make_hyp2f1_test_cases` for a
function to generate the boilerplate for the test cases.
Assertions have been added to each test to ensure that the test cases match
the situations that are intended. A final test `test_test_hyp2f1` checks
that the expected values in the test cases actually match what is computed
by mpmath. This test is marked slow even though it isn't particularly slow
so that it won't run by default on continuous integration builds.
"""
@pytest.mark.parametrize(
"hyp2f1_test_case",
[
pytest.param(
Hyp2f1TestCase(
a=0.5,
b=0.2,
c=-10,
z=0.2 + 0.2j,
expected=np.inf + 0j,
rtol=0
)
),
pytest.param(
Hyp2f1TestCase(
a=0.5,
b=0.2,
c=-10,
z=0 + 0j,
expected=1 + 0j,
rtol=0
),
),
pytest.param(
Hyp2f1TestCase(
a=0.5,
b=0,
c=-10,
z=0.2 + 0.2j,
expected=1 + 0j,
rtol=0
),
),
pytest.param(
Hyp2f1TestCase(
a=0.5,
b=0,
c=0,
z=0.2 + 0.2j,
expected=1 + 0j,
rtol=0,
),
),
pytest.param(
Hyp2f1TestCase(
a=0.5,
b=0.2,
c=0,
z=0.2 + 0.2j,
expected=np.inf + 0j,
rtol=0,
),
),
pytest.param(
Hyp2f1TestCase(
a=0.5,
b=0.2,
c=0,
z=0 + 0j,
expected=np.nan + 0j,
rtol=0,
),
),
pytest.param(
Hyp2f1TestCase(
a=0.5,
b=-5,
c=-10,
z=0.2 + 0.2j,
expected=(1.0495404166666666+0.05708208333333334j),
rtol=1e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=0.5,
b=-10,
c=-10,
z=0.2 + 0.2j,
expected=(1.092966013125+0.13455014673750001j),
rtol=1e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-10,
b=-20,
c=-10,
z=0.2 + 0.2j,
expected=(-0.07712512000000005+0.12752814080000005j),
rtol=1e-13,
),
),
pytest.param(
Hyp2f1TestCase(
a=-1,
b=3.2,
c=-1,
z=0.2 + 0.2j,
expected=(1.6400000000000001+0.6400000000000001j),
rtol=1e-13,
),
),
pytest.param(
Hyp2f1TestCase(
a=-2,
b=1.2,
c=-4,
z=1 + 0j,
expected=1.8200000000000001 + 0j,
rtol=1e-15,
),
),
]
)
def test_c_non_positive_int(self, hyp2f1_test_case):
a, b, c, z, expected, rtol = hyp2f1_test_case
assert_allclose(hyp2f1(a, b, c, z), expected, rtol=rtol)
@pytest.mark.parametrize(
"hyp2f1_test_case",
[
pytest.param(
Hyp2f1TestCase(
a=0.5,
b=0.2,
c=1.5,
z=1 + 0j,
expected=1.1496439092239847 + 0j,
rtol=1e-15
),
),
pytest.param(
Hyp2f1TestCase(
a=12.3,
b=8.0,
c=20.31,
z=1 + 0j,
expected=69280986.75273195 + 0j,
rtol=1e-15
),
),
pytest.param(
Hyp2f1TestCase(
a=290.2,
b=321.5,
c=700.1,
z=1 + 0j,
expected=1.3396562400934e117 + 0j,
rtol=1e-12,
),
),
# Note that here even mpmath produces different results for
# results that should be equivalent.
pytest.param(
Hyp2f1TestCase(
a=9.2,
b=621.5,
c=700.1,
z=(1+0j),
expected=(952726652.4158565+0j),
rtol=5e-13,
),
),
pytest.param(
Hyp2f1TestCase(
a=621.5,
b=9.2,
c=700.1,
z=(1+0j),
expected=(952726652.4160284+0j),
rtol=5e-12,
),
),
pytest.param(
Hyp2f1TestCase(
a=-101.2,
b=-400.4,
c=-172.1,
z=(1+0j),
expected=(2.2253618341394838e+37+0j),
rtol=1e-13,
),
),
pytest.param(
Hyp2f1TestCase(
a=-400.4,
b=-101.2,
c=-172.1,
z=(1+0j),
expected=(2.2253618341394838e+37+0j),
rtol=5e-13,
),
),
pytest.param(
Hyp2f1TestCase(
a=172.5,
b=-201.3,
c=151.2,
z=(1+0j),
expected=(7.072266653650905e-135+0j),
rtol=5e-13,
),
),
pytest.param(
Hyp2f1TestCase(
a=-201.3,
b=172.5,
c=151.2,
z=(1+0j),
expected=(7.072266653650905e-135+0j),
rtol=5e-13,
),
),
pytest.param(
Hyp2f1TestCase(
a=-102.1,
b=-20.3,
c=1.3,
z=1 + 0j,
expected=2.7899070752746906e22 + 0j,
rtol=3e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=-202.6,
b=60.3,
c=1.5,
z=1 + 0j,
expected=-1.3113641413099326e-56 + 0j,
rtol=1e-12,
),
),
],
)
def test_unital_argument(self, hyp2f1_test_case):
"""Tests for case z = 1, c - a - b > 0.
Expected answers computed using mpmath.
"""
a, b, c, z, expected, rtol = hyp2f1_test_case
assert z == 1 and c - a - b > 0 # Tests the test
assert_allclose(hyp2f1(a, b, c, z), expected, rtol=rtol)
@pytest.mark.parametrize(
"hyp2f1_test_case",
[
pytest.param(
Hyp2f1TestCase(
a=0.5,
b=0.2,
c=1.3,
z=-1 + 0j,
expected=0.9428846409614143 + 0j,
rtol=1e-15),
),
pytest.param(
Hyp2f1TestCase(
a=12.3,
b=8.0,
c=5.300000000000001,
z=-1 + 0j,
expected=-4.845809986595704e-06 + 0j,
rtol=1e-15
),
),
pytest.param(
Hyp2f1TestCase(
a=221.5,
b=90.2,
c=132.3,
z=-1 + 0j,
expected=2.0490488728377282e-42 + 0j,
rtol=1e-7,
),
),
pytest.param(
Hyp2f1TestCase(
a=-102.1,
b=-20.3,
c=-80.8,
z=-1 + 0j,
expected=45143784.46783885 + 0j,
rtol=1e-7,
),
marks=pytest.mark.xfail(
condition=sys.maxsize < 2**32,
reason="Fails on 32 bit.",
)
),
],
)
def test_special_case_z_near_minus_1(self, hyp2f1_test_case):
"""Tests for case z ~ -1, c ~ 1 + a - b
Expected answers computed using mpmath.
"""
a, b, c, z, expected, rtol = hyp2f1_test_case
assert abs(1 + a - b - c) < 1e-15 and abs(z + 1) < 1e-15
assert_allclose(hyp2f1(a, b, c, z), expected, rtol=rtol)
@pytest.mark.parametrize(
"hyp2f1_test_case",
[
pytest.param(
Hyp2f1TestCase(
a=-4,
b=2.02764642551431,
c=1.0561196186065624,
z=(0.9473684210526314-0.10526315789473695j),
expected=(0.0031961077109535375-0.0011313924606557173j),
rtol=1e-12,
),
),
pytest.param(
Hyp2f1TestCase(
a=-8,
b=-7.937789122896016,
c=-15.964218273004214,
z=(2-0.10526315789473695j),
expected=(0.005543763196412503-0.0025948879065698306j),
rtol=5e-13,
),
),
pytest.param(
Hyp2f1TestCase(
a=-8,
b=8.095813935368371,
c=4.0013768449590685,
z=(0.9473684210526314-0.10526315789473695j),
expected=(-0.0003054674127221263-9.261359291755414e-05j),
rtol=1e-10,
),
),
pytest.param(
Hyp2f1TestCase(
a=-4,
b=-3.956227226099288,
c=-3.9316537064827854,
z=(1.1578947368421053-0.3157894736842106j),
expected=(-0.0020809502580892937-0.0041877333232365095j),
rtol=5e-12,
),
),
pytest.param(
Hyp2f1TestCase(
a=2.02764642551431,
b=-4,
c=2.050308316530781,
z=(0.9473684210526314-0.10526315789473695j),
expected=(0.0011282435590058734+0.0002027062303465851j),
rtol=5e-13,
),
),
pytest.param(
Hyp2f1TestCase(
a=-7.937789122896016,
b=-8,
c=-15.964218273004214,
z=(1.3684210526315788+0.10526315789473673j),
expected=(-9.134907719238265e-05-0.00040219233987390723j),
rtol=5e-12,
),
),
pytest.param(
Hyp2f1TestCase(
a=4.080187217753502,
b=-4,
c=4.0013768449590685,
z=(0.9473684210526314-0.10526315789473695j),
expected=(-0.000519013062087489-0.0005855883076830948j),
rtol=5e-12,
),
),
pytest.param(
Hyp2f1TestCase(
a=-10000,
b=2.2,
c=93459345.3,
z=(2+2j),
expected=(0.9995292071559088-0.00047047067522659253j),
rtol=1e-12,
),
),
]
)
def test_a_b_negative_int(self, hyp2f1_test_case):
a, b, c, z, expected, rtol = hyp2f1_test_case
assert a == int(a) and a < 0 or b == int(b) and b < 0 # Tests the test
assert_allclose(hyp2f1(a, b, c, z), expected, rtol=rtol)
@pytest.mark.parametrize(
"hyp2f1_test_case",
[
pytest.param(
Hyp2f1TestCase(
a=-0.5,
b=-0.9629749245209605,
c=-15.5,
z=(1.1578947368421053-1.1578947368421053j),
expected=(0.9778506962676361+0.044083801141231616j),
rtol=1e-12,
),
),
pytest.param(
Hyp2f1TestCase(
a=8.5,
b=-3.9316537064827854,
c=1.5,
z=(0.9473684210526314-0.10526315789473695j),
expected=(4.0793167523167675-10.11694246310966j),
rtol=6e-12,
),
),
pytest.param(
Hyp2f1TestCase(
a=8.5,
b=-0.9629749245209605,
c=2.5,
z=(1.1578947368421053-0.10526315789473695j),
expected=(-2.9692999501916915+0.6394599899845594j),
rtol=1e-11,
),
),
pytest.param(
Hyp2f1TestCase(
a=-0.5,
b=-0.9629749245209605,
c=-15.5,
z=(1.5789473684210522-1.1578947368421053j),
expected=(0.9493076367106102-0.04316852977183447j),
rtol=1e-11,
),
),
pytest.param(
Hyp2f1TestCase(
a=-0.9220024191881196,
b=-0.5,
c=-15.5,
z=(0.5263157894736841+0.10526315789473673j),
expected=(0.9844377175631795-0.003120587561483841j),
rtol=1e-10,
),
),
],
)
def test_a_b_neg_int_after_euler_hypergeometric_transformation(
self, hyp2f1_test_case
):
a, b, c, z, expected, rtol = hyp2f1_test_case
assert ( # Tests the test
(abs(c - a - int(c - a)) < 1e-15 and c - a < 0) or
(abs(c - b - int(c - b)) < 1e-15 and c - b < 0)
)
assert_allclose(hyp2f1(a, b, c, z), expected, rtol=rtol)
@pytest.mark.parametrize(
"hyp2f1_test_case",
[
pytest.param(
Hyp2f1TestCase(
a=-0.9220024191881196,
b=-0.9629749245209605,
c=-15.963511401609862,
z=(0.10526315789473673-0.3157894736842106j),
expected=(0.9941449585778349+0.01756335047931358j),
rtol=1e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=1.0272592605282642,
b=-0.9629749245209605,
c=-15.963511401609862,
z=(0.5263157894736841+0.5263157894736841j),
expected=(1.0388722293372104-0.09549450380041416j),
rtol=5e-11,
),
),
pytest.param(
Hyp2f1TestCase(
a=2.02764642551431,
b=1.0561196186065624,
c=-7.93846038215665,
z=(0.10526315789473673+0.7368421052631575j),
expected=(2.1948378809826434+24.934157235172222j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=2.02764642551431,
b=16.088264119063613,
c=8.031683612216888,
z=(0.3157894736842106-0.736842105263158j),
expected=(-0.4075277891264672-0.06819344579666956j),
rtol=2e-12,
),
),
pytest.param(
Hyp2f1TestCase(
a=4.080187217753502,
b=2.050308316530781,
c=8.031683612216888,
z=(0.7368421052631575-0.10526315789473695j),
expected=(2.833535530740603-0.6925373701408158j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=2.02764642551431,
b=2.050308316530781,
c=4.078873014294075,
z=(0.10526315789473673-0.3157894736842106j),
expected=(1.005347176329683-0.3580736009337313j),
rtol=5e-16,
),
),
pytest.param(
Hyp2f1TestCase(
a=-0.9220024191881196,
b=-0.9629749245209605,
c=-15.963511401609862,
z=(0.3157894736842106-0.5263157894736843j),
expected=(0.9824353641135369+0.029271018868990268j),
rtol=5e-13,
),
),
pytest.param(
Hyp2f1TestCase(
a=-0.9220024191881196,
b=-0.9629749245209605,
c=-159.63511401609862,
z=(0.3157894736842106-0.5263157894736843j),
expected=(0.9982436200365834+0.002927268199671111j),
rtol=1e-7,
),
marks=pytest.mark.xfail(reason="Poor convergence.")
),
pytest.param(
Hyp2f1TestCase(
a=2.02764642551431,
b=16.088264119063613,
c=8.031683612216888,
z=(0.5263157894736841-0.5263157894736843j),
expected=(-0.6906825165778091+0.8176575137504892j),
rtol=5e-13,
),
),
]
)
def test_region1(self, hyp2f1_test_case):
"""|z| < 0.9 and real(z) >= 0."""
a, b, c, z, expected, rtol = hyp2f1_test_case
assert abs(z) < 0.9 and z.real >= 0 # Tests the test
assert_allclose(hyp2f1(a, b, c, z), expected, rtol=rtol)
@pytest.mark.parametrize(
"hyp2f1_test_case",
[
pytest.param(
Hyp2f1TestCase(
a=2.02764642551431,
b=1.0561196186065624,
c=4.078873014294075,
z=(-0.3157894736842106+0.7368421052631575j),
expected=(0.7751915029081136+0.24068493258607315j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=16.087593263474208,
b=16.088264119063613,
c=2.0397202577726152,
z=(-0.9473684210526316-0.3157894736842106j),
expected=(6.564549348474962e-07+1.6761570598334562e-06j),
rtol=5e-09,
),
),
pytest.param(
Hyp2f1TestCase(
a=1.0272592605282642,
b=2.050308316530781,
c=16.056809865262608,
z=(-0.10526315789473695-0.10526315789473695j),
expected=(0.9862043298997204-0.013293151372712681j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=4.080187217753502,
b=8.077282662161238,
c=16.056809865262608,
z=(-0.3157894736842106-0.736842105263158j),
expected=(0.16163826638754716-0.41378530376373734j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=2.02764642551431,
b=2.050308316530781,
c=-0.906685989801748,
z=(-0.5263157894736843+0.3157894736842106j),
expected=(-6.256871535165936+0.13824973858225484j),
rtol=1e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=2.02764642551431,
b=8.077282662161238,
c=-3.9924618758357022,
z=(-0.9473684210526316-0.3157894736842106j),
expected=(75.54672526086316+50.56157041797548j),
rtol=5e-12,
),
),
pytest.param(
Hyp2f1TestCase(
a=16.087593263474208,
b=8.077282662161238,
c=-1.9631175993998025,
z=(-0.5263157894736843+0.5263157894736841j),
expected=(282.0602536306534-82.31597306936214j),
rtol=5e-13,
),
),
pytest.param(
Hyp2f1TestCase(
a=8.095813935368371,
b=-3.9316537064827854,
c=8.031683612216888,
z=(-0.5263157894736843-0.10526315789473695j),
expected=(5.179603735575851+1.4445374002099813j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=4.080187217753502,
b=-7.949900487447654,
c=1.0651378143226575,
z=(-0.3157894736842106-0.9473684210526316j),
expected=(2317.623517606141-269.51476321010324j),
rtol=5e-13,
),
),
pytest.param(
Hyp2f1TestCase(
a=16.087593263474208,
b=-1.92872979730171,
c=2.0397202577726152,
z=(-0.736842105263158-0.3157894736842106j),
expected=(29.179154096175836+22.126690357535043j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=8.095813935368371,
b=-3.9316537064827854,
c=-15.963511401609862,
z=(-0.736842105263158-0.10526315789473695j),
expected=(0.20820247892032057-0.04763956711248794j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=1.0272592605282642,
b=-15.964218273004214,
c=-1.9631175993998025,
z=(-0.3157894736842106-0.5263157894736843j),
expected=(-157471.63920142158+991294.0587828817j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=8.095813935368371,
b=-7.949900487447654,
c=-7.93846038215665,
z=(-0.10526315789473695-0.10526315789473695j),
expected=(0.30765349653210194-0.2979706363594157j),
rtol=1e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-3.956227226099288,
b=1.0561196186065624,
c=8.031683612216888,
z=(-0.9473684210526316-0.10526315789473695j),
expected=(1.6787607400597109+0.10056620134616838j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=-7.937789122896016,
b=16.088264119063613,
c=4.078873014294075,
z=(-0.5263157894736843-0.736842105263158j),
expected=(7062.07842506049-12768.77955655703j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-7.937789122896016,
b=16.088264119063613,
c=2.0397202577726152,
z=(-0.3157894736842106+0.7368421052631575j),
expected=(54749.216391029935-23078.144720887536j),
rtol=1e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-3.956227226099288,
b=1.0561196186065624,
c=-0.906685989801748,
z=(-0.10526315789473695-0.10526315789473695j),
expected=(1.21521766411428-4.449385173946672j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-15.980848054962111,
b=4.0013768449590685,
c=-1.9631175993998025,
z=(-0.736842105263158+0.5263157894736841j),
expected=(19234693144.196907+1617913967.7294445j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=-1.9214641416286231,
b=1.0561196186065624,
c=-15.963511401609862,
z=(-0.5263157894736843+0.3157894736842106j),
expected=(0.9345201094534371+0.03745712558992195j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-7.937789122896016,
b=-0.9629749245209605,
c=2.0397202577726152,
z=(-0.10526315789473695+0.10526315789473673j),
expected=(0.605732446296829+0.398171533680972j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-1.9214641416286231,
b=-15.964218273004214,
c=2.0397202577726152,
z=(-0.10526315789473695-0.5263157894736843j),
expected=(-9.753761888305416-4.590126012666959j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-3.956227226099288,
b=-1.92872979730171,
c=2.0397202577726152,
z=(-0.10526315789473695+0.3157894736842106j),
expected=(0.45587226291120714+1.0694545265819797j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-0.9220024191881196,
b=-7.949900487447654,
c=-0.906685989801748,
z=(-0.736842105263158+0.3157894736842106j),
expected=(12.334808243233418-76.26089051819054j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=-0.9220024191881196,
b=-7.949900487447654,
c=-15.963511401609862,
z=(-0.5263157894736843+0.10526315789473673j),
expected=(1.2396019687632678-0.047507973161146286j),
rtol=1e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=-15.980848054962111,
b=-0.9629749245209605,
c=-0.906685989801748,
z=(-0.3157894736842106-0.5263157894736843j),
expected=(97.7889554372208-18.999754543400016j),
rtol=5e-13,
),
),
]
)
def test_region2(self, hyp2f1_test_case):
"""|z| < 1 and real(z) < 0."""
a, b, c, z, expected, rtol = hyp2f1_test_case
assert abs(z) < 1 and z.real < 0 # Tests the test
assert_allclose(hyp2f1(a, b, c, z), expected, rtol=rtol)
@pytest.mark.parametrize(
"hyp2f1_test_case",
[
pytest.param(
Hyp2f1TestCase(
a=16.25,
b=4.25,
c=2.5,
z=(0.4931034482758623-0.7965517241379311j),
expected=(38.41207903409937-30.510151276075792j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=2.0,
b=16.087593263474208,
c=16.088264119063613,
z=(0.5689655172413794-0.7965517241379311j),
expected=(-0.6667857912761286-1.0206224321443573j),
rtol=1e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=8.0,
b=1.0272592605282642,
c=-7.949900487447654,
z=(0.4931034482758623-0.7965517241379311j),
expected=(1679024.1647997478-2748129.775857212j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=4.080187217753502,
b=16.0,
c=-7.949900487447654,
z=(0.4931034482758623-0.7965517241379311j),
expected=(424747226301.16986-1245539049327.2856j),
rtol=1e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=2.02764642551431,
b=-15.964218273004214,
c=4.0,
z=(0.4931034482758623-0.7965517241379311j),
expected=(-0.0057826199201757595+0.026359861999025885j),
rtol=5e-06,
),
),
pytest.param(
Hyp2f1TestCase(
a=2.02764642551431,
b=-0.9629749245209605,
c=2.0397202577726152,
z=(0.5689655172413794-0.7965517241379311j),
expected=(0.4671901063492606+0.7769632229834897j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=2.0,
b=-3.956227226099288,
c=-7.949900487447654,
z=(0.4931034482758623+0.7965517241379312j),
expected=(0.9422283708145973+1.3476905754773343j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=1.0,
b=-15.980848054962111,
c=-15.964218273004214,
z=(0.4931034482758623-0.7965517241379311j),
expected=(0.4168719497319604-0.9770953555235625j),
rtol=5e-10,
),
),
pytest.param(
Hyp2f1TestCase(
a=-0.5,
b=16.088264119063613,
c=2.5,
z=(0.5689655172413794+0.7965517241379312j),
expected=(1.279096377550619-2.173827694297929j),
rtol=5e-12,
),
),
pytest.param(
Hyp2f1TestCase(
a=-1.9214641416286231,
b=4.0013768449590685,
c=2.0397202577726152,
z=(0.4931034482758623+0.7965517241379312j),
expected=(-2.071520656161738-0.7846098268395909j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=-0.9220024191881196,
b=8.0,
c=-0.9629749245209605,
z=(0.5689655172413794-0.7965517241379311j),
expected=(-7.740015495862889+3.386766435696699j),
rtol=5e-12,
),
),
pytest.param(
Hyp2f1TestCase(
a=-1.9214641416286231,
b=16.088264119063613,
c=-7.93846038215665,
z=(0.4931034482758623+0.7965517241379312j),
expected=(-6318.553685853241-7133.416085202879j),
rtol=1e-10,
),
),
pytest.param(
Hyp2f1TestCase(
a=-15.980848054962111,
b=-3.9316537064827854,
c=16.056809865262608,
z=(0.5689655172413794+0.7965517241379312j),
expected=(-0.8854577905547399+8.135089099967278j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=-1.9214641416286231,
b=-0.9629749245209605,
c=4.078873014294075,
z=(0.4931034482758623+0.7965517241379312j),
expected=(1.224291301521487+0.36014711766402485j),
rtol=1e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-15.75,
b=-0.75,
c=-1.5,
z=(0.4931034482758623+0.7965517241379312j),
expected=(-1.5765685855028473-3.9399766961046323j),
rtol=1e-3,
),
marks=pytest.mark.xfail(
reason="Unhandled parameters."
)
),
pytest.param(
Hyp2f1TestCase(
a=-15.980848054962111,
b=-1.92872979730171,
c=-7.93846038215665,
z=(0.5689655172413794-0.7965517241379311j),
expected=(56.794588688231194+4.556286783533971j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=4.5,
b=4.5,
c=2.050308316530781,
z=(0.5689655172413794+0.7965517241379312j),
expected=(-4.251456563455306+6.737837111569671j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=4.5,
b=8.5,
c=-1.92872979730171,
z=(0.4931034482758623-0.7965517241379311j),
expected=(2177143.9156599627-3313617.2748088865j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=2.5,
b=-1.5,
c=4.0013768449590685,
z=(0.4931034482758623-0.7965517241379311j),
expected=(0.45563554481603946+0.6212000158060831j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=8.5,
b=-7.5,
c=-15.964218273004214,
z=(0.4931034482758623+0.7965517241379312j),
expected=(61.03201617828073-37.185626416756214j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=-15.5,
b=16.5,
c=4.0013768449590685,
z=(0.4931034482758623+0.7965517241379312j),
expected=(-33143.425963520735+20790.608514722644j),
rtol=1e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=-0.5,
b=4.5,
c=-0.9629749245209605,
z=(0.5689655172413794+0.7965517241379312j),
expected=(30.778600270824423-26.65160354466787j),
rtol=5e-13,
),
),
pytest.param(
Hyp2f1TestCase(
a=-0.5,
b=-3.5,
c=16.088264119063613,
z=(0.5689655172413794-0.7965517241379311j),
expected=(1.0629792615560487-0.08308454486044772j),
rtol=1e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-3.5,
b=-7.5,
c=-0.9629749245209605,
z=(0.4931034482758623-0.7965517241379311j),
expected=(17431.571802591767+3553.7129767034507j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=2.25,
b=8.25,
c=16.5,
z=(0.11379310344827598+0.9482758620689657j),
expected=(0.4468600750211926+0.7313214934036885j),
rtol=1e-3,
),
marks=pytest.mark.xfail(
reason="Unhandled parameters."
)
),
pytest.param(
Hyp2f1TestCase(
a=8.25,
b=16.25,
c=4.5,
z=(0.3413793103448277+0.8724137931034486j),
expected=(-3.905704438293991+3.693347860329299j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=4.25,
b=4.25,
c=-0.5,
z=(0.11379310344827598-0.9482758620689655j),
expected=(-40.31777941834244-89.89852492432011j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=1.0272592605282642,
b=8.0,
c=-15.964218273004214,
z=(0.11379310344827598-0.9482758620689655j),
expected=(52584.347773055284-109197.86244309516j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=8.095813935368371,
b=-15.964218273004214,
c=16.056809865262608,
z=(0.03793103448275881+0.9482758620689657j),
expected=(-1.187733570412592-1.5147865053584582j),
rtol=5e-10,
),
),
pytest.param(
Hyp2f1TestCase(
a=4.080187217753502,
b=-3.9316537064827854,
c=1.0651378143226575,
z=(0.26551724137931054+0.9482758620689657j),
expected=(13.077494677898947+35.071599628224966j),
rtol=5e-13,
),
),
pytest.param(
Hyp2f1TestCase(
a=4.080187217753502,
b=-3.5,
c=-3.5,
z=(0.26551724137931054+0.8724137931034486j),
expected=(-0.5359656237994614-0.2344483936591811j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=4.25,
b=-3.75,
c=-1.5,
z=(0.26551724137931054+0.9482758620689657j),
expected=(1204.8114871663133+64.41022826840198j),
rtol=5e-13,
),
),
pytest.param(
Hyp2f1TestCase(
a=-1.9214641416286231,
b=16.0,
c=4.0013768449590685,
z=(0.03793103448275881-0.9482758620689655j),
expected=(-9.85268872413994+7.011107558429154j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-7.937789122896016,
b=16.0,
c=4.0013768449590685,
z=(0.3413793103448277-0.8724137931034484j),
expected=(528.5522951158454-1412.21630264791j),
rtol=1e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-15.5,
b=1.0561196186065624,
c=-7.5,
z=(0.4172413793103451+0.8724137931034486j),
expected=(133306.45260685298+256510.7045225382j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-7.937789122896016,
b=8.077282662161238,
c=-15.963511401609862,
z=(0.3413793103448277-0.8724137931034484j),
expected=(-0.998555715276967+2.774198742229889j),
rtol=5e-11,
),
),
pytest.param(
Hyp2f1TestCase(
a=-7.75,
b=-0.75,
c=1.5,
z=(0.11379310344827598-0.9482758620689655j),
expected=(2.072445019723025-2.9793504811373515j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=-15.5,
b=-1.92872979730171,
c=1.5,
z=(0.11379310344827598-0.9482758620689655j),
expected=(-41.87581944176649-32.52980303527139j),
rtol=5e-13,
),
),
pytest.param(
Hyp2f1TestCase(
a=-3.75,
b=-15.75,
c=-0.5,
z=(0.11379310344827598-0.9482758620689655j),
expected=(-3729.6214864209774-30627.510509112635j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-3.956227226099288,
b=-15.964218273004214,
c=-0.906685989801748,
z=(0.03793103448275881+0.9482758620689657j),
expected=(-131615.07820609974+145596.13384245415j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=1.5,
b=16.5,
c=16.088264119063613,
z=(0.26551724137931054+0.8724137931034486j),
expected=(0.18981844071070744+0.7855036242583742j),
rtol=1e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=16.5,
b=8.5,
c=-3.9316537064827854,
z=(0.11379310344827598-0.9482758620689655j),
expected=(110224529.2376068+128287212.04290268j),
rtol=5e-13,
),
),
pytest.param(
Hyp2f1TestCase(
a=2.5,
b=-7.5,
c=4.0013768449590685,
z=(0.3413793103448277-0.8724137931034484j),
expected=(0.2722302180888523-0.21790187837266162j),
rtol=1e-12,
),
),
pytest.param(
Hyp2f1TestCase(
a=8.5,
b=-7.5,
c=-15.964218273004214,
z=(0.11379310344827598-0.9482758620689655j),
expected=(-2.8252338010989035+2.430661949756161j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=-3.5,
b=16.5,
c=4.0013768449590685,
z=(0.03793103448275881+0.9482758620689657j),
expected=(-20.604894257647945+74.5109432558078j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-7.5,
b=8.5,
c=-0.9629749245209605,
z=(0.3413793103448277+0.8724137931034486j),
expected=(-2764422.521269463-3965966.9965808876j),
rtol=1e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-1.5,
b=-0.5,
c=1.0561196186065624,
z=(0.26551724137931054+0.9482758620689657j),
expected=(1.2262338560994905+0.6545051266925549j),
rtol=1e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-0.5,
b=-15.5,
c=-7.949900487447654,
z=(0.4172413793103451-0.8724137931034484j),
expected=(-2258.1590330318213+8860.193389158803j),
rtol=1e-10,
),
),
]
)
def test_region4(self, hyp2f1_test_case):
"""0.9 <= |z| <= 1 and |1 - z| >= 1.
This region is unhandled by of the standard transformations and
needs special care.
"""
a, b, c, z, expected, rtol = hyp2f1_test_case
assert 0.9 <= abs(z) <= 1 and abs(1 - z) >= 0.9 # Tests the test
assert_allclose(hyp2f1(a, b, c, z), expected, rtol=rtol)
@pytest.mark.parametrize(
"hyp2f1_test_case",
[
pytest.param(
Hyp2f1TestCase(
a=4.5,
b=16.088264119063613,
c=8.5,
z=(0.6448275862068968+0.8724137931034486j),
expected=(0.018601324701770394-0.07618420586062377j),
rtol=5e-08,
),
),
pytest.param(
Hyp2f1TestCase(
a=8.25,
b=4.25,
c=4.5,
z=(0.6448275862068968-0.8724137931034484j),
expected=(-1.391549471425551-0.118036604903893j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=2.02764642551431,
b=2.050308316530781,
c=-1.9631175993998025,
z=(0.6448275862068968+0.8724137931034486j),
expected=(-2309.178768155151-1932.7247727595172j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=16.087593263474208,
b=1.0,
c=-15.964218273004214,
z=(0.6448275862068968+0.8724137931034486j),
expected=(85592537010.05054-8061416766688.324j),
rtol=1e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=8.095813935368371,
b=-0.5,
c=1.5,
z=(0.6448275862068968+0.8724137931034486j),
expected=(1.2334498208515172-2.1639498536219732j),
rtol=5e-11,
),
),
pytest.param(
Hyp2f1TestCase(
a=16.087593263474208,
b=-15.964218273004214,
c=4.0,
z=(0.6448275862068968+0.8724137931034486j),
expected=(102266.35398605966-44976.97828737755j),
rtol=1e-3,
),
marks=pytest.mark.xfail(
reason="Unhandled parameters."
)
),
pytest.param(
Hyp2f1TestCase(
a=4.0,
b=-3.956227226099288,
c=-15.964218273004214,
z=(0.6448275862068968-0.8724137931034484j),
expected=(-2.9590030930007236-4.190770764773225j),
rtol=5e-13,
),
),
pytest.param(
Hyp2f1TestCase(
a=4.080187217753502,
b=-15.5,
c=-7.5,
z=(0.5689655172413794-0.8724137931034484j),
expected=(-112554838.92074208+174941462.9202412j),
rtol=5e-05,
),
),
pytest.param(
Hyp2f1TestCase(
a=-15.980848054962111,
b=2.050308316530781,
c=1.0,
z=(0.6448275862068968-0.8724137931034484j),
expected=(3.7519882374080145+7.360753798667486j),
rtol=5e-13,
),
),
pytest.param(
Hyp2f1TestCase(
a=-7.937789122896016,
b=2.050308316530781,
c=4.0,
z=(0.6448275862068968-0.8724137931034484j),
expected=(0.000181132943964693+0.07742903103815582j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=-7.937789122896016,
b=4.0013768449590685,
c=-1.9631175993998025,
z=(0.5689655172413794+0.8724137931034486j),
expected=(386338.760913596-386166.51762171905j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-15.980848054962111,
b=8.0,
c=-1.92872979730171,
z=(0.6448275862068968+0.8724137931034486j),
expected=(1348667126.3444858-2375132427.158893j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=-3.5,
b=-0.9629749245209605,
c=4.5,
z=(0.5689655172413794+0.8724137931034486j),
expected=(1.428353429538678+0.6472718120804372j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-7.937789122896016,
b=-0.9629749245209605,
c=2.0397202577726152,
z=(0.5689655172413794-0.8724137931034484j),
expected=(3.1439267526119643-3.145305240375117j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=-1.9214641416286231,
b=-15.964218273004214,
c=-7.93846038215665,
z=(0.6448275862068968-0.8724137931034484j),
expected=(75.27467675681773+144.0946946292215j),
rtol=1e-07,
),
),
pytest.param(
Hyp2f1TestCase(
a=-3.75,
b=-7.75,
c=-7.5,
z=(0.5689655172413794+0.8724137931034486j),
expected=(-0.3699450626264222+0.8732812475910993j),
rtol=1e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=1.5,
b=16.5,
c=1.0561196186065624,
z=(0.5689655172413794-0.8724137931034484j),
expected=(5.5361025821300665-2.4709693474656285j),
rtol=5e-09,
),
),
pytest.param(
Hyp2f1TestCase(
a=1.5,
b=8.5,
c=-3.9316537064827854,
z=(0.6448275862068968-0.8724137931034484j),
expected=(-782805.6699207705-537192.581278909j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=2.5,
b=-15.5,
c=1.0561196186065624,
z=(0.6448275862068968+0.8724137931034486j),
expected=(12.345113400639693-14.993248992902007j),
rtol=0.0005,
),
),
pytest.param(
Hyp2f1TestCase(
a=1.5,
b=-0.5,
c=-15.964218273004214,
z=(0.6448275862068968+0.8724137931034486j),
expected=(23.698109392667842+97.15002033534108j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=-7.5,
b=16.5,
c=4.0013768449590685,
z=(0.6448275862068968-0.8724137931034484j),
expected=(1115.2978631811834+915.9212658718577j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-15.5,
b=16.5,
c=-0.9629749245209605,
z=(0.6448275862068968+0.8724137931034486j),
expected=(642077722221.6489+535274495398.21027j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-7.5,
b=-3.5,
c=4.0013768449590685,
z=(0.5689655172413794+0.8724137931034486j),
expected=(-5.689219222945697+16.877463062787143j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-15.5,
b=-1.5,
c=-0.9629749245209605,
z=(0.5689655172413794-0.8724137931034484j),
expected=(-44.32070290703576+1026.9127058617403j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=16.25,
b=2.25,
c=4.5,
z=(0.11379310344827598-1.024137931034483j),
expected=(-0.021965227124574663+0.009908300237809064j),
rtol=1e-3,
),
marks=pytest.mark.xfail(
reason="Unhandled parameters."
)
),
pytest.param(
Hyp2f1TestCase(
a=2.02764642551431,
b=1.5,
c=16.5,
z=(0.26551724137931054+1.024137931034483j),
expected=(1.0046072901244183+0.19945500134119992j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=16.087593263474208,
b=1.0,
c=-3.9316537064827854,
z=(0.3413793103448277+0.9482758620689657j),
expected=(21022.30133421465+49175.98317370489j),
rtol=5e-13,
),
),
pytest.param(
Hyp2f1TestCase(
a=4.080187217753502,
b=16.088264119063613,
c=-1.9631175993998025,
z=(0.4172413793103451-0.9482758620689655j),
expected=(-7024239.358547302+2481375.02681063j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=16.25,
b=-15.75,
c=1.5,
z=(0.18965517241379315+1.024137931034483j),
expected=(92371704.94848-403546832.548352j),
rtol=5e-06,
),
),
pytest.param(
Hyp2f1TestCase(
a=8.5,
b=-7.949900487447654,
c=8.5,
z=(0.26551724137931054-1.024137931034483j),
expected=(1.9335109845308265+5.986542524829654j),
rtol=5e-10,
),
),
pytest.param(
Hyp2f1TestCase(
a=8.095813935368371,
b=-1.92872979730171,
c=-7.93846038215665,
z=(0.4931034482758623+0.8724137931034486j),
expected=(-122.52639696039328-59.72428067512221j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=16.25,
b=-1.75,
c=-1.5,
z=(0.4931034482758623+0.9482758620689657j),
expected=(-90.40642053579428+50.50649180047921j),
rtol=5e-08,
),
),
pytest.param(
Hyp2f1TestCase(
a=-3.5,
b=8.077282662161238,
c=16.5,
z=(0.4931034482758623+0.9482758620689657j),
expected=(-0.2155745818150323-0.564628986876639j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-0.9220024191881196,
b=1.0561196186065624,
c=8.031683612216888,
z=(0.4172413793103451-0.9482758620689655j),
expected=(0.9503140488280465+0.11574960074292677j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-0.75,
b=2.25,
c=-15.5,
z=(0.4172413793103451+0.9482758620689657j),
expected=(0.9285862488442175+0.8203699266719692j),
rtol=5e-13,
),
),
pytest.param(
Hyp2f1TestCase(
a=-7.75,
b=4.25,
c=-15.5,
z=(0.3413793103448277-0.9482758620689655j),
expected=(-1.0509834850116921-1.1145522325486075j),
rtol=1e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=-7.937789122896016,
b=-0.9629749245209605,
c=2.0397202577726152,
z=(0.4931034482758623-0.9482758620689655j),
expected=(2.88119116536769-3.4249933450696806j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-15.5,
b=-15.964218273004214,
c=16.5,
z=(0.18965517241379315+1.024137931034483j),
expected=(199.65868451496038+347.79384207302877j),
rtol=1e-13,
),
),
pytest.param(
Hyp2f1TestCase(
a=-15.75,
b=-15.75,
c=-3.5,
z=(0.4931034482758623-0.8724137931034484j),
expected=(-208138312553.07013+58631611809.026955j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=-7.937789122896016,
b=-15.5,
c=-7.5,
z=(0.3413793103448277+0.9482758620689657j),
expected=(-23032.90519856288-18256.94050457296j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=4.5,
b=1.5,
c=1.0561196186065624,
z=(0.4931034482758623-0.8724137931034484j),
expected=(1.507342459587056+1.2332023580148403j),
rtol=1e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=2.5,
b=4.5,
c=-3.9316537064827854,
z=(0.4172413793103451+0.9482758620689657j),
expected=(7044.766127108853-40210.365567285575j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=1.5,
b=-1.5,
c=1.0561196186065624,
z=(0.03793103448275881+1.024137931034483j),
expected=(0.2725347741628333-2.247314875514784j),
rtol=1e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=4.5,
b=-1.5,
c=-7.949900487447654,
z=(0.26551724137931054+1.024137931034483j),
expected=(-11.250200011017546+12.597393659160472j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=-7.5,
b=8.5,
c=16.088264119063613,
z=(0.26551724137931054+1.024137931034483j),
expected=(-0.18515160890991517+0.7959014164484782j),
rtol=1e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-7.5,
b=16.5,
c=-3.9316537064827854,
z=(0.3413793103448277-1.024137931034483j),
expected=(998246378.8556538+1112032928.103645j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=-1.5,
b=-3.5,
c=2.050308316530781,
z=(0.03793103448275881+1.024137931034483j),
expected=(0.5527670397711952+2.697662715303637j),
rtol=1.2e-15, # rtol bumped from 1e-15 in gh18414
),
),
pytest.param(
Hyp2f1TestCase(
a=-15.5,
b=-1.5,
c=-0.9629749245209605,
z=(0.4931034482758623-0.8724137931034484j),
expected=(55.396931662136886+968.467463806326j),
rtol=5e-14,
),
),
]
)
def test_region5(self, hyp2f1_test_case):
"""1 < |z| < 1.1 and |1 - z| >= 0.9 and real(z) >= 0"""
a, b, c, z, expected, rtol = hyp2f1_test_case
assert 1 < abs(z) < 1.1 and abs(1 - z) >= 0.9 and z.real >= 0
assert_allclose(hyp2f1(a, b, c, z), expected, rtol=rtol)
@pytest.mark.parametrize(
"hyp2f1_test_case",
[
pytest.param(
Hyp2f1TestCase(
a=8.095813935368371,
b=4.0013768449590685,
c=4.078873014294075,
z=(-0.9473684210526316+0.5263157894736841j),
expected=(-0.0018093573941378783+0.003481887377423739j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=16.087593263474208,
b=2.050308316530781,
c=1.0651378143226575,
z=(-0.736842105263158-0.736842105263158j),
expected=(-0.00023401243818780545-1.7983496305603562e-05j),
rtol=1e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=1.0272592605282642,
b=8.077282662161238,
c=4.078873014294075,
z=(-0.5263157894736843-0.9473684210526316j),
expected=(0.22359773002226846-0.24092487123993353j),
rtol=1e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=1.0272592605282642,
b=2.050308316530781,
c=-15.963511401609862,
z=(-0.9473684210526316-0.5263157894736843j),
expected=(1.191573745740011+0.14347394589721466j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=4.080187217753502,
b=4.0013768449590685,
c=-15.963511401609862,
z=(-0.9473684210526316-0.5263157894736843j),
expected=(31.822620756901784-66.09094396747611j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=4.080187217753502,
b=8.077282662161238,
c=-7.93846038215665,
z=(-0.9473684210526316+0.5263157894736841j),
expected=(207.16750179245952+34.80478274924269j),
rtol=5e-12,
),
),
pytest.param(
Hyp2f1TestCase(
a=8.095813935368371,
b=-7.949900487447654,
c=8.031683612216888,
z=(-0.736842105263158+0.7368421052631575j),
expected=(-159.62429364277145+9.154224290644898j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=1.0272592605282642,
b=-1.92872979730171,
c=16.056809865262608,
z=(-0.9473684210526316+0.5263157894736841j),
expected=(1.121122351247184-0.07170260470126685j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=16.087593263474208,
b=-0.9629749245209605,
c=16.056809865262608,
z=(-0.9473684210526316+0.5263157894736841j),
expected=(1.9040596681316053-0.4951799449960107j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=1.0272592605282642,
b=-1.92872979730171,
c=-0.906685989801748,
z=(-0.9473684210526316-0.5263157894736843j),
expected=(-14.496623497780739-21.897524523299875j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=4.080187217753502,
b=-3.9316537064827854,
c=-3.9924618758357022,
z=(-0.5263157894736843-0.9473684210526316j),
expected=(36.33473466026878+253.88728442029577j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=1.0272592605282642,
b=-15.964218273004214,
c=-0.906685989801748,
z=(-0.9473684210526316+0.5263157894736841j),
expected=(1505052.5653144997-50820766.81043443j),
rtol=1e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=-3.956227226099288,
b=4.0013768449590685,
c=1.0651378143226575,
z=(-0.5263157894736843+0.9473684210526314j),
expected=(-127.79407519260877-28.69899444941112j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-1.9214641416286231,
b=8.077282662161238,
c=16.056809865262608,
z=(-0.9473684210526316-0.5263157894736843j),
expected=(2.0623331933754976+0.741234463565458j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-3.956227226099288,
b=8.077282662161238,
c=2.0397202577726152,
z=(-0.9473684210526316+0.5263157894736841j),
expected=(30.729193458862525-292.5700835046965j),
rtol=1e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-1.9214641416286231,
b=1.0561196186065624,
c=-1.9631175993998025,
z=(-0.5263157894736843-0.9473684210526316j),
expected=(1.1285917906203495-0.735264575450189j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-0.9220024191881196,
b=1.0561196186065624,
c=-3.9924618758357022,
z=(-0.736842105263158+0.7368421052631575j),
expected=(0.6356474446678052-0.02429663008952248j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=-1.9214641416286231,
b=16.088264119063613,
c=-7.93846038215665,
z=(-0.736842105263158+0.7368421052631575j),
expected=(0.4718880510273174+0.655083067736377j),
rtol=1e-11,
),
),
pytest.param(
Hyp2f1TestCase(
a=-7.937789122896016,
b=-3.9316537064827854,
c=16.056809865262608,
z=(-0.9473684210526316+0.5263157894736841j),
expected=(-0.14681550942352714+0.16092206364265146j),
rtol=5e-11,
),
),
pytest.param(
Hyp2f1TestCase(
a=-0.9220024191881196,
b=-15.964218273004214,
c=1.0651378143226575,
z=(-0.5263157894736843+0.9473684210526314j),
expected=(-6.436835190526225+22.883156700606182j),
rtol=5e-14,
),
),
pytest.param(
Hyp2f1TestCase(
a=-0.9220024191881196,
b=-7.949900487447654,
c=4.078873014294075,
z=(-0.9473684210526316-0.5263157894736843j),
expected=(-0.7505682955068583-1.1026583264249945j),
rtol=1e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-3.956227226099288,
b=-3.9316537064827854,
c=-7.93846038215665,
z=(-0.9473684210526316-0.5263157894736843j),
expected=(3.6247814989198166+2.596041360148318j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-3.956227226099288,
b=-15.964218273004214,
c=-1.9631175993998025,
z=(-0.5263157894736843-0.9473684210526316j),
expected=(-59537.65287927933-669074.4342539902j),
rtol=5e-15,
),
),
pytest.param(
Hyp2f1TestCase(
a=-3.956227226099288,
b=-15.964218273004214,
c=-1.9631175993998025,
z=(-0.9473684210526316-0.5263157894736843j),
expected=(-433084.9970266166+431088.393918521j),
rtol=5e-14,
),
),
]
)
def test_region6(self, hyp2f1_test_case):
"""|z| > 1 but not in region 5."""
a, b, c, z, expected, rtol = hyp2f1_test_case
assert (
abs(z) > 1 and
not (1 < abs(z) < 1.1 and abs(1 - z) >= 0.9 and z.real >= 0)
)
assert_allclose(hyp2f1(a, b, c, z), expected, rtol=rtol)
@pytest.mark.slow
@check_version(mpmath, "1.0.0")
def test_test_hyp2f1(self):
"""Test that expected values match what is computed by mpmath.
This gathers the parameters for the test cases out of the pytest marks.
The parameters are a, b, c, z, expected, rtol, where expected should
be the value of hyp2f1(a, b, c, z) computed with mpmath. The test
recomputes hyp2f1(a, b, c, z) using mpmath and verifies that expected
actually is the correct value. This allows the data for the tests to
live within the test code instead of an external datafile, while
avoiding having to compute the results with mpmath during the test,
except for when slow tests are being run.
"""
test_methods = [
test_method for test_method in dir(self)
if test_method.startswith('test') and
# Filter properties and attributes (futureproofing).
callable(getattr(self, test_method)) and
# Filter out this test
test_method != 'test_test_hyp2f1'
]
for test_method in test_methods:
params = self._get_test_parameters(getattr(self, test_method))
for a, b, c, z, expected, _ in params:
assert_allclose(mp_hyp2f1(a, b, c, z), expected, rtol=2.25e-16)
def _get_test_parameters(self, test_method):
"""Get pytest.mark parameters for a test in this class."""
return [
case.values[0] for mark in test_method.pytestmark
if mark.name == 'parametrize'
for case in mark.args[1]
]
| 78,547
| 35.014672
| 79
|
py
|
scipy
|
scipy-main/scipy/special/_precompute/gammainc_asy.py
|
"""
Precompute coefficients of Temme's asymptotic expansion for gammainc.
This takes about 8 hours to run on a 2.3 GHz Macbook Pro with 4GB ram.
Sources:
[1] NIST, "Digital Library of Mathematical Functions",
https://dlmf.nist.gov/
"""
import os
from scipy.special._precompute.utils import lagrange_inversion
try:
import mpmath as mp
except ImportError:
pass
def compute_a(n):
    """a_k from DLMF 5.11.6"""
    # Coefficients of the asymptotic expansion of Gamma, built by the
    # recurrence implied by DLMF 5.11.6, seeded with a_0 = 1/sqrt(2).
    a = [mp.sqrt(2)/2]
    for k in range(1, n):
        ak = a[-1]/k
        # At this point len(a) == k, so a[-j] is a[k - j]: the inner sum
        # pairs a_j with a_{k-j}.
        for j in range(1, len(a)):
            ak -= a[j]*a[-j]/(j + 1)
        ak /= a[0]*(1 + mp.mpf(1)/(k + 1))
        a.append(ak)
    return a
def compute_g(n):
    """First *n* coefficients g_k from DLMF 5.11.3/5.11.5."""
    ak = compute_a(2*n)
    result = []
    for k in range(n):
        result.append(mp.sqrt(2)*mp.rf(0.5, k)*ak[2*k])
    return result
def eta(lam):
    """Function from DLMF 8.12.1 shifted to be centered at 0.

    Returns sign(lam) * sqrt(2*(lam - log(1 + lam))), and 0 at lam == 0.
    """
    if lam == 0:
        return 0
    magnitude = mp.sqrt(2*(lam - mp.log(lam + 1)))
    return magnitude if lam > 0 else -magnitude
def compute_alpha(n):
    """alpha_n from DLMF 8.12.13, via series reversion of eta about 0."""
    return lagrange_inversion(mp.taylor(eta, 0, n - 1))
def compute_d(K, N):
    """d_{k, n} from DLMF 8.12.12"""
    # Working length: the recurrence for row k consumes entry n+2 of row
    # k-1, so each row is two entries shorter; start with M = N + 2*K so
    # that every row still has at least N usable entries after K steps.
    M = N + 2*K
    d0 = [-mp.mpf(1)/3]
    alpha = compute_alpha(M + 2)
    # Row 0: d_{0,n} = (n + 2) * alpha_{n+2}, with d_{0,0} = -1/3 above.
    for n in range(1, M):
        d0.append((n + 2)*alpha[n+2])
    d = [d0]
    g = compute_g(K)
    for k in range(1, K):
        dk = []
        for n in range(M - 2*k):
            # d_{k,n} = (-1)^k g_k d_{0,n} + (n + 2) d_{k-1, n+2}
            dk.append((-1)**k*g[k]*d[0][n] + (n + 2)*d[k-1][n+2])
        d.append(dk)
    # Trim every row to the N coefficients actually requested.
    for k in range(K):
        d[k] = d[k][:N]
    return d
header = \
r"""/* This file was automatically generated by _precomp/gammainc.py.
* Do not edit it manually!
*/
#ifndef IGAM_H
#define IGAM_H
#define K {}
#define N {}
static const double d[K][N] =
{{"""
footer = \
r"""
#endif
"""
def main():
    """Regenerate ``cephes/igam.h`` with the K x N table of d coefficients."""
    print(__doc__)
    K = 25
    N = 25
    with mp.workdps(50):
        d = compute_d(K, N)
    fn = os.path.join(os.path.dirname(__file__), '..', 'cephes', 'igam.h')
    # Write to a temporary '.new' file and rename at the end so the header
    # is replaced atomically (no partially written file is left in place).
    with open(fn + '.new', 'w') as f:
        f.write(header.format(K, N))
        for k, row in enumerate(d):
            # 17 significant digits: enough to round-trip a double exactly.
            row = [mp.nstr(x, 17, min_fixed=0, max_fixed=0) for x in row]
            f.write('{')
            f.write(", ".join(row))
            if k < K - 1:
                f.write('},\n')
            else:
                # The final row also closes the outer array initializer.
                f.write('}};\n')
        f.write(footer)
    os.rename(fn + '.new', fn)
if __name__ == "__main__":
main()
| 2,502
| 20.393162
| 74
|
py
|
scipy
|
scipy-main/scipy/special/_precompute/loggamma.py
|
"""Precompute series coefficients for log-Gamma."""
try:
import mpmath
except ImportError:
pass
def stirling_series(N):
    """First *N* Stirling-series coefficients B_{2n} / (2n*(2n - 1))."""
    result = []
    with mpmath.workdps(100):
        for n in range(1, N + 1):
            result.append(mpmath.bernoulli(2*n)/(2*n*(2*n - 1)))
    return result
def taylor_series_at_1(N):
    """First *N* Taylor coefficients of log-Gamma about z = 1.

    The leading coefficient is -euler; for n >= 2 the coefficients are
    (-1)^n * zeta(n) / n.
    """
    with mpmath.workdps(100):
        head = [-mpmath.euler]
        tail = [(-1)**n*mpmath.zeta(n)/n for n in range(2, N + 1)]
    return head + tail
def main():
    """Print the precomputed coefficient tables for log-Gamma."""
    print(__doc__)
    print()
    # Coefficients are printed highest order first (hence the [::-1]) at
    # 20 significant digits.
    stirling_coeffs = [mpmath.nstr(x, 20, min_fixed=0, max_fixed=0)
                       for x in stirling_series(8)[::-1]]
    taylor_coeffs = [mpmath.nstr(x, 20, min_fixed=0, max_fixed=0)
                     for x in taylor_series_at_1(23)[::-1]]
    print("Stirling series coefficients")
    print("----------------------------")
    print("\n".join(stirling_coeffs))
    print()
    print("Taylor series coefficients")
    print("--------------------------")
    print("\n".join(taylor_coeffs))
    print()
if __name__ == '__main__':
main()
| 1,094
| 23.886364
| 67
|
py
|
scipy
|
scipy-main/scipy/special/_precompute/wright_bessel.py
|
"""Precompute coefficients of several series expansions
of Wright's generalized Bessel function Phi(a, b, x).
See https://dlmf.nist.gov/10.46.E1 with rho=a, beta=b, z=x.
"""
from argparse import ArgumentParser, RawTextHelpFormatter
import numpy as np
from scipy.integrate import quad
from scipy.optimize import minimize_scalar, curve_fit
from time import time
try:
import sympy
from sympy import EulerGamma, Rational, S, Sum, \
factorial, gamma, gammasimp, pi, polygamma, symbols, zeta
from sympy.polys.polyfuncs import horner
except ImportError:
pass
def series_small_a():
"""Tylor series expansion of Phi(a, b, x) in a=0 up to order 5.
"""
order = 5
a, b, x, k = symbols("a b x k")
A = [] # terms with a
X = [] # terms with x
B = [] # terms with b (polygammas)
# Phi(a, b, x) = exp(x)/gamma(b) * sum(A[i] * X[i] * B[i])
expression = Sum(x**k/factorial(k)/gamma(a*k+b), (k, 0, S.Infinity))
expression = gamma(b)/sympy.exp(x) * expression
# nth term of taylor series in a=0: a^n/n! * (d^n Phi(a, b, x)/da^n at a=0)
for n in range(0, order+1):
term = expression.diff(a, n).subs(a, 0).simplify().doit()
# set the whole bracket involving polygammas to 1
x_part = (term.subs(polygamma(0, b), 1)
.replace(polygamma, lambda *args: 0))
# sign convetion: x part always positive
x_part *= (-1)**n
A.append(a**n/factorial(n))
X.append(horner(x_part))
B.append(horner((term/x_part).simplify()))
s = "Tylor series expansion of Phi(a, b, x) in a=0 up to order 5.\n"
s += "Phi(a, b, x) = exp(x)/gamma(b) * sum(A[i] * X[i] * B[i], i=0..5)\n"
for name, c in zip(['A', 'X', 'B'], [A, X, B]):
for i in range(len(c)):
s += f"\n{name}[{i}] = " + str(c[i])
return s
# expansion of digamma
def dg_series(z, n):
    """Symbolic expansion of digamma(z) in z=0 to order n.

    See https://dlmf.nist.gov/5.7.E4 and with https://dlmf.nist.gov/5.5.E2
    """
    k = symbols("k")
    tail = sympy.summation((-1)**k * zeta(k) * z**(k - 1), (k, 2, n + 1))
    return -1/z - EulerGamma + tail
def pg_series(k, z, n):
    """Symbolic expansion of polygamma(k, z) in z=0 to order n."""
    # polygamma(k, z) is the k-th derivative of digamma(z), so differentiate
    # the digamma expansion k times (carrying n+k terms to keep order n).
    return sympy.diff(dg_series(z, n+k), z, k)
def series_small_a_small_b():
"""Tylor series expansion of Phi(a, b, x) in a=0 and b=0 up to order 5.
Be aware of cancellation of poles in b=0 of digamma(b)/Gamma(b) and
polygamma functions.
digamma(b)/Gamma(b) = -1 - 2*M_EG*b + O(b^2)
digamma(b)^2/Gamma(b) = 1/b + 3*M_EG + b*(-5/12*PI^2+7/2*M_EG^2) + O(b^2)
polygamma(1, b)/Gamma(b) = 1/b + M_EG + b*(1/12*PI^2 + 1/2*M_EG^2) + O(b^2)
and so on.
"""
order = 5
a, b, x, k = symbols("a b x k")
M_PI, M_EG, M_Z3 = symbols("M_PI M_EG M_Z3")
c_subs = {pi: M_PI, EulerGamma: M_EG, zeta(3): M_Z3}
A = [] # terms with a
X = [] # terms with x
B = [] # terms with b (polygammas expanded)
C = [] # terms that generate B
# Phi(a, b, x) = exp(x) * sum(A[i] * X[i] * B[i])
# B[0] = 1
# B[k] = sum(C[k] * b**k/k!, k=0..)
# Note: C[k] can be obtained from a series expansion of 1/gamma(b).
expression = gamma(b)/sympy.exp(x) * \
Sum(x**k/factorial(k)/gamma(a*k+b), (k, 0, S.Infinity))
# nth term of taylor series in a=0: a^n/n! * (d^n Phi(a, b, x)/da^n at a=0)
for n in range(0, order+1):
term = expression.diff(a, n).subs(a, 0).simplify().doit()
# set the whole bracket involving polygammas to 1
x_part = (term.subs(polygamma(0, b), 1)
.replace(polygamma, lambda *args: 0))
# sign convetion: x part always positive
x_part *= (-1)**n
# expansion of polygamma part with 1/gamma(b)
pg_part = term/x_part/gamma(b)
if n >= 1:
# Note: highest term is digamma^n
pg_part = pg_part.replace(polygamma,
lambda k, x: pg_series(k, x, order+1+n))
pg_part = (pg_part.series(b, 0, n=order+1-n)
.removeO()
.subs(polygamma(2, 1), -2*zeta(3))
.simplify()
)
A.append(a**n/factorial(n))
X.append(horner(x_part))
B.append(pg_part)
# Calculate C and put in the k!
C = sympy.Poly(B[1].subs(c_subs), b).coeffs()
C.reverse()
for i in range(len(C)):
C[i] = (C[i] * factorial(i)).simplify()
s = "Tylor series expansion of Phi(a, b, x) in a=0 and b=0 up to order 5."
s += "\nPhi(a, b, x) = exp(x) * sum(A[i] * X[i] * B[i], i=0..5)\n"
s += "B[0] = 1\n"
s += "B[i] = sum(C[k+i-1] * b**k/k!, k=0..)\n"
s += "\nM_PI = pi"
s += "\nM_EG = EulerGamma"
s += "\nM_Z3 = zeta(3)"
for name, c in zip(['A', 'X'], [A, X]):
for i in range(len(c)):
s += f"\n{name}[{i}] = "
s += str(c[i])
# For C, do also compute the values numerically
for i in range(len(C)):
s += f"\n# C[{i}] = "
s += str(C[i])
s += f"\nC[{i}] = "
s += str(C[i].subs({M_EG: EulerGamma, M_PI: pi, M_Z3: zeta(3)})
.evalf(17))
# Does B have the assumed structure?
s += "\n\nTest if B[i] does have the assumed structure."
s += "\nC[i] are derived from B[1] allone."
s += "\nTest B[2] == C[1] + b*C[2] + b^2/2*C[3] + b^3/6*C[4] + .."
test = sum([b**k/factorial(k) * C[k+1] for k in range(order-1)])
test = (test - B[2].subs(c_subs)).simplify()
s += f"\ntest successful = {test==S(0)}"
s += "\nTest B[3] == C[2] + b*C[3] + b^2/2*C[4] + .."
test = sum([b**k/factorial(k) * C[k+2] for k in range(order-2)])
test = (test - B[3].subs(c_subs)).simplify()
s += f"\ntest successful = {test==S(0)}"
return s
def asymptotic_series():
"""Asymptotic expansion for large x.
Phi(a, b, x) ~ Z^(1/2-b) * exp((1+a)/a * Z) * sum_k (-1)^k * C_k / Z^k
Z = (a*x)^(1/(1+a))
Wright (1935) lists the coefficients C_0 and C_1 (he calls them a_0 and
a_1). With slightly different notation, Paris (2017) lists coefficients
c_k up to order k=3.
Paris (2017) uses ZP = (1+a)/a * Z (ZP = Z of Paris) and
C_k = C_0 * (-a/(1+a))^k * c_k
"""
order = 8
class g(sympy.Function):
"""Helper function g according to Wright (1935)
g(n, rho, v) = (1 + (rho+2)/3 * v + (rho+2)*(rho+3)/(2*3) * v^2 + ...)
Note: Wright (1935) uses square root of above definition.
"""
nargs = 3
@classmethod
def eval(cls, n, rho, v):
if not n >= 0:
raise ValueError("must have n >= 0")
elif n == 0:
return 1
else:
return g(n-1, rho, v) \
+ gammasimp(gamma(rho+2+n)/gamma(rho+2)) \
/ gammasimp(gamma(3+n)/gamma(3))*v**n
class coef_C(sympy.Function):
"""Calculate coefficients C_m for integer m.
C_m is the coefficient of v^(2*m) in the Taylor expansion in v=0 of
Gamma(m+1/2)/(2*pi) * (2/(rho+1))^(m+1/2) * (1-v)^(-b)
* g(rho, v)^(-m-1/2)
"""
nargs = 3
@classmethod
def eval(cls, m, rho, beta):
if not m >= 0:
raise ValueError("must have m >= 0")
v = symbols("v")
expression = (1-v)**(-beta) * g(2*m, rho, v)**(-m-Rational(1, 2))
res = expression.diff(v, 2*m).subs(v, 0) / factorial(2*m)
res = res * (gamma(m + Rational(1, 2)) / (2*pi)
* (2/(rho+1))**(m + Rational(1, 2)))
return res
# in order to have nice ordering/sorting of expressions, we set a = xa.
xa, b, xap1 = symbols("xa b xap1")
C0 = coef_C(0, xa, b)
# a1 = a(1, rho, beta)
s = "Asymptotic expansion for large x\n"
s += "Phi(a, b, x) = Z**(1/2-b) * exp((1+a)/a * Z) \n"
s += " * sum((-1)**k * C[k]/Z**k, k=0..6)\n\n"
s += "Z = pow(a * x, 1/(1+a))\n"
s += "A[k] = pow(a, k)\n"
s += "B[k] = pow(b, k)\n"
s += "Ap1[k] = pow(1+a, k)\n\n"
s += "C[0] = 1./sqrt(2. * M_PI * Ap1[1])\n"
for i in range(1, order+1):
expr = (coef_C(i, xa, b) / (C0/(1+xa)**i)).simplify()
factor = [x.denominator() for x in sympy.Poly(expr).coeffs()]
factor = sympy.lcm(factor)
expr = (expr * factor).simplify().collect(b, sympy.factor)
expr = expr.xreplace({xa+1: xap1})
s += f"C[{i}] = C[0] / ({factor} * Ap1[{i}])\n"
s += f"C[{i}] *= {str(expr)}\n\n"
import re
re_a = re.compile(r'xa\*\*(\d+)')
s = re_a.sub(r'A[\1]', s)
re_b = re.compile(r'b\*\*(\d+)')
s = re_b.sub(r'B[\1]', s)
s = s.replace('xap1', 'Ap1[1]')
s = s.replace('xa', 'a')
# max integer = 2^31-1 = 2,147,483,647. Solution: Put a point after 10
# or more digits.
re_digits = re.compile(r'(\d{10,})')
s = re_digits.sub(r'\1.', s)
return s
def optimal_epsilon_integral():
"""Fit optimal choice of epsilon for integral representation.
The integrand of
int_0^pi P(eps, a, b, x, phi) * dphi
can exhibit oscillatory behaviour. It stems from the cosine of P and can be
minimized by minimizing the arc length of the argument
f(phi) = eps * sin(phi) - x * eps^(-a) * sin(a * phi) + (1 - b) * phi
of cos(f(phi)).
We minimize the arc length in eps for a grid of values (a, b, x) and fit a
parametric function to it.
"""
def fp(eps, a, b, x, phi):
"""Derivative of f w.r.t. phi."""
eps_a = np.power(1. * eps, -a)
return eps * np.cos(phi) - a * x * eps_a * np.cos(a * phi) + 1 - b
def arclength(eps, a, b, x, epsrel=1e-2, limit=100):
"""Compute Arc length of f.
Note that the arg length of a function f fro t0 to t1 is given by
int_t0^t1 sqrt(1 + f'(t)^2) dt
"""
return quad(lambda phi: np.sqrt(1 + fp(eps, a, b, x, phi)**2),
0, np.pi,
epsrel=epsrel, limit=100)[0]
# grid of minimal arc length values
data_a = [1e-3, 0.1, 0.5, 0.9, 1, 2, 4, 5, 6, 8]
data_b = [0, 1, 4, 7, 10]
data_x = [1, 1.5, 2, 4, 10, 20, 50, 100, 200, 500, 1e3, 5e3, 1e4]
data_a, data_b, data_x = np.meshgrid(data_a, data_b, data_x)
data_a, data_b, data_x = (data_a.flatten(), data_b.flatten(),
data_x.flatten())
best_eps = []
for i in range(data_x.size):
best_eps.append(
minimize_scalar(lambda eps: arclength(eps, data_a[i], data_b[i],
data_x[i]),
bounds=(1e-3, 1000),
method='Bounded', options={'xatol': 1e-3}).x
)
best_eps = np.array(best_eps)
# pandas would be nice, but here a dictionary is enough
df = {'a': data_a,
'b': data_b,
'x': data_x,
'eps': best_eps,
}
def func(data, A0, A1, A2, A3, A4, A5):
"""Compute parametric function to fit."""
a = data['a']
b = data['b']
x = data['x']
return (A0 * b * np.exp(-0.5 * a)
+ np.exp(A1 + 1 / (1 + a) * np.log(x) - A2 * np.exp(-A3 * a)
+ A4 / (1 + np.exp(A5 * a))))
func_params = list(curve_fit(func, df, df['eps'], method='trf')[0])
s = "Fit optimal eps for integrand P via minimal arc length\n"
s += "with parametric function:\n"
s += "optimal_eps = (A0 * b * exp(-a/2) + exp(A1 + 1 / (1 + a) * log(x)\n"
s += " - A2 * exp(-A3 * a) + A4 / (1 + exp(A5 * a)))\n\n"
s += "Fitted parameters A0 to A5 are:\n"
s += ', '.join([f'{x:.5g}' for x in func_params])
return s
def main():
t0 = time()
parser = ArgumentParser(description=__doc__,
formatter_class=RawTextHelpFormatter)
parser.add_argument('action', type=int, choices=[1, 2, 3, 4],
help='chose what expansion to precompute\n'
'1 : Series for small a\n'
'2 : Series for small a and small b\n'
'3 : Asymptotic series for large x\n'
' This may take some time (>4h).\n'
'4 : Fit optimal eps for integral representation.'
)
args = parser.parse_args()
switch = {1: lambda: print(series_small_a()),
2: lambda: print(series_small_a_small_b()),
3: lambda: print(asymptotic_series()),
4: lambda: print(optimal_epsilon_integral())
}
switch.get(args.action, lambda: print("Invalid input."))()
print(f"\n{(time() - t0)/60:.1f} minutes elapsed.\n")
if __name__ == '__main__':
main()
| 12,866
| 36.51312
| 79
|
py
|
scipy
|
scipy-main/scipy/special/_precompute/hyp2f1_data.py
|
"""This script evaluates scipy's implementation of hyp2f1 against mpmath's.
Author: Albert Steppi
This script is long running and generates a large output file. With default
arguments, the generated file is roughly 700MB in size and it takes around
40 minutes using an Intel(R) Core(TM) i5-8250U CPU with n_jobs set to 8
(full utilization). There are optional arguments which can be used to restrict
(or enlarge) the computations performed. These are described below.
The output of this script can be analyzed to identify suitable test cases and
to find parameter and argument regions where hyp2f1 needs to be improved.
The script has one mandatory positional argument for specifying the path to
the location where the output file is to be placed, and 4 optional arguments
--n_jobs, --grid_size, --regions, and --parameter_groups. --n_jobs specifies
the number of processes to use if running in parallel. The default value is 1.
The other optional arguments are explained below.
Produces a tab separated values file with 11 columns. The first four columns
contain the parameters a, b, c and the argument z. The next two contain |z| and
a region code for which region of the complex plane belongs to. The regions are
0) z == 1
1) |z| < 0.9 and real(z) >= 0
2) |z| <= 1 and real(z) < 0
3) 0.9 <= |z| <= 1 and |1 - z| < 0.9:
4) 0.9 <= |z| <= 1 and |1 - z| >= 0.9 and real(z) >= 0:
5) 1 < |z| < 1.1 and |1 - z| >= 0.9 and real(z) >= 0
6) |z| > 1 and not in 5)
The --regions optional argument allows the user to specify a list of regions
to which computation will be restricted.
Parameters a, b, c are taken from a 10 * 10 * 10 grid with values at
-16, -8, -4, -2, -1, 1, 2, 4, 8, 16
with random perturbations applied.
There are 9 parameter groups handling the following cases.
1) A, B, C, B - A, C - A, C - B, C - A - B all non-integral.
2) B - A integral
3) C - A integral
4) C - B integral
5) C - A - B integral
6) A integral
7) B integral
8) C integral
9) Wider range with c - a - b > 0.
The seventh column of the output file is an integer between 1 and 8 specifying
the parameter group as above.
The --parameter_groups optional argument allows the user to specify a list of
parameter groups to which computation will be restricted.
The argument z is taken from a grid in the box
-box_size <= real(z) <= box_size, -box_size <= imag(z) <= box_size.
with grid size specified using the optional command line argument --grid_size,
and box_size specificed with the command line argument --box_size.
The default value of grid_size is 20 and the default value of box_size is 2.0,
yielding a 20 * 20 grid in the box with corners -2-2j, -2+2j, 2-2j, 2+2j.
The final four columns have the expected value of hyp2f1 for the given
parameters and argument as calculated with mpmath, the observed value
calculated with scipy's hyp2f1, the relative error, and the absolute error.
As special cases of hyp2f1 are moved from the original Fortran implementation
into Cython, this script can be used to ensure that no regressions occur and
to point out where improvements are needed.
"""
import os
import csv
import argparse
import numpy as np
from itertools import product
from multiprocessing import Pool
from scipy.special import hyp2f1
from scipy.special.tests.test_hyp2f1 import mp_hyp2f1
def get_region(z):
    """Classify *z* into the numbered regions (0-6) where hyp2f1 must be
    handled differently."""
    r = abs(z)
    re_z = z.real
    if z == 1 + 0j:
        return 0
    if r < 0.9 and re_z >= 0:
        return 1
    if r <= 1 and re_z < 0:
        return 2
    if 0.9 <= r <= 1:
        # re_z >= 0 is guaranteed here (the re_z < 0 case returned above).
        return 3 if abs(1 - z) < 0.9 else 4
    if 1 < r < 1.1 and abs(1 - z) >= 0.9 and re_z >= 0:
        return 5
    return 6
def get_result(a, b, c, z, group):
    """Get results for given parameter and value combination."""
    expected, observed = mp_hyp2f1(a, b, c, z), hyp2f1(a, b, c, z)
    # Operator precedence: (both NaN) or (exactly equal). Either way the
    # result counts as a perfect match with zero error.
    if (
        np.isnan(observed) and np.isnan(expected) or
        expected == observed
    ):
        relative_error = 0.0
        absolute_error = 0.0
    elif np.isnan(observed):
        # Set error to infinity if result is nan when not expected to be.
        # Makes results easier to interpret.
        relative_error = float("inf")
        absolute_error = float("inf")
    else:
        absolute_error = abs(expected - observed)
        relative_error = absolute_error / abs(expected)
    # One row of the output table; column order matches the TSV header
    # written by main().
    return (
        a,
        b,
        c,
        z,
        abs(z),
        get_region(z),
        group,
        expected,
        observed,
        relative_error,
        absolute_error,
    )
def get_result_no_mp(a, b, c, z, group):
    """Result row for one (a, b, c, z) combination, skipping mpmath.

    Only scipy's ``hyp2f1`` is evaluated; the ``expected`` and error
    columns are filled with NaN placeholders so the row layout matches
    ``get_result``.
    """
    observed = hyp2f1(a, b, c, z)
    nan = float('nan')
    row = (
        a,
        b,
        c,
        z,
        abs(z),
        get_region(z),
        group,
        complex('nan'),
        observed,
        nan,
        nan,
    )
    return row
def get_results(params, Z, n_jobs=1, compute_mp=True):
    """Batch compute results for multiple parameter and argument values.
    Parameters
    ----------
    params : iterable
        iterable of tuples of floats (a, b, c, group) specifying parameter
        values a, b, c for hyp2f1 and the integer parameter-group label
    Z : iterable of complex
        Arguments at which to evaluate hyp2f1
    n_jobs : Optional[int]
        Number of jobs for parallel execution.
    compute_mp : Optional[bool]
        When False, skip the (slow) mpmath reference computation and fill
        the expected/error columns with NaN placeholders.
    Returns
    -------
    list
        List of tuples of results values. See return value in source code
        of `get_result`.
    """
    # Lazy generator: the full cross product of params x Z is never
    # materialized; the worker pool pulls items as needed.
    input_ = (
        (a, b, c, z, group) for (a, b, c, group), z in product(params, Z)
    )
    with Pool(n_jobs) as pool:
        rows = pool.starmap(
            get_result if compute_mp else get_result_no_mp,
            input_
        )
    return rows
def _make_hyp2f1_test_case(a, b, c, z, rtol):
    """Generate string for single test case as used in test_hyp2f1.py."""
    # The ground-truth value is computed with mpmath and embedded verbatim
    # into the generated source text.
    expected = mp_hyp2f1(a, b, c, z)
    return (
        " pytest.param(\n"
        " Hyp2f1TestCase(\n"
        f" a={a},\n"
        f" b={b},\n"
        f" c={c},\n"
        f" z={z},\n"
        f" expected={expected},\n"
        f" rtol={rtol},\n"
        " ),\n"
        " ),"
    )
def make_hyp2f1_test_cases(rows):
    """Render a list of hyp2f1 test cases as source text.

    Parameters
    ----------
    rows : list
        Lists of the form [a, b, c, z, rtol] where a, b, c, z are the
        hyp2f1 parameters/argument and rtol the expected relative error
        for the associated test case.

    Returns
    -------
    str
        Source for a list literal. It can be printed or saved to a file
        and then copied into an argument for `pytest.mark.parameterize`
        within `scipy.special.tests.test_hyp2f1.py`.
    """
    pieces = [_make_hyp2f1_test_case(a, b, c, z, rtol)
              for a, b, c, z, rtol in rows]
    return "[\n" + '\n'.join(pieces) + "\n]"
def main(
outpath,
n_jobs=1,
box_size=2.0,
grid_size=20,
regions=None,
parameter_groups=None,
compute_mp=True,
):
outpath = os.path.realpath(os.path.expanduser(outpath))
random_state = np.random.RandomState(1234)
# Parameters a, b, c selected near these values.
root_params = np.array(
[-16, -8, -4, -2, -1, 1, 2, 4, 8, 16]
)
# Perturbations to apply to root values.
perturbations = 0.1 * random_state.random_sample(
size=(3, len(root_params))
)
params = []
# Parameter group 1
# -----------------
# No integer differences. This has been confirmed for the above seed.
A = root_params + perturbations[0, :]
B = root_params + perturbations[1, :]
C = root_params + perturbations[2, :]
params.extend(
sorted(
((a, b, c, 1) for a, b, c in product(A, B, C)),
key=lambda x: max(abs(x[0]), abs(x[1])),
)
)
# Parameter group 2
# -----------------
# B - A an integer
A = root_params + 0.5
B = root_params + 0.5
C = root_params + perturbations[1, :]
params.extend(
sorted(
((a, b, c, 2) for a, b, c in product(A, B, C)),
key=lambda x: max(abs(x[0]), abs(x[1])),
)
)
# Parameter group 3
# -----------------
# C - A an integer
A = root_params + 0.5
B = root_params + perturbations[1, :]
C = root_params + 0.5
params.extend(
sorted(
((a, b, c, 3) for a, b, c in product(A, B, C)),
key=lambda x: max(abs(x[0]), abs(x[1])),
)
)
# Parameter group 4
# -----------------
# C - B an integer
A = root_params + perturbations[0, :]
B = root_params + 0.5
C = root_params + 0.5
params.extend(
sorted(
((a, b, c, 4) for a, b, c in product(A, B, C)),
key=lambda x: max(abs(x[0]), abs(x[1])),
)
)
# Parameter group 5
# -----------------
# C - A - B an integer
A = root_params + 0.25
B = root_params + 0.25
C = root_params + 0.5
params.extend(
sorted(
((a, b, c, 5) for a, b, c in product(A, B, C)),
key=lambda x: max(abs(x[0]), abs(x[1])),
)
)
# Parameter group 6
# -----------------
# A an integer
A = root_params
B = root_params + perturbations[0, :]
C = root_params + perturbations[1, :]
params.extend(
sorted(
((a, b, c, 6) for a, b, c in product(A, B, C)),
key=lambda x: max(abs(x[0]), abs(x[1])),
)
)
# Parameter group 7
# -----------------
# B an integer
A = root_params + perturbations[0, :]
B = root_params
C = root_params + perturbations[1, :]
params.extend(
sorted(
((a, b, c, 7) for a, b, c in product(A, B, C)),
key=lambda x: max(abs(x[0]), abs(x[1])),
)
)
# Parameter group 8
# -----------------
# C an integer
A = root_params + perturbations[0, :]
B = root_params + perturbations[1, :]
C = root_params
params.extend(
sorted(
((a, b, c, 8) for a, b, c in product(A, B, C)),
key=lambda x: max(abs(x[0]), abs(x[1])),
)
)
# Parameter group 9
# -----------------
# Wide range of magnitudes, c - a - b > 0.
phi = (1 + np.sqrt(5))/2
P = phi**np.arange(16)
P = np.hstack([-P, P])
group_9_params = sorted(
(
(a, b, c, 9) for a, b, c in product(P, P, P) if c - a - b > 0
),
key=lambda x: max(abs(x[0]), abs(x[1])),
)
if parameter_groups is not None:
# Group 9 params only used if specified in arguments.
params.extend(group_9_params)
params = [
(a, b, c, group) for a, b, c, group in params
if group in parameter_groups
]
# grid_size * grid_size grid in box with corners
# -2 - 2j, -2 + 2j, 2 - 2j, 2 + 2j
X, Y = np.meshgrid(
np.linspace(-box_size, box_size, grid_size),
np.linspace(-box_size, box_size, grid_size)
)
Z = X + Y * 1j
Z = Z.flatten().tolist()
# Add z = 1 + 0j (region 0).
Z.append(1 + 0j)
if regions is not None:
Z = [z for z in Z if get_region(z) in regions]
# Evaluate scipy and mpmath's hyp2f1 for all parameter combinations
# above against all arguments in the grid Z
rows = get_results(params, Z, n_jobs=n_jobs, compute_mp=compute_mp)
with open(outpath, "w", newline="") as f:
writer = csv.writer(f, delimiter="\t")
writer.writerow(
[
"a",
"b",
"c",
"z",
"|z|",
"region",
"parameter_group",
"expected", # mpmath's hyp2f1
"observed", # scipy's hyp2f1
"relative_error",
"absolute_error",
]
)
for row in rows:
writer.writerow(row)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Test scipy's hyp2f1 against mpmath's on a grid in the"
" complex plane over a grid of parameter values. Saves output to file"
" specified in positional argument \"outpath\"."
" Caution: With default arguments, the generated output file is"
" roughly 700MB in size. Script may take several hours to finish if"
" \"--n_jobs\" is set to 1."
)
parser.add_argument(
"outpath", type=str, help="Path to output tsv file."
)
parser.add_argument(
"--n_jobs",
type=int,
default=1,
help="Number of jobs for multiprocessing.",
)
parser.add_argument(
"--box_size",
type=float,
default=2.0,
help="hyp2f1 is evaluated in box of side_length 2*box_size centered"
" at the origin."
)
parser.add_argument(
"--grid_size",
type=int,
default=20,
help="hyp2f1 is evaluated on grid_size * grid_size grid in box of side"
" length 2*box_size centered at the origin."
)
parser.add_argument(
"--parameter_groups",
type=int,
nargs='+',
default=None,
help="Restrict to supplied parameter groups. See the Docstring for"
" this module for more info on parameter groups. Calculate for all"
" parameter groups by default."
)
parser.add_argument(
"--regions",
type=int,
nargs='+',
default=None,
help="Restrict to argument z only within the supplied regions. See"
" the Docstring for this module for more info on regions. Calculate"
" for all regions by default."
)
parser.add_argument(
"--no_mp",
action='store_true',
help="If this flag is set, do not compute results with mpmath. Saves"
" time if results have already been computed elsewhere. Fills in"
" \"expected\" column with None values."
)
args = parser.parse_args()
compute_mp = not args.no_mp
print(args.parameter_groups)
main(
args.outpath,
n_jobs=args.n_jobs,
box_size=args.box_size,
grid_size=args.grid_size,
parameter_groups=args.parameter_groups,
regions=args.regions,
compute_mp=compute_mp,
)
| 14,710
| 29.331959
| 79
|
py
|
scipy
|
scipy-main/scipy/special/_precompute/expn_asy.py
|
"""Precompute the polynomials for the asymptotic expansion of the
generalized exponential integral.
Sources
-------
[1] NIST, Digital Library of Mathematical Functions,
https://dlmf.nist.gov/8.20#ii
"""
import os
try:
import sympy
from sympy import Poly
x = sympy.symbols('x')
except ImportError:
pass
def generate_A(K):
    """Polynomials A_k of the expn asymptotic expansion, for k = 0..K.

    Uses the recurrence A_{k+1} = (1 - 2kx) A_k + x(x + 1) A_k'.
    """
    polys = [Poly(1, x)]
    for k in range(K):
        prev = polys[k]
        nxt = Poly(1 - 2*k*x, x)*prev + Poly(x*(x + 1))*prev.diff()
        polys.append(nxt)
    return polys
WARNING = """\
/* This file was automatically generated by _precompute/expn_asy.py.
* Do not edit it manually!
*/
"""
def main():
    """Regenerate ``cephes/expn.h`` with the asymptotic-expansion tables.

    Bug fix: the original code computed each ``', '.join(...)`` and threw
    the result away, then wrote f-strings containing ``{{tmp}}`` — which
    emit the literal text ``{tmp}`` into the header instead of the
    coefficient lists. The joined strings are now bound to ``tmp`` and
    interpolated, with ``{{``/``}}`` producing the literal C braces.
    """
    print(__doc__)
    fn = os.path.join('..', 'cephes', 'expn.h')
    K = 12
    A = generate_A(K)
    with open(fn + '.new', 'w') as f:
        f.write(WARNING)
        f.write(f"#define nA {len(A)}\n")
        for k, Ak in enumerate(A):
            # Coefficients of A_k at 18 significant digits.
            tmp = ', '.join([str(x.evalf(18)) for x in Ak.coeffs()])
            f.write(f"static const double A{k}[] = {{{tmp}}};\n")
        tmp = ", ".join([f"A{k}" for k in range(K + 1)])
        f.write(f"static const double *A[] = {{{tmp}}};\n")
        tmp = ", ".join([str(Ak.degree()) for Ak in A])
        f.write(f"static const int Adegs[] = {{{tmp}}};\n")
    # Atomic replace so a partially written header never lands in place.
    os.rename(fn + '.new', fn)
if __name__ == "__main__":
main()
| 1,265
| 22.018182
| 71
|
py
|
scipy
|
scipy-main/scipy/special/_precompute/setup.py
|
def configuration(parent_name='special', top_path=None):
    """numpy.distutils configuration for the ``_precompute`` subpackage."""
    from numpy.distutils.misc_util import Configuration
    return Configuration('_precompute', parent_name, top_path)
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration().todict())
| 307
| 27
| 64
|
py
|
scipy
|
scipy-main/scipy/special/_precompute/wrightomega.py
|
import numpy as np
try:
import mpmath
except ImportError:
pass
def mpmath_wrightomega(x):
    # Wright omega via Lambert W: omega(x) = W(exp(x)) on the principal
    # branch. NOTE(review): the second argument '-0.5' is mpmath's branch
    # selector — confirm against mpmath's lambertw docs that this fractional
    # branch yields the intended omega evaluation for real x.
    return mpmath.lambertw(mpmath.exp(x), mpmath.mpf('-0.5'))
def wrightomega_series_error(x):
    """Relative error of the leading-order approximation omega(x) ~ x."""
    desired = mpmath_wrightomega(x)
    approx = x
    return abs(approx - desired) / desired
def wrightomega_exp_error(x):
    """Relative error of the approximation omega(x) ~ exp(x)."""
    desired = mpmath_wrightomega(x)
    return abs(mpmath.exp(x) - desired) / desired
def main():
    """Report where each approximation reaches double precision."""
    # Target accuracy: double-precision epsilon with a factor-2 margin.
    desired_error = 2 * np.finfo(float).eps
    print('Series Error')
    # omega(x) ~ x holds for large positive x.
    for x in [1e5, 1e10, 1e15, 1e20]:
        with mpmath.workdps(100):
            error = wrightomega_series_error(x)
        print(x, error, error < desired_error)
    print('Exp error')
    # omega(x) ~ exp(x) holds for large negative x.
    for x in [-10, -25, -50, -100, -200, -400, -700, -740]:
        with mpmath.workdps(100):
            error = wrightomega_exp_error(x)
        print(x, error, error < desired_error)
if __name__ == '__main__':
main()
| 955
| 21.761905
| 61
|
py
|
scipy
|
scipy-main/scipy/special/_precompute/zetac.py
|
"""Compute the Taylor series for zeta(x) - 1 around x = 0."""
try:
import mpmath
except ImportError:
pass
def zetac_series(N):
    """First *N* Taylor coefficients of zeta(x) - 1 about x = 0.

    The constant term is zeta(0) - 1 = -3/2; the higher coefficients
    are zeta^(n)(0) / n!.
    """
    with mpmath.workdps(100):
        result = [-1.5]
        result.extend(mpmath.diff(mpmath.zeta, 0, n)/mpmath.factorial(n)
                      for n in range(1, N))
    return result
def main():
    """Print the zetac Taylor coefficients, highest order first."""
    print(__doc__)
    coeffs = zetac_series(10)
    # Format at 20 significant digits; the [::-1] prints the highest-order
    # coefficient first.
    coeffs = [mpmath.nstr(x, 20, min_fixed=0, max_fixed=0)
              for x in coeffs]
    print("\n".join(coeffs[::-1]))
if __name__ == '__main__':
main()
| 591
| 20.142857
| 70
|
py
|
scipy
|
scipy-main/scipy/special/_precompute/utils.py
|
try:
import mpmath as mp
except ImportError:
pass
try:
from sympy.abc import x
except ImportError:
pass
def lagrange_inversion(a):
    """Given a series
    f(x) = a[1]*x + a[2]*x**2 + ... + a[n-1]*x**(n - 1),
    use the Lagrange inversion formula to compute a series
    g(x) = b[1]*x + b[2]*x**2 + ... + b[n-1]*x**(n - 1)
    so that f(g(x)) = g(f(x)) = x mod x**n. We must have a[0] = 0, so
    necessarily b[0] = 0 too.
    The algorithm is naive and could be improved, but speed isn't an
    issue here and it's easy to read.
    """
    n = len(a)
    f = sum(a[i]*x**i for i in range(n))
    # h = x / f(x), truncated to order n; the reversion coefficients are
    # extracted from its powers below.
    h = (x/f).series(x, 0, n).removeO()
    hpower = [h**0]
    for k in range(n):
        hpower.append((hpower[-1]*h).expand())
    b = [mp.mpf(0)]
    for k in range(1, n):
        # Lagrange inversion formula: b[k] = [x^(k-1)] h(x)^k / k.
        b.append(hpower[k].coeff(x, k - 1)/k)
    # Convert sympy numbers to mpmath floats. Note the comprehension's loop
    # variable deliberately shadows the sympy symbol x here.
    b = [mp.mpf(x) for x in b]
    return b
| 887
| 21.769231
| 69
|
py
|
scipy
|
scipy-main/scipy/special/_precompute/wright_bessel_data.py
|
"""Compute a grid of values for Wright's generalized Bessel function
and save the values to data files for use in tests. Using mpmath directly in
tests would take too long.
This takes about 10 minutes to run on a 2.7 GHz i7 Macbook Pro.
"""
from functools import lru_cache
import os
from time import time
import numpy as np
from scipy.special._mptestutils import mpf2float
try:
import mpmath as mp
except ImportError:
pass
# exp_inf: smallest value x for which exp(x) == inf
exp_inf = 709.78271289338403
# 64 Byte per value
@lru_cache(maxsize=100_000)
def rgamma_cached(x, dps):
    """Cached reciprocal gamma, 1/Gamma(x), evaluated at *dps* digits.

    The precision is part of the cache key so results computed at
    different working precisions never collide.
    """
    with mp.workdps(dps):
        return mp.rgamma(x)
def mp_wright_bessel(a, b, x, dps=50, maxterms=2000):
    """Compute Wright's generalized Bessel function as Series with mpmath.

    Sums x**k / (k! * Gamma(a*k + b)) over k = 0..inf at *dps* decimal
    digits and rounds the result to the nearest double.
    """
    with mp.workdps(dps):
        a, b, x = mp.mpf(a), mp.mpf(b), mp.mpf(x)
        # NOTE(review): tol is passed the digit count dps rather than a
        # small error tolerance — confirm intended against mpmath.nsum docs.
        res = mp.nsum(lambda k: x**k / mp.fac(k)
                      * rgamma_cached(a * k + b, dps=dps),
                      [0, mp.inf],
                      tol=dps, method='s', steps=[maxterms]
                      )
    return mpf2float(res)
def main():
    """Tabulate mp_wright_bessel on a filtered (a, b, x) grid and write
    the values to scipy/special/tests/data/local/wright_bessel.txt.

    Points known to exceed the required numerical accuracy are reported
    on stdout instead of being written to the data file.
    """
    t0 = time()
    print(__doc__)
    pwd = os.path.dirname(__file__)
    eps = np.finfo(float).eps * 100

    a_range = np.array([eps,
                        1e-4 * (1 - eps), 1e-4, 1e-4 * (1 + eps),
                        1e-3 * (1 - eps), 1e-3, 1e-3 * (1 + eps),
                        0.1, 0.5,
                        1 * (1 - eps), 1, 1 * (1 + eps),
                        1.5, 2, 4.999, 5, 10])
    b_range = np.array([0, eps, 1e-10, 1e-5, 0.1, 1, 2, 10, 20, 100])
    x_range = np.array([0, eps, 1 - eps, 1, 1 + eps,
                        1.5,
                        2 - eps, 2, 2 + eps,
                        9 - eps, 9, 9 + eps,
                        10 * (1 - eps), 10, 10 * (1 + eps),
                        100 * (1 - eps), 100, 100 * (1 + eps),
                        500, exp_inf, 1e3, 1e5, 1e10, 1e20])

    a_range, b_range, x_range = np.meshgrid(a_range, b_range, x_range,
                                            indexing='ij')
    a_range = a_range.flatten()
    b_range = b_range.flatten()
    x_range = x_range.flatten()

    # filter out some values, especially too large x
    bool_filter = ~((a_range < 5e-3) & (x_range >= exp_inf))
    bool_filter = bool_filter & ~((a_range < 0.2) & (x_range > exp_inf))
    bool_filter = bool_filter & ~((a_range < 0.5) & (x_range > 1e3))
    bool_filter = bool_filter & ~((a_range < 0.56) & (x_range > 5e3))
    bool_filter = bool_filter & ~((a_range < 1) & (x_range > 1e4))
    bool_filter = bool_filter & ~((a_range < 1.4) & (x_range > 1e5))
    bool_filter = bool_filter & ~((a_range < 1.8) & (x_range > 1e6))
    bool_filter = bool_filter & ~((a_range < 2.2) & (x_range > 1e7))
    bool_filter = bool_filter & ~((a_range < 2.5) & (x_range > 1e8))
    bool_filter = bool_filter & ~((a_range < 2.9) & (x_range > 1e9))
    bool_filter = bool_filter & ~((a_range < 3.3) & (x_range > 1e10))
    bool_filter = bool_filter & ~((a_range < 3.7) & (x_range > 1e11))
    bool_filter = bool_filter & ~((a_range < 4) & (x_range > 1e12))
    bool_filter = bool_filter & ~((a_range < 4.4) & (x_range > 1e13))
    bool_filter = bool_filter & ~((a_range < 4.7) & (x_range > 1e14))
    bool_filter = bool_filter & ~((a_range < 5.1) & (x_range > 1e15))
    bool_filter = bool_filter & ~((a_range < 5.4) & (x_range > 1e16))
    bool_filter = bool_filter & ~((a_range < 5.8) & (x_range > 1e17))
    # CLEANUP: this filter was applied twice in the original; the second
    # application of an idempotent mask intersection was a no-op.
    bool_filter = bool_filter & ~((a_range < 6.2) & (x_range > 1e18))
    bool_filter = bool_filter & ~((a_range < 6.5) & (x_range > 1e19))
    bool_filter = bool_filter & ~((a_range < 6.9) & (x_range > 1e20))

    # filter out known values that do not meet the required numerical accuracy
    # see test test_wright_data_grid_failures
    failing = np.array([
        [0.1, 100, 709.7827128933841],
        [0.5, 10, 709.7827128933841],
        [0.5, 10, 1000],
        [0.5, 100, 1000],
        [1, 20, 100000],
        [1, 100, 100000],
        [1.0000000000000222, 20, 100000],
        [1.0000000000000222, 100, 100000],
        [1.5, 0, 500],
        [1.5, 2.220446049250313e-14, 500],
        [1.5, 1.e-10, 500],
        [1.5, 1.e-05, 500],
        [1.5, 0.1, 500],
        [1.5, 20, 100000],
        [1.5, 100, 100000],
        ]).tolist()

    does_fail = np.full_like(a_range, False, dtype=bool)
    for i in range(x_range.size):
        if [a_range[i], b_range[i], x_range[i]] in failing:
            does_fail[i] = True

    # filter and flatten
    a_range = a_range[bool_filter]
    b_range = b_range[bool_filter]
    x_range = x_range[bool_filter]
    does_fail = does_fail[bool_filter]

    dataset = []
    print(f"Computing {x_range.size} single points.")
    print("Tests will fail for the following data points:")
    for i in range(x_range.size):
        a = a_range[i]
        b = b_range[i]
        x = x_range[i]
        # take care of difficult corner cases
        maxterms = 1000
        if a < 1e-6 and x >= exp_inf/10:
            maxterms = 2000
        f = mp_wright_bessel(a, b, x, maxterms=maxterms)
        if does_fail[i]:
            print("failing data point a, b, x, value = "
                  f"[{a}, {b}, {x}, {f}]")
        else:
            dataset.append((a, b, x, f))
    dataset = np.array(dataset)

    filename = os.path.join(pwd, '..', 'tests', 'data', 'local',
                            'wright_bessel.txt')
    np.savetxt(filename, dataset)

    print(f"{(time() - t0)/60:.1f} minutes elapsed")
if __name__ == "__main__":
main()
| 5,647
| 35.915033
| 78
|
py
|
scipy
|
scipy-main/scipy/special/_precompute/struve_convergence.py
|
"""
Convergence regions of the expansions used in ``struve.c``
Note that for v >> z both functions tend rapidly to 0,
and for v << -z, they tend to infinity.
The floating-point functions over/underflow in the lower left and right
corners of the figure.
Figure legend
=============
Red region
Power series is close (1e-12) to the mpmath result
Blue region
Asymptotic series is close to the mpmath result
Green region
Bessel series is close to the mpmath result
Dotted colored lines
Boundaries of the regions
Solid colored lines
Boundaries estimated by the routine itself. These will be used
for determining which of the results to use.
Black dashed line
The line z = 0.7*|v| + 12
"""
import numpy as np
import matplotlib.pyplot as plt
import mpmath
def err_metric(a, b, atol=1e-290):
    """Elementwise relative error of *a* against reference *b*.

    The `atol` in the denominator keeps the metric finite near b == 0.
    Entries where both values are the same infinity count as an exact
    match (error 0) rather than producing nan.
    """
    rel_err = np.abs(a - b) / (atol + np.abs(b))
    exact_inf_match = np.isinf(b) & (a == b)
    rel_err[exact_inf_match] = 0
    return rel_err
def do_plot(is_h=True):
    """Draw the convergence map for one Struve function on the current
    axes: Struve H if `is_h` is true, else Struve L.

    Compares the power-series, asymptotic, and Bessel-series evaluators
    against a 50-digit mpmath reference; see the module docstring for
    the figure legend.
    """
    from scipy.special._ufuncs import (_struve_power_series,
                                       _struve_asymp_large_z,
                                       _struve_bessel_series)

    vs = np.linspace(-1000, 1000, 91)
    zs = np.sort(np.r_[1e-5, 1.0, np.linspace(0, 700, 91)[1:]])

    # Each evaluator returns (value, error_estimate).
    rp = _struve_power_series(vs[:,None], zs[None,:], is_h)
    ra = _struve_asymp_large_z(vs[:,None], zs[None,:], is_h)
    rb = _struve_bessel_series(vs[:,None], zs[None,:], is_h)

    mpmath.mp.dps = 50
    if is_h:
        def sh(v, z):
            return float(mpmath.struveh(mpmath.mpf(v), mpmath.mpf(z)))
    else:
        def sh(v, z):
            return float(mpmath.struvel(mpmath.mpf(v), mpmath.mpf(z)))
    ex = np.vectorize(sh, otypes='d')(vs[:,None], zs[None,:])

    # True errors vs the mpmath reference (offset keeps log10 finite).
    err_a = err_metric(ra[0], ex) + 1e-300
    err_p = err_metric(rp[0], ex) + 1e-300
    err_b = err_metric(rb[0], ex) + 1e-300

    # Errors estimated by the routines themselves.
    err_est_a = abs(ra[1]/ra[0])
    err_est_p = abs(rp[1]/rp[0])
    err_est_b = abs(rb[1]/rb[0])

    z_cutoff = 0.7*abs(vs) + 12

    levels = [-1000, -12]

    plt.cla()

    # BUG FIX: the original called plt.hold(1) here; pyplot.hold was
    # removed in matplotlib 3.0 and now raises AttributeError.  Hold-on
    # behavior is the default, so the call is simply dropped.

    plt.contourf(vs, zs, np.log10(err_p).T, levels=levels, colors=['r', 'r'], alpha=0.1)
    plt.contourf(vs, zs, np.log10(err_a).T, levels=levels, colors=['b', 'b'], alpha=0.1)
    plt.contourf(vs, zs, np.log10(err_b).T, levels=levels, colors=['g', 'g'], alpha=0.1)

    plt.contour(vs, zs, np.log10(err_p).T, levels=levels, colors=['r', 'r'], linestyles=[':', ':'])
    plt.contour(vs, zs, np.log10(err_a).T, levels=levels, colors=['b', 'b'], linestyles=[':', ':'])
    plt.contour(vs, zs, np.log10(err_b).T, levels=levels, colors=['g', 'g'], linestyles=[':', ':'])

    lp = plt.contour(vs, zs, np.log10(err_est_p).T, levels=levels, colors=['r', 'r'], linestyles=['-', '-'])
    la = plt.contour(vs, zs, np.log10(err_est_a).T, levels=levels, colors=['b', 'b'], linestyles=['-', '-'])
    lb = plt.contour(vs, zs, np.log10(err_est_b).T, levels=levels, colors=['g', 'g'], linestyles=['-', '-'])

    plt.clabel(lp, fmt={-1000: 'P', -12: 'P'})
    plt.clabel(la, fmt={-1000: 'A', -12: 'A'})
    plt.clabel(lb, fmt={-1000: 'B', -12: 'B'})

    plt.plot(vs, z_cutoff, 'k--')

    plt.xlim(vs.min(), vs.max())
    plt.ylim(zs.min(), zs.max())

    plt.xlabel('v')
    plt.ylabel('z')
def main():
    # Render the Struve H and L convergence maps side by side, save the
    # combined figure, then display it.
    plt.clf()
    plt.subplot(121)
    do_plot(True)
    plt.title('Struve H')
    plt.subplot(122)
    do_plot(False)
    plt.title('Struve L')
    plt.savefig('struve_convergence.png')
    plt.show()
if __name__ == "__main__":
main()
| 3,462
| 27.154472
| 108
|
py
|
scipy
|
scipy-main/scipy/special/_precompute/gammainc_data.py
|
"""Compute gammainc and gammaincc for large arguments and parameters
and save the values to data files for use in tests. We can't just
compare to mpmath's gammainc in test_mpmath.TestSystematic because it
would take too long.
Note that mpmath's gammainc is computed using hypercomb, but since it
doesn't allow the user to increase the maximum number of terms used in
the series it doesn't converge for many arguments. To get around this
we copy the mpmath implementation but use more terms.
This takes about 17 minutes to run on a 2.3 GHz Macbook Pro with 4GB
ram.
Sources:
[1] Fredrik Johansson and others. mpmath: a Python library for
arbitrary-precision floating-point arithmetic (version 0.19),
December 2013. http://mpmath.org/.
"""
import os
from time import time
import numpy as np
from numpy import pi
from scipy.special._mptestutils import mpf2float
try:
import mpmath as mp
except ImportError:
pass
def gammainc(a, x, dps=50, maxterms=10**8):
    """Compute gammainc exactly like mpmath does but allow for more
    summands in hypercomb. See
    mpmath/functions/expintegrals.py#L134
    in the mpmath github repository.
    """
    with mp.workdps(dps):
        # NOTE(review): names mirror mpmath's internals — z holds the
        # gammainc parameter `a`, while a and b both hold the argument x.
        z, a, b = mp.mpf(a), mp.mpf(x), mp.mpf(x)
        G = [z]
        negb = mp.fneg(b, exact=True)
        def h(z):
            # Single hypercomb term, copied from mpmath's gammainc
            # implementation (see docstring reference).
            T1 = [mp.exp(negb), b, z], [1, z, -1], [], G, [1], [1+z], b
            return (T1,)
        res = mp.hypercomb(h, [z], maxterms=maxterms)
        return mpf2float(res)
def gammaincc(a, x, dps=50, maxterms=10**8):
    """Compute gammaincc exactly like mpmath does but allow for more
    terms in hypercomb. See
    mpmath/functions/expintegrals.py#L187
    in the mpmath github repository.
    """
    with mp.workdps(dps):
        # NOTE(review): names mirror mpmath's internals — z holds the
        # gammaincc parameter `a`, and a holds the argument x.
        z, a = a, x

        if mp.isint(z):
            try:
                # mpmath has a fast integer path
                return mpf2float(mp.gammainc(z, a=a, regularized=True))
            except mp.libmp.NoConvergence:
                pass
        nega = mp.fneg(a, exact=True)
        G = [z]
        # Use 2F0 series when possible; fall back to lower gamma representation
        try:
            def h(z):
                r = z-1
                return [([mp.exp(nega), a], [1, r], [], G, [1, -r], [], 1/nega)]
            return mpf2float(mp.hypercomb(h, [z], force_series=True))
        except mp.libmp.NoConvergence:
            def h(z):
                # Two-term fallback copied from mpmath's gammaincc
                # implementation (see docstring reference).
                T1 = [], [1, z-1], [z], G, [], [], 0
                T2 = [-mp.exp(nega), a, z], [1, z, -1], [], G, [1], [1+z], a
                return T1, T2
            return mpf2float(mp.hypercomb(h, [z], maxterms=maxterms))
def main():
    """Tabulate gammainc/gammaincc along rays a = r*cos(theta),
    x = r*sin(theta) and write the results to test data files."""
    start = time()
    # It would be nice to have data for larger values, but either this
    # requires prohibitively large precision (dps > 800) or mpmath has
    # a bug. For example, gammainc(1e20, 1e20, dps=800) returns a
    # value around 0.03, while the true value should be close to 0.5
    # (DLMF 8.12.15).
    print(__doc__)
    pwd = os.path.dirname(__file__)
    r = np.logspace(4, 14, 30)
    ltheta = np.logspace(np.log10(pi/4), np.log10(np.arctan(0.6)), 30)
    utheta = np.logspace(np.log10(pi/4), np.log10(np.arctan(1.4)), 30)

    for func, theta in [(gammainc, ltheta), (gammaincc, utheta)]:
        rg, thetag = np.meshgrid(r, theta)
        a = (rg*np.cos(thetag)).flatten()
        x = (rg*np.sin(thetag)).flatten()
        rows = []
        for a0, x0 in zip(a, x):
            if func is gammaincc:
                # Exploit the fast integer path in gammaincc whenever
                # possible so that the computation doesn't take too
                # long
                a0, x0 = np.floor(a0), np.floor(x0)
            rows.append((a0, x0, func(a0, x0)))
        outfile = os.path.join(pwd, '..', 'tests', 'data', 'local',
                               f'{func.__name__}.txt')
        np.savetxt(outfile, np.array(rows))

    print(f"{(time() - start)/60} minutes elapsed")
if __name__ == "__main__":
main()
| 4,077
| 31.624
| 80
|
py
|
scipy
|
scipy-main/scipy/special/_precompute/__init__.py
| 0
| 0
| 0
|
py
|
|
scipy
|
scipy-main/scipy/special/_precompute/cosine_cdf.py
|
import mpmath
def f(x):
    """CDF-style map of the raised cosine: (pi + x + sin(x)) / (2*pi)."""
    numerator = mpmath.pi + x + mpmath.sin(x)
    return numerator / (2*mpmath.pi)
# Note: 40 digits might be overkill; a few more digits than the default
# might be sufficient.
mpmath.mp.dps = 40
ts = mpmath.taylor(f, -mpmath.pi, 20)
p, q = mpmath.pade(ts, 9, 10)
p = [float(c) for c in p]
q = [float(c) for c in q]
print('p =', p)
print('q =', q)
| 354
| 18.722222
| 71
|
py
|
scipy
|
scipy-main/scipy/special/_precompute/lambertw.py
|
"""Compute a Pade approximation for the principal branch of the
Lambert W function around 0 and compare it to various other
approximations.
"""
import numpy as np
try:
import mpmath
import matplotlib.pyplot as plt
except ImportError:
pass
def lambertw_pade():
    """Return (p, q), the Pade-(3, 2) coefficients of the principal
    Lambert W branch expanded around 0."""
    taylor_coeffs = [mpmath.diff(mpmath.lambertw, 0, n=k) for k in range(6)]
    return mpmath.pade(taylor_coeffs, 3, 2)
def main():
    """Print the Pade coefficients and plot the error of three
    approximations to lambertw over a complex grid."""
    print(__doc__)
    with mpmath.workdps(50):
        p, q = lambertw_pade()
        # Reverse into np.polyval's highest-degree-first order.
        p, q = p[::-1], q[::-1]
        print(f"p = {p}")
        print(f"q = {q}")

    # Reference values on a 75x75 grid over [-1.5, 1.5]^2.
    x, y = np.linspace(-1.5, 1.5, 75), np.linspace(-1.5, 1.5, 75)
    x, y = np.meshgrid(x, y)
    z = x + 1j*y
    lambertw_std = []
    for z0 in z.flatten():
        lambertw_std.append(complex(mpmath.lambertw(z0)))
    lambertw_std = np.array(lambertw_std).reshape(x.shape)

    fig, axes = plt.subplots(nrows=3, ncols=1)
    # Compare Pade approximation to true result
    p = np.array([float(p0) for p0 in p])
    q = np.array([float(q0) for q0 in q])
    pade_approx = np.polyval(p, z)/np.polyval(q, z)
    pade_err = abs(pade_approx - lambertw_std)
    axes[0].pcolormesh(x, y, pade_err)
    # Compare two terms of asymptotic series to true result
    asy_approx = np.log(z) - np.log(np.log(z))
    asy_err = abs(asy_approx - lambertw_std)
    axes[1].pcolormesh(x, y, asy_err)
    # Compare two terms of the series around the branch point to the
    # true result
    p = np.sqrt(2*(np.exp(1)*z + 1))
    series_approx = -1 + p - p**2/3
    series_err = abs(series_approx - lambertw_std)
    im = axes[2].pcolormesh(x, y, series_err)
    fig.colorbar(im, ax=axes.ravel().tolist())
    plt.show()

    # Second figure: where is Pade better than the asymptotic series?
    fig, ax = plt.subplots(nrows=1, ncols=1)
    pade_better = pade_err < asy_err
    im = ax.pcolormesh(x, y, pade_better)
    t = np.linspace(-0.3, 0.3)
    ax.plot(-2.5*abs(t) - 0.2, t, 'r')
    fig.colorbar(im, ax=ax)
    plt.show()
if __name__ == '__main__':
main()
| 1,961
| 27.434783
| 69
|
py
|
scipy
|
scipy-main/scipy/special/utils/makenpz.py
|
"""
python makenpz.py DIRECTORY
Build a npz containing all data files in the directory.
"""
import os
import numpy as np
import argparse
from stat import ST_MTIME
def newer(source, target):
    """
    Return True if 'source' exists and is more recently modified than
    'target', or if 'source' exists and 'target' doesn't. Return False
    if both exist and 'target' is the same age or younger than 'source'.

    Raises
    ------
    ValueError
        If 'source' does not exist.
    """
    if not os.path.exists(source):
        raise ValueError(f"file '{os.path.abspath(source)}' does not exist")
    if not os.path.exists(target):
        return True
    # Strictly newer modification timestamp wins; equal mtimes are not
    # considered newer.
    return os.stat(source)[ST_MTIME] > os.stat(target)[ST_MTIME]
def main():
    """Bundle all .txt data files under a directory into one .npz file."""
    p = argparse.ArgumentParser(usage=(__doc__ or '').strip())
    p.add_argument('--use-timestamp', action='store_true', default=False,
                   help="don't rewrite npz file if it is newer than sources")
    p.add_argument('dirname')  # for Meson: 'boost' or 'gsl'
    p.add_argument("-o", "--outdir", type=str,
                   help="Relative path to the output directory")
    args = p.parse_args()
    if not args.outdir:
        # We're dealing with a distutils build here, write in-place:
        inp = os.path.normpath(args.dirname)
        outp = inp + ".npz"
    else:
        # Meson build: sources live under ../tests/data/<dirname>, and
        # the archive goes into the requested output directory.
        inp = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                           '..', 'tests', 'data', args.dirname)
        outdir_abs = os.path.join(os.getcwd(), args.outdir)
        outp = os.path.join(outdir_abs, args.dirname + ".npz")
    # Skip rebuilding if no sources
    if os.path.isfile(outp) and not os.path.isdir(inp):
        return
    # Find source files; each .txt file becomes an archive entry keyed
    # by its relative path with '/' replaced by '-'.
    files = []
    for dirpath, dirnames, filenames in os.walk(inp):
        for fn in filenames:
            if fn.endswith('.txt'):
                key = dirpath[len(inp)+1:] + '-' + fn[:-4]
                key = key.strip('-')
                files.append((key, os.path.join(dirpath, fn)))
    # Check if changes required: rebuild if the key set changed, any
    # source is newer than the archive, or this script itself changed.
    if args.use_timestamp and os.path.isfile(outp):
        try:
            old_data = np.load(outp)
            try:
                changed = set(old_data.keys()) != {key for key, _ in files}
            finally:
                old_data.close()
        except OSError:
            # corrupted file
            changed = True
        changed = changed or any(newer(fn, outp) for key, fn in files)
        changed = changed or newer(__file__, outp)
        if not changed:
            return
    data = {}
    for key, fn in files:
        data[key] = np.loadtxt(fn)
    np.savez_compressed(outp, **data)
if __name__ == "__main__":
main()
| 2,671
| 29.022472
| 78
|
py
|
scipy
|
scipy-main/scipy/special/utils/convert.py
|
# This script is used to parse BOOST special function test data into something
# we can easily import in numpy.
import re
import os
# Where to put the data (directory will be created)
DATA_DIR = 'scipy/special/tests/data/boost'
# Where to pull out boost data
BOOST_SRC = "boostmath/test"
CXX_COMMENT = re.compile(r'^\s+//')
DATA_REGEX = re.compile(r'^\s*/*\{*\s*SC_')
ITEM_REGEX = re.compile(r'[+-]?\d*\.?\d+(?:[eE][+-]?\d+)?')
HEADER_REGEX = re.compile(
r'const boost::array\<boost::array\<.*, (\d+)\>, (\d+)\> ([a-zA-Z_\d]+)')
IGNORE_PATTERNS = [
# Makes use of ldexp and casts
"hypergeometric_1F1_big_double_limited.ipp",
"hypergeometric_1F1_big_unsolved.ipp",
# Makes use of numeric_limits and ternary operator
"beta_small_data.ipp",
# Doesn't contain any data
"almost_equal.ipp",
# Derivatives functions don't exist
"bessel_y01_prime_data.ipp",
"bessel_yn_prime_data.ipp",
"sph_bessel_prime_data.ipp",
"sph_neumann_prime_data.ipp",
# Data files not needed by scipy special tests.
"ibeta_derivative_",
r"ellint_d2?_",
"jacobi_",
"heuman_lambda_",
"hypergeometric_",
"nct_",
r".*gammap1m1_",
"trig_",
"powm1_data.ipp",
]
def _raw_data(line):
    """Extract the numeric literal from each comma-separated item of
    one .ipp data line, skipping items with no number."""
    matches = (ITEM_REGEX.search(item) for item in line.split(','))
    return [m.group(0) for m in matches if m]
def parse_ipp_file(filename):
    """Parse a BOOST .ipp test-data file into a dict mapping array
    names to lists of rows (each row a list of numeric strings)."""
    print(filename)

    with open(filename) as a:
        lines = a.readlines()

    data = {}
    i = 0
    # Manual index-based scan: each array starts with a header giving
    # its inner size d, row count n, and name, followed by data lines
    # optionally interleaved with C++ comments.
    while (i < len(lines)):
        line = lines[i]
        m = HEADER_REGEX.search(line)
        if m:
            d = int(m.group(1))
            n = int(m.group(2))
            print(f"d = {d}, n = {n}")
            cdata = []
            i += 1
            line = lines[i]
            # Skip comments
            while CXX_COMMENT.match(line):
                i += 1
                line = lines[i]
            while DATA_REGEX.match(line):
                cdata.append(_raw_data(line))
                i += 1
                line = lines[i]
                # Skip comments
                while CXX_COMMENT.match(line):
                    i += 1
                    line = lines[i]
            # Sanity check: the header promised n rows.
            if not len(cdata) == n:
                raise ValueError(f"parsed data: {len(cdata)}, expected {n}")
            data[m.group(3)] = cdata
        else:
            i += 1

    return data
def dump_dataset(filename, data):
    """Write *data* (an iterable of string sequences) to *filename*,
    one space-joined row per line."""
    # Context manager replaces the original try/finally close and
    # guarantees the file is closed even if a row fails to join.
    with open(filename, 'w') as fid:
        for line in data:
            fid.write("%s\n" % " ".join(line))
def dump_datasets(filename):
    """Parse one .ipp file and write each dataset it contains to a
    .txt file under DATA_DIR/<basename>_<ext>/."""
    base, ext = os.path.splitext(os.path.basename(filename))
    datadir = os.path.join(DATA_DIR, f"{base}_{ext[1:]}")
    os.makedirs(datadir)
    for name, rows in parse_ipp_file(filename).items():
        print(name, len(rows))
        dump_dataset(os.path.join(datadir, name) + '.txt', rows)
if __name__ == '__main__':
    # Convert every BOOST .ipp data file (except ignored patterns) into
    # per-dataset text files.
    for filename in sorted(os.listdir(BOOST_SRC)):
        # Note: Misses data in hpp files (e.g. powm1_sqrtp1m1_test.hpp)
        if filename.endswith(".ipp"):
            if any(re.match(pattern, filename) for pattern in IGNORE_PATTERNS):
                continue
            path = os.path.join(BOOST_SRC, filename)
            print(f"================= {path} ===============")
            dump_datasets(path)
| 3,443
| 26.333333
| 79
|
py
|
scipy
|
scipy-main/scipy/special/utils/datafunc.py
|
import csv
import numpy as np
def parse_txt_data(filename):
    """Load a comma-separated text file of floats as a 2-D ndarray.

    Raises
    ------
    ValueError
        If the rows do not all have the same number of columns; the
        offending row is passed as the exception argument.
    """
    # `with` replaces the original try/finally close; the long
    # commented-out alternative implementation was dead code and has
    # been removed.
    with open(filename) as f:
        reader = csv.reader(f, delimiter=',')
        data = [list(map(float, row)) for row in reader]

    ncols = len(data[0])
    for row in data:
        # Reject ragged input instead of silently producing an
        # object array.
        if len(row) != ncols:
            raise ValueError(row)

    return np.array(data)
def run_test(filename, funcs, args=[0]):
    """Evaluate *funcs* against reference data loaded from *filename*.

    With a single argument column (default), returns (x, residuals)
    where residuals[i] = funcs[i](x) - data[:, i + 1].  With multiple
    argument columns, applies the single function in *funcs* to those
    columns and returns the result.  `args` lists argument column
    indices; it is never mutated, so the mutable default is safe.
    """
    nargs = len(args)
    if len(funcs) > 1 and nargs > 1:
        raise ValueError("nargs > 1 and len(funcs) > 1 not supported")

    data = parse_txt_data(filename)
    if data.shape[1] != len(funcs) + nargs:
        raise ValueError("data has %d items / row, but len(funcs) = %d and "
                         "nargs = %d" % (data.shape[1], len(funcs), nargs))

    if nargs > 1:
        f = funcs[0]
        # BUG FIX: the original did `for i in nargs`, iterating over an
        # int (TypeError), and indexed rows instead of columns.  `args`
        # holds column indices, consistent with the branch below.
        cols = [data[:, args[i]] for i in range(nargs)]
        return f(*cols)
    else:
        y = [f(data[:, 0]) - data[:, idx + 1] for idx, f in enumerate(funcs)]
        return data[:, 0], y
if __name__ == '__main__':
from convert import DATA_DIR
import os
data = []
for root, dirs, files in os.walk(DATA_DIR):
for f in files:
name = os.path.join(root, f)
print(name)
data.append(parse_txt_data(name))
| 1,750
| 27.241935
| 82
|
py
|
scipy
|
scipy-main/scipy/constants/constants.py
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.constants` namespace for importing the functions
# included below.
import warnings
from . import _constants
__all__ = [ # noqa: F822
'Avogadro', 'Boltzmann', 'Btu', 'Btu_IT', 'Btu_th', 'G',
'Julian_year', 'N_A', 'Planck', 'R', 'Rydberg',
'Stefan_Boltzmann', 'Wien', 'acre', 'alpha',
'angstrom', 'arcmin', 'arcminute', 'arcsec',
'arcsecond', 'astronomical_unit', 'atm',
'atmosphere', 'atomic_mass', 'atto', 'au', 'bar',
'barrel', 'bbl', 'blob', 'c', 'calorie',
'calorie_IT', 'calorie_th', 'carat', 'centi',
'convert_temperature', 'day', 'deci', 'degree',
'degree_Fahrenheit', 'deka', 'dyn', 'dyne', 'e',
'eV', 'electron_mass', 'electron_volt',
'elementary_charge', 'epsilon_0', 'erg',
'exa', 'exbi', 'femto', 'fermi', 'fine_structure',
'fluid_ounce', 'fluid_ounce_US', 'fluid_ounce_imp',
'foot', 'g', 'gallon', 'gallon_US', 'gallon_imp',
'gas_constant', 'gibi', 'giga', 'golden', 'golden_ratio',
'grain', 'gram', 'gravitational_constant', 'h', 'hbar',
'hectare', 'hecto', 'horsepower', 'hour', 'hp',
'inch', 'k', 'kgf', 'kibi', 'kilo', 'kilogram_force',
'kmh', 'knot', 'lambda2nu', 'lb', 'lbf',
'light_year', 'liter', 'litre', 'long_ton', 'm_e',
'm_n', 'm_p', 'm_u', 'mach', 'mebi', 'mega',
'metric_ton', 'micro', 'micron', 'mil', 'mile',
'milli', 'minute', 'mmHg', 'mph', 'mu_0', 'nano',
'nautical_mile', 'neutron_mass', 'nu2lambda',
'ounce', 'oz', 'parsec', 'pebi', 'peta',
'pi', 'pico', 'point', 'pound', 'pound_force',
'proton_mass', 'psi', 'pt', 'short_ton',
'sigma', 'slinch', 'slug', 'speed_of_light',
'speed_of_sound', 'stone', 'survey_foot',
'survey_mile', 'tebi', 'tera', 'ton_TNT',
'torr', 'troy_ounce', 'troy_pound', 'u',
'week', 'yard', 'year', 'yobi', 'yocto',
'yotta', 'zebi', 'zepto', 'zero_Celsius', 'zetta'
]
def __dir__():
    # Restrict dir() of this deprecated shim module to the names it
    # forwards (PEP 562 module __dir__).
    return __all__
def __getattr__(name):
    # PEP 562 module-level __getattr__: forward attribute access on the
    # deprecated scipy.constants.constants namespace to _constants,
    # emitting a DeprecationWarning pointing at the caller.
    if name not in __all__:
        raise AttributeError(
            "scipy.constants.constants is deprecated and has no attribute "
            f"{name}. Try looking in scipy.constants instead.")

    warnings.warn(f"Please use `{name}` from the `scipy.constants` namespace, "
                  "the `scipy.constants.constants` namespace is deprecated.",
                  category=DeprecationWarning, stacklevel=2)

    return getattr(_constants, name)
| 2,477
| 38.967742
| 79
|
py
|
scipy
|
scipy-main/scipy/constants/setup.py
|
def configuration(parent_package='', top_path=None):
    """numpy.distutils configuration for the scipy.constants subpackage."""
    from numpy.distutils.misc_util import Configuration

    cfg = Configuration('constants', parent_package, top_path)
    cfg.add_data_dir('tests')
    return cfg
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| 347
| 30.636364
| 65
|
py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.