content stringlengths 1 103k ⌀ | path stringlengths 8 216 | filename stringlengths 2 179 | language stringclasses 15
values | size_bytes int64 2 189k | quality_score float64 0.5 0.95 | complexity float64 0 1 | documentation_ratio float64 0 1 | repository stringclasses 5
values | stars int64 0 1k | created_date stringdate 2023-07-10 19:21:08 2025-07-09 19:11:45 | license stringclasses 4
values | is_test bool 2
classes | file_hash stringlengths 32 32 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
\n\n | .venv\Lib\site-packages\numpy\f2py\__pycache__\cfuncs.cpython-313.pyc | cfuncs.cpython-313.pyc | Other | 49,095 | 0.95 | 0.152288 | 0.195331 | awesome-app | 323 | 2025-04-27T11:54:45.239863 | Apache-2.0 | false | 390790c1b243b450f44d5e4256afb794 |
\n\n | .venv\Lib\site-packages\numpy\f2py\__pycache__\common_rules.cpython-313.pyc | common_rules.cpython-313.pyc | Other | 7,549 | 0.8 | 0.033058 | 0.008772 | react-lib | 765 | 2025-03-22T02:27:38.408913 | BSD-3-Clause | false | 00c6356bbfd795241fcfb9442d87cb8e |
\n\n | .venv\Lib\site-packages\numpy\f2py\__pycache__\diagnose.cpython-313.pyc | diagnose.cpython-313.pyc | Other | 6,638 | 0.95 | 0.018519 | 0.02 | vue-tools | 915 | 2025-02-05T04:31:55.277192 | Apache-2.0 | false | dc2b355d4ed2e61c3e1dd6160c62489a |
\n\n | .venv\Lib\site-packages\numpy\f2py\__pycache__\f2py2e.cpython-313.pyc | f2py2e.cpython-313.pyc | Other | 34,802 | 0.95 | 0.035581 | 0.00207 | react-lib | 704 | 2025-05-25T09:40:31.962864 | GPL-3.0 | false | b65cb805b95f7d89d6ab0c146f4dce59 |
\n\n | .venv\Lib\site-packages\numpy\f2py\__pycache__\f90mod_rules.cpython-313.pyc | f90mod_rules.cpython-313.pyc | Other | 12,708 | 0.95 | 0.098684 | 0.006993 | awesome-app | 284 | 2025-06-27T10:57:59.632003 | Apache-2.0 | false | 6cddda2f65d6810256d3580fc4221f3c |
\n\n | .venv\Lib\site-packages\numpy\f2py\__pycache__\func2subr.cpython-313.pyc | func2subr.cpython-313.pyc | Other | 12,050 | 0.95 | 0.05 | 0 | python-kit | 786 | 2023-09-03T07:04:31.224348 | MIT | false | 00390418ab85c78f826730df690f0161 |
\n\n | .venv\Lib\site-packages\numpy\f2py\__pycache__\rules.cpython-313.pyc | rules.cpython-313.pyc | Other | 53,173 | 0.95 | 0.082645 | 0.135443 | vue-tools | 736 | 2023-11-03T00:28:18.266631 | GPL-3.0 | false | fadd9658dc63cfedc9f8ea30722a5268 |
\n\n | .venv\Lib\site-packages\numpy\f2py\__pycache__\symbolic.cpython-313.pyc | symbolic.cpython-313.pyc | Other | 80,210 | 0.75 | 0.017442 | 0.007862 | awesome-app | 963 | 2023-10-02T19:26:07.527829 | GPL-3.0 | false | d8295aa7084ad33333566f3d52ed3324 |
\n\n | .venv\Lib\site-packages\numpy\f2py\__pycache__\use_rules.cpython-313.pyc | use_rules.cpython-313.pyc | Other | 4,256 | 0.95 | 0.1 | 0.059701 | python-kit | 238 | 2023-07-16T17:01:18.323966 | MIT | false | 82feb8245d4500bca2d4ba16b94aeae5 |
\n\n | .venv\Lib\site-packages\numpy\f2py\__pycache__\_isocbind.cpython-313.pyc | _isocbind.cpython-313.pyc | Other | 1,900 | 0.95 | 0.033333 | 0 | react-lib | 826 | 2024-06-06T18:18:39.674754 | BSD-3-Clause | false | d8f8cdce9d724e0382cd7299bb018faa |
\n\n | .venv\Lib\site-packages\numpy\f2py\__pycache__\_src_pyf.cpython-313.pyc | _src_pyf.cpython-313.pyc | Other | 9,109 | 0.95 | 0.078431 | 0.040816 | vue-tools | 145 | 2024-11-19T21:52:17.869971 | MIT | false | 391f99e247e704692cc120518621ba03 |
\n\n | .venv\Lib\site-packages\numpy\f2py\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 3,010 | 0.85 | 0.095238 | 0.1 | python-kit | 695 | 2024-02-12T18:51:24.821558 | GPL-3.0 | false | 60ed65fea025c9c8d3186fc63b9d7b57 |
\n\n | .venv\Lib\site-packages\numpy\f2py\__pycache__\__main__.cpython-313.pyc | __main__.cpython-313.pyc | Other | 255 | 0.8 | 0 | 0 | python-kit | 908 | 2024-10-20T00:34:06.696836 | GPL-3.0 | false | c65ed90f28e0b53b5a8870e60582a8e8 |
\n\n | .venv\Lib\site-packages\numpy\f2py\__pycache__\__version__.cpython-313.pyc | __version__.cpython-313.pyc | Other | 236 | 0.7 | 0 | 0 | react-lib | 714 | 2024-07-02T11:18:02.482732 | Apache-2.0 | false | 4828949606cf6d177c7942cae0cdc2b2 |
def __getattr__(attr_name):\n import warnings\n\n from numpy.fft import _helper\n ret = getattr(_helper, attr_name, None)\n if ret is None:\n raise AttributeError(\n f"module 'numpy.fft.helper' has no attribute {attr_name}")\n warnings.warn(\n "The numpy.fft.helper has been made private and renamed to "\n "numpy.fft._helper. All four functions exported by it (i.e. fftshift, "\n "ifftshift, fftfreq, rfftfreq) are available from numpy.fft. "\n f"Please use numpy.fft.{attr_name} instead.",\n DeprecationWarning,\n stacklevel=3\n )\n return ret\n | .venv\Lib\site-packages\numpy\fft\helper.py | helper.py | Python | 628 | 0.85 | 0.117647 | 0 | python-kit | 835 | 2023-08-28T03:40:44.909819 | GPL-3.0 | false | f55dd430897e11ea62427c063168142d |
from typing import Any\nfrom typing import Literal as L\n\nfrom typing_extensions import deprecated\n\nimport numpy as np\nfrom numpy._typing import ArrayLike, NDArray, _ShapeLike\n\nfrom ._helper import integer_types as integer_types\n\n__all__ = ["fftfreq", "fftshift", "ifftshift", "rfftfreq"]\n\n###\n\n@deprecated("Please use `numpy.fft.fftshift` instead.")\ndef fftshift(x: ArrayLike, axes: _ShapeLike | None = None) -> NDArray[Any]: ...\n@deprecated("Please use `numpy.fft.ifftshift` instead.")\ndef ifftshift(x: ArrayLike, axes: _ShapeLike | None = None) -> NDArray[Any]: ...\n@deprecated("Please use `numpy.fft.fftfreq` instead.")\ndef fftfreq(n: int | np.integer, d: ArrayLike = 1.0, device: L["cpu"] | None = None) -> NDArray[Any]: ...\n@deprecated("Please use `numpy.fft.rfftfreq` instead.")\ndef rfftfreq(n: int | np.integer, d: ArrayLike = 1.0, device: L["cpu"] | None = None) -> NDArray[Any]: ...\n | .venv\Lib\site-packages\numpy\fft\helper.pyi | helper.pyi | Other | 913 | 0.95 | 0.181818 | 0.0625 | node-utils | 700 | 2025-03-14T22:21:39.523638 | GPL-3.0 | false | 4a604e66aafb8e9e976450e117881309 |
"""\nDiscrete Fourier Transforms - _helper.py\n\n"""\nfrom numpy._core import arange, asarray, empty, integer, roll\nfrom numpy._core.overrides import array_function_dispatch, set_module\n\n# Created by Pearu Peterson, September 2002\n\n__all__ = ['fftshift', 'ifftshift', 'fftfreq', 'rfftfreq']\n\ninteger_types = (int, integer)\n\n\ndef _fftshift_dispatcher(x, axes=None):\n return (x,)\n\n\n@array_function_dispatch(_fftshift_dispatcher, module='numpy.fft')\ndef fftshift(x, axes=None):\n """\n Shift the zero-frequency component to the center of the spectrum.\n\n This function swaps half-spaces for all axes listed (defaults to all).\n Note that ``y[0]`` is the Nyquist component only if ``len(x)`` is even.\n\n Parameters\n ----------\n x : array_like\n Input array.\n axes : int or shape tuple, optional\n Axes over which to shift. Default is None, which shifts all axes.\n\n Returns\n -------\n y : ndarray\n The shifted array.\n\n See Also\n --------\n ifftshift : The inverse of `fftshift`.\n\n Examples\n --------\n >>> import numpy as np\n >>> freqs = np.fft.fftfreq(10, 0.1)\n >>> freqs\n array([ 0., 1., 2., ..., -3., -2., -1.])\n >>> np.fft.fftshift(freqs)\n array([-5., -4., -3., -2., -1., 0., 1., 2., 3., 4.])\n\n Shift the zero-frequency component only along the second axis:\n\n >>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3)\n >>> freqs\n array([[ 0., 1., 2.],\n [ 3., 4., -4.],\n [-3., -2., -1.]])\n >>> np.fft.fftshift(freqs, axes=(1,))\n array([[ 2., 0., 1.],\n [-4., 3., 4.],\n [-1., -3., -2.]])\n\n """\n x = asarray(x)\n if axes is None:\n axes = tuple(range(x.ndim))\n shift = [dim // 2 for dim in x.shape]\n elif isinstance(axes, integer_types):\n shift = x.shape[axes] // 2\n else:\n shift = [x.shape[ax] // 2 for ax in axes]\n\n return roll(x, shift, axes)\n\n\n@array_function_dispatch(_fftshift_dispatcher, module='numpy.fft')\ndef ifftshift(x, axes=None):\n """\n The inverse of `fftshift`. 
Although identical for even-length `x`, the\n functions differ by one sample for odd-length `x`.\n\n Parameters\n ----------\n x : array_like\n Input array.\n axes : int or shape tuple, optional\n Axes over which to calculate. Defaults to None, which shifts all axes.\n\n Returns\n -------\n y : ndarray\n The shifted array.\n\n See Also\n --------\n fftshift : Shift zero-frequency component to the center of the spectrum.\n\n Examples\n --------\n >>> import numpy as np\n >>> freqs = np.fft.fftfreq(9, d=1./9).reshape(3, 3)\n >>> freqs\n array([[ 0., 1., 2.],\n [ 3., 4., -4.],\n [-3., -2., -1.]])\n >>> np.fft.ifftshift(np.fft.fftshift(freqs))\n array([[ 0., 1., 2.],\n [ 3., 4., -4.],\n [-3., -2., -1.]])\n\n """\n x = asarray(x)\n if axes is None:\n axes = tuple(range(x.ndim))\n shift = [-(dim // 2) for dim in x.shape]\n elif isinstance(axes, integer_types):\n shift = -(x.shape[axes] // 2)\n else:\n shift = [-(x.shape[ax] // 2) for ax in axes]\n\n return roll(x, shift, axes)\n\n\n@set_module('numpy.fft')\ndef fftfreq(n, d=1.0, device=None):\n """\n Return the Discrete Fourier Transform sample frequencies.\n\n The returned float array `f` contains the frequency bin centers in cycles\n per unit of the sample spacing (with zero at the start). For instance, if\n the sample spacing is in seconds, then the frequency unit is cycles/second.\n\n Given a window length `n` and a sample spacing `d`::\n\n f = [0, 1, ..., n/2-1, -n/2, ..., -1] / (d*n) if n is even\n f = [0, 1, ..., (n-1)/2, -(n-1)/2, ..., -1] / (d*n) if n is odd\n\n Parameters\n ----------\n n : int\n Window length.\n d : scalar, optional\n Sample spacing (inverse of the sampling rate). Defaults to 1.\n device : str, optional\n The device on which to place the created array. Default: ``None``.\n For Array-API interoperability only, so must be ``"cpu"`` if passed.\n\n .. 
versionadded:: 2.0.0\n\n Returns\n -------\n f : ndarray\n Array of length `n` containing the sample frequencies.\n\n Examples\n --------\n >>> import numpy as np\n >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5], dtype=float)\n >>> fourier = np.fft.fft(signal)\n >>> n = signal.size\n >>> timestep = 0.1\n >>> freq = np.fft.fftfreq(n, d=timestep)\n >>> freq\n array([ 0. , 1.25, 2.5 , ..., -3.75, -2.5 , -1.25])\n\n """\n if not isinstance(n, integer_types):\n raise ValueError("n should be an integer")\n val = 1.0 / (n * d)\n results = empty(n, int, device=device)\n N = (n - 1) // 2 + 1\n p1 = arange(0, N, dtype=int, device=device)\n results[:N] = p1\n p2 = arange(-(n // 2), 0, dtype=int, device=device)\n results[N:] = p2\n return results * val\n\n\n@set_module('numpy.fft')\ndef rfftfreq(n, d=1.0, device=None):\n """\n Return the Discrete Fourier Transform sample frequencies\n (for usage with rfft, irfft).\n\n The returned float array `f` contains the frequency bin centers in cycles\n per unit of the sample spacing (with zero at the start). For instance, if\n the sample spacing is in seconds, then the frequency unit is cycles/second.\n\n Given a window length `n` and a sample spacing `d`::\n\n f = [0, 1, ..., n/2-1, n/2] / (d*n) if n is even\n f = [0, 1, ..., (n-1)/2-1, (n-1)/2] / (d*n) if n is odd\n\n Unlike `fftfreq` (but like `scipy.fftpack.rfftfreq`)\n the Nyquist frequency component is considered to be positive.\n\n Parameters\n ----------\n n : int\n Window length.\n d : scalar, optional\n Sample spacing (inverse of the sampling rate). Defaults to 1.\n device : str, optional\n The device on which to place the created array. Default: ``None``.\n For Array-API interoperability only, so must be ``"cpu"`` if passed.\n\n .. 
versionadded:: 2.0.0\n\n Returns\n -------\n f : ndarray\n Array of length ``n//2 + 1`` containing the sample frequencies.\n\n Examples\n --------\n >>> import numpy as np\n >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5, -3, 4], dtype=float)\n >>> fourier = np.fft.rfft(signal)\n >>> n = signal.size\n >>> sample_rate = 100\n >>> freq = np.fft.fftfreq(n, d=1./sample_rate)\n >>> freq\n array([ 0., 10., 20., ..., -30., -20., -10.])\n >>> freq = np.fft.rfftfreq(n, d=1./sample_rate)\n >>> freq\n array([ 0., 10., 20., 30., 40., 50.])\n\n """\n if not isinstance(n, integer_types):\n raise ValueError("n should be an integer")\n val = 1.0 / (n * d)\n N = n // 2 + 1\n results = arange(0, N, dtype=int, device=device)\n return results * val\n | .venv\Lib\site-packages\numpy\fft\_helper.py | _helper.py | Python | 7,022 | 0.95 | 0.114894 | 0.005291 | node-utils | 953 | 2024-02-15T00:46:39.107540 | BSD-3-Clause | false | 5ec4f15092560d138153c1638e9c778c |
from typing import Any, Final, TypeVar, overload\nfrom typing import Literal as L\n\nfrom numpy import complexfloating, floating, generic, integer\nfrom numpy._typing import (\n ArrayLike,\n NDArray,\n _ArrayLike,\n _ArrayLikeComplex_co,\n _ArrayLikeFloat_co,\n _ShapeLike,\n)\n\n__all__ = ["fftfreq", "fftshift", "ifftshift", "rfftfreq"]\n\n_ScalarT = TypeVar("_ScalarT", bound=generic)\n\n###\n\ninteger_types: Final[tuple[type[int], type[integer]]] = ...\n\n###\n\n@overload\ndef fftshift(x: _ArrayLike[_ScalarT], axes: _ShapeLike | None = None) -> NDArray[_ScalarT]: ...\n@overload\ndef fftshift(x: ArrayLike, axes: _ShapeLike | None = None) -> NDArray[Any]: ...\n\n#\n@overload\ndef ifftshift(x: _ArrayLike[_ScalarT], axes: _ShapeLike | None = None) -> NDArray[_ScalarT]: ...\n@overload\ndef ifftshift(x: ArrayLike, axes: _ShapeLike | None = None) -> NDArray[Any]: ...\n\n#\n@overload\ndef fftfreq(n: int | integer, d: _ArrayLikeFloat_co = 1.0, device: L["cpu"] | None = None) -> NDArray[floating]: ...\n@overload\ndef fftfreq(n: int | integer, d: _ArrayLikeComplex_co = 1.0, device: L["cpu"] | None = None) -> NDArray[complexfloating]: ...\n\n#\n@overload\ndef rfftfreq(n: int | integer, d: _ArrayLikeFloat_co = 1.0, device: L["cpu"] | None = None) -> NDArray[floating]: ...\n@overload\ndef rfftfreq(n: int | integer, d: _ArrayLikeComplex_co = 1.0, device: L["cpu"] | None = None) -> NDArray[complexfloating]: ...\n | .venv\Lib\site-packages\numpy\fft\_helper.pyi | _helper.pyi | Other | 1,439 | 0.95 | 0.177778 | 0.142857 | python-kit | 524 | 2023-08-13T17:26:04.481912 | BSD-3-Clause | false | 61b6b158770a76f1cc7ea9b753d7c9d2 |
"""\nDiscrete Fourier Transforms\n\nRoutines in this module:\n\nfft(a, n=None, axis=-1, norm="backward")\nifft(a, n=None, axis=-1, norm="backward")\nrfft(a, n=None, axis=-1, norm="backward")\nirfft(a, n=None, axis=-1, norm="backward")\nhfft(a, n=None, axis=-1, norm="backward")\nihfft(a, n=None, axis=-1, norm="backward")\nfftn(a, s=None, axes=None, norm="backward")\nifftn(a, s=None, axes=None, norm="backward")\nrfftn(a, s=None, axes=None, norm="backward")\nirfftn(a, s=None, axes=None, norm="backward")\nfft2(a, s=None, axes=(-2,-1), norm="backward")\nifft2(a, s=None, axes=(-2, -1), norm="backward")\nrfft2(a, s=None, axes=(-2,-1), norm="backward")\nirfft2(a, s=None, axes=(-2, -1), norm="backward")\n\ni = inverse transform\nr = transform of purely real data\nh = Hermite transform\nn = n-dimensional transform\n2 = 2-dimensional transform\n(Note: 2D routines are just nD routines with different default\nbehavior.)\n\n"""\n__all__ = ['fft', 'ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn',\n 'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn']\n\nimport functools\nimport warnings\n\nfrom numpy._core import (\n asarray,\n conjugate,\n empty_like,\n overrides,\n reciprocal,\n result_type,\n sqrt,\n take,\n)\nfrom numpy.lib.array_utils import normalize_axis_index\n\nfrom . import _pocketfft_umath as pfu\n\narray_function_dispatch = functools.partial(\n overrides.array_function_dispatch, module='numpy.fft')\n\n\n# `inv_norm` is a float by which the result of the transform needs to be\n# divided. 
This replaces the original, more intuitive 'fct` parameter to avoid\n# divisions by zero (or alternatively additional checks) in the case of\n# zero-length axes during its computation.\ndef _raw_fft(a, n, axis, is_real, is_forward, norm, out=None):\n if n < 1:\n raise ValueError(f"Invalid number of FFT data points ({n}) specified.")\n\n # Calculate the normalization factor, passing in the array dtype to\n # avoid precision loss in the possible sqrt or reciprocal.\n if not is_forward:\n norm = _swap_direction(norm)\n\n real_dtype = result_type(a.real.dtype, 1.0)\n if norm is None or norm == "backward":\n fct = 1\n elif norm == "ortho":\n fct = reciprocal(sqrt(n, dtype=real_dtype))\n elif norm == "forward":\n fct = reciprocal(n, dtype=real_dtype)\n else:\n raise ValueError(f'Invalid norm value {norm}; should be "backward",'\n '"ortho" or "forward".')\n\n n_out = n\n if is_real:\n if is_forward:\n ufunc = pfu.rfft_n_even if n % 2 == 0 else pfu.rfft_n_odd\n n_out = n // 2 + 1\n else:\n ufunc = pfu.irfft\n else:\n ufunc = pfu.fft if is_forward else pfu.ifft\n\n axis = normalize_axis_index(axis, a.ndim)\n\n if out is None:\n if is_real and not is_forward: # irfft, complex in, real output.\n out_dtype = real_dtype\n else: # Others, complex output.\n out_dtype = result_type(a.dtype, 1j)\n out = empty_like(a, shape=a.shape[:axis] + (n_out,) + a.shape[axis + 1:],\n dtype=out_dtype)\n elif ((shape := getattr(out, "shape", None)) is not None\n and (len(shape) != a.ndim or shape[axis] != n_out)):\n raise ValueError("output array has wrong shape.")\n\n return ufunc(a, fct, axes=[(axis,), (), (axis,)], out=out)\n\n\n_SWAP_DIRECTION_MAP = {"backward": "forward", None: "forward",\n "ortho": "ortho", "forward": "backward"}\n\n\ndef _swap_direction(norm):\n try:\n return _SWAP_DIRECTION_MAP[norm]\n except KeyError:\n raise ValueError(f'Invalid norm value {norm}; should be "backward", '\n '"ortho" or "forward".') from None\n\n\ndef _fft_dispatcher(a, n=None, axis=None, norm=None, 
out=None):\n return (a, out)\n\n\n@array_function_dispatch(_fft_dispatcher)\ndef fft(a, n=None, axis=-1, norm=None, out=None):\n """\n Compute the one-dimensional discrete Fourier Transform.\n\n This function computes the one-dimensional *n*-point discrete Fourier\n Transform (DFT) with the efficient Fast Fourier Transform (FFT)\n algorithm [CT].\n\n Parameters\n ----------\n a : array_like\n Input array, can be complex.\n n : int, optional\n Length of the transformed axis of the output.\n If `n` is smaller than the length of the input, the input is cropped.\n If it is larger, the input is padded with zeros. If `n` is not given,\n the length of the input along the axis specified by `axis` is used.\n axis : int, optional\n Axis over which to compute the FFT. If not given, the last axis is\n used.\n norm : {"backward", "ortho", "forward"}, optional\n Normalization mode (see `numpy.fft`). Default is "backward".\n Indicates which direction of the forward/backward pair of transforms\n is scaled and with what normalization factor.\n\n .. versionadded:: 1.20.0\n\n The "backward", "forward" values were added.\n out : complex ndarray, optional\n If provided, the result will be placed in this array. It should be\n of the appropriate shape and dtype.\n\n .. 
versionadded:: 2.0.0\n\n Returns\n -------\n out : complex ndarray\n The truncated or zero-padded input, transformed along the axis\n indicated by `axis`, or the last one if `axis` is not specified.\n\n Raises\n ------\n IndexError\n If `axis` is not a valid axis of `a`.\n\n See Also\n --------\n numpy.fft : for definition of the DFT and conventions used.\n ifft : The inverse of `fft`.\n fft2 : The two-dimensional FFT.\n fftn : The *n*-dimensional FFT.\n rfftn : The *n*-dimensional FFT of real input.\n fftfreq : Frequency bins for given FFT parameters.\n\n Notes\n -----\n FFT (Fast Fourier Transform) refers to a way the discrete Fourier\n Transform (DFT) can be calculated efficiently, by using symmetries in the\n calculated terms. The symmetry is highest when `n` is a power of 2, and\n the transform is therefore most efficient for these sizes.\n\n The DFT is defined, with the conventions used in this implementation, in\n the documentation for the `numpy.fft` module.\n\n References\n ----------\n .. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the\n machine calculation of complex Fourier series," *Math. 
Comput.*\n 19: 297-301.\n\n Examples\n --------\n >>> import numpy as np\n >>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8))\n array([-2.33486982e-16+1.14423775e-17j, 8.00000000e+00-1.25557246e-15j,\n 2.33486982e-16+2.33486982e-16j, 0.00000000e+00+1.22464680e-16j,\n -1.14423775e-17+2.33486982e-16j, 0.00000000e+00+5.20784380e-16j,\n 1.14423775e-17+1.14423775e-17j, 0.00000000e+00+1.22464680e-16j])\n\n In this example, real input has an FFT which is Hermitian, i.e., symmetric\n in the real part and anti-symmetric in the imaginary part, as described in\n the `numpy.fft` documentation:\n\n >>> import matplotlib.pyplot as plt\n >>> t = np.arange(256)\n >>> sp = np.fft.fft(np.sin(t))\n >>> freq = np.fft.fftfreq(t.shape[-1])\n >>> _ = plt.plot(freq, sp.real, freq, sp.imag)\n >>> plt.show()\n\n """\n a = asarray(a)\n if n is None:\n n = a.shape[axis]\n output = _raw_fft(a, n, axis, False, True, norm, out)\n return output\n\n\n@array_function_dispatch(_fft_dispatcher)\ndef ifft(a, n=None, axis=-1, norm=None, out=None):\n """\n Compute the one-dimensional inverse discrete Fourier Transform.\n\n This function computes the inverse of the one-dimensional *n*-point\n discrete Fourier transform computed by `fft`. In other words,\n ``ifft(fft(a)) == a`` to within numerical accuracy.\n For a general description of the algorithm and definitions,\n see `numpy.fft`.\n\n The input should be ordered in the same way as is returned by `fft`,\n i.e.,\n\n * ``a[0]`` should contain the zero frequency term,\n * ``a[1:n//2]`` should contain the positive-frequency terms,\n * ``a[n//2 + 1:]`` should contain the negative-frequency terms, in\n increasing order starting from the most negative frequency.\n\n For an even number of input points, ``A[n//2]`` represents the sum of\n the values at the positive and negative Nyquist frequencies, as the two\n are aliased together. 
See `numpy.fft` for details.\n\n Parameters\n ----------\n a : array_like\n Input array, can be complex.\n n : int, optional\n Length of the transformed axis of the output.\n If `n` is smaller than the length of the input, the input is cropped.\n If it is larger, the input is padded with zeros. If `n` is not given,\n the length of the input along the axis specified by `axis` is used.\n See notes about padding issues.\n axis : int, optional\n Axis over which to compute the inverse DFT. If not given, the last\n axis is used.\n norm : {"backward", "ortho", "forward"}, optional\n Normalization mode (see `numpy.fft`). Default is "backward".\n Indicates which direction of the forward/backward pair of transforms\n is scaled and with what normalization factor.\n\n .. versionadded:: 1.20.0\n\n The "backward", "forward" values were added.\n\n out : complex ndarray, optional\n If provided, the result will be placed in this array. It should be\n of the appropriate shape and dtype.\n\n .. versionadded:: 2.0.0\n\n Returns\n -------\n out : complex ndarray\n The truncated or zero-padded input, transformed along the axis\n indicated by `axis`, or the last one if `axis` is not specified.\n\n Raises\n ------\n IndexError\n If `axis` is not a valid axis of `a`.\n\n See Also\n --------\n numpy.fft : An introduction, with definitions and general explanations.\n fft : The one-dimensional (forward) FFT, of which `ifft` is the inverse\n ifft2 : The two-dimensional inverse FFT.\n ifftn : The n-dimensional inverse FFT.\n\n Notes\n -----\n If the input parameter `n` is larger than the size of the input, the input\n is padded by appending zeros at the end. Even though this is the common\n approach, it might lead to surprising results. 
If a different padding is\n desired, it must be performed before calling `ifft`.\n\n Examples\n --------\n >>> import numpy as np\n >>> np.fft.ifft([0, 4, 0, 0])\n array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j]) # may vary\n\n Create and plot a band-limited signal with random phases:\n\n >>> import matplotlib.pyplot as plt\n >>> t = np.arange(400)\n >>> n = np.zeros((400,), dtype=complex)\n >>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,)))\n >>> s = np.fft.ifft(n)\n >>> plt.plot(t, s.real, label='real')\n [<matplotlib.lines.Line2D object at ...>]\n >>> plt.plot(t, s.imag, '--', label='imaginary')\n [<matplotlib.lines.Line2D object at ...>]\n >>> plt.legend()\n <matplotlib.legend.Legend object at ...>\n >>> plt.show()\n\n """\n a = asarray(a)\n if n is None:\n n = a.shape[axis]\n output = _raw_fft(a, n, axis, False, False, norm, out=out)\n return output\n\n\n@array_function_dispatch(_fft_dispatcher)\ndef rfft(a, n=None, axis=-1, norm=None, out=None):\n """\n Compute the one-dimensional discrete Fourier Transform for real input.\n\n This function computes the one-dimensional *n*-point discrete Fourier\n Transform (DFT) of a real-valued array by means of an efficient algorithm\n called the Fast Fourier Transform (FFT).\n\n Parameters\n ----------\n a : array_like\n Input array\n n : int, optional\n Number of points along transformation axis in the input to use.\n If `n` is smaller than the length of the input, the input is cropped.\n If it is larger, the input is padded with zeros. If `n` is not given,\n the length of the input along the axis specified by `axis` is used.\n axis : int, optional\n Axis over which to compute the FFT. If not given, the last axis is\n used.\n norm : {"backward", "ortho", "forward"}, optional\n Normalization mode (see `numpy.fft`). Default is "backward".\n Indicates which direction of the forward/backward pair of transforms\n is scaled and with what normalization factor.\n\n .. 
versionadded:: 1.20.0\n\n The "backward", "forward" values were added.\n\n out : complex ndarray, optional\n If provided, the result will be placed in this array. It should be\n of the appropriate shape and dtype.\n\n .. versionadded:: 2.0.0\n\n Returns\n -------\n out : complex ndarray\n The truncated or zero-padded input, transformed along the axis\n indicated by `axis`, or the last one if `axis` is not specified.\n If `n` is even, the length of the transformed axis is ``(n/2)+1``.\n If `n` is odd, the length is ``(n+1)/2``.\n\n Raises\n ------\n IndexError\n If `axis` is not a valid axis of `a`.\n\n See Also\n --------\n numpy.fft : For definition of the DFT and conventions used.\n irfft : The inverse of `rfft`.\n fft : The one-dimensional FFT of general (complex) input.\n fftn : The *n*-dimensional FFT.\n rfftn : The *n*-dimensional FFT of real input.\n\n Notes\n -----\n When the DFT is computed for purely real input, the output is\n Hermitian-symmetric, i.e. the negative frequency terms are just the complex\n conjugates of the corresponding positive-frequency terms, and the\n negative-frequency terms are therefore redundant. This function does not\n compute the negative frequency terms, and the length of the transformed\n axis of the output is therefore ``n//2 + 1``.\n\n When ``A = rfft(a)`` and fs is the sampling frequency, ``A[0]`` contains\n the zero-frequency term 0*fs, which is real due to Hermitian symmetry.\n\n If `n` is even, ``A[-1]`` contains the term representing both positive\n and negative Nyquist frequency (+fs/2 and -fs/2), and must also be purely\n real. 
If `n` is odd, there is no term at fs/2; ``A[-1]`` contains\n the largest positive frequency (fs/2*(n-1)/n), and is complex in the\n general case.\n\n If the input `a` contains an imaginary part, it is silently discarded.\n\n Examples\n --------\n >>> import numpy as np\n >>> np.fft.fft([0, 1, 0, 0])\n array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j]) # may vary\n >>> np.fft.rfft([0, 1, 0, 0])\n array([ 1.+0.j, 0.-1.j, -1.+0.j]) # may vary\n\n Notice how the final element of the `fft` output is the complex conjugate\n of the second element, for real input. For `rfft`, this symmetry is\n exploited to compute only the non-negative frequency terms.\n\n """\n a = asarray(a)\n if n is None:\n n = a.shape[axis]\n output = _raw_fft(a, n, axis, True, True, norm, out=out)\n return output\n\n\n@array_function_dispatch(_fft_dispatcher)\ndef irfft(a, n=None, axis=-1, norm=None, out=None):\n """\n Computes the inverse of `rfft`.\n\n This function computes the inverse of the one-dimensional *n*-point\n discrete Fourier Transform of real input computed by `rfft`.\n In other words, ``irfft(rfft(a), len(a)) == a`` to within numerical\n accuracy. (See Notes below for why ``len(a)`` is necessary here.)\n\n The input is expected to be in the form returned by `rfft`, i.e. the\n real zero-frequency term followed by the complex positive frequency terms\n in order of increasing frequency. Since the discrete Fourier Transform of\n real input is Hermitian-symmetric, the negative frequency terms are taken\n to be the complex conjugates of the corresponding positive frequency terms.\n\n Parameters\n ----------\n a : array_like\n The input array.\n n : int, optional\n Length of the transformed axis of the output.\n For `n` output points, ``n//2+1`` input points are necessary. If the\n input is longer than this, it is cropped. If it is shorter than this,\n it is padded with zeros. 
If `n` is not given, it is taken to be\n ``2*(m-1)`` where ``m`` is the length of the input along the axis\n specified by `axis`.\n axis : int, optional\n Axis over which to compute the inverse FFT. If not given, the last\n axis is used.\n norm : {"backward", "ortho", "forward"}, optional\n Normalization mode (see `numpy.fft`). Default is "backward".\n Indicates which direction of the forward/backward pair of transforms\n is scaled and with what normalization factor.\n\n .. versionadded:: 1.20.0\n\n The "backward", "forward" values were added.\n\n out : ndarray, optional\n If provided, the result will be placed in this array. It should be\n of the appropriate shape and dtype.\n\n .. versionadded:: 2.0.0\n\n Returns\n -------\n out : ndarray\n The truncated or zero-padded input, transformed along the axis\n indicated by `axis`, or the last one if `axis` is not specified.\n The length of the transformed axis is `n`, or, if `n` is not given,\n ``2*(m-1)`` where ``m`` is the length of the transformed axis of the\n input. To get an odd number of output points, `n` must be specified.\n\n Raises\n ------\n IndexError\n If `axis` is not a valid axis of `a`.\n\n See Also\n --------\n numpy.fft : For definition of the DFT and conventions used.\n rfft : The one-dimensional FFT of real input, of which `irfft` is inverse.\n fft : The one-dimensional FFT.\n irfft2 : The inverse of the two-dimensional FFT of real input.\n irfftn : The inverse of the *n*-dimensional FFT of real input.\n\n Notes\n -----\n Returns the real valued `n`-point inverse discrete Fourier transform\n of `a`, where `a` contains the non-negative frequency terms of a\n Hermitian-symmetric sequence. `n` is the length of the result, not the\n input.\n\n If you specify an `n` such that `a` must be zero-padded or truncated, the\n extra/removed values will be added/removed at high frequencies. 
One can\n thus resample a series to `m` points via Fourier interpolation by:\n ``a_resamp = irfft(rfft(a), m)``.\n\n The correct interpretation of the hermitian input depends on the length of\n the original data, as given by `n`. This is because each input shape could\n correspond to either an odd or even length signal. By default, `irfft`\n assumes an even output length which puts the last entry at the Nyquist\n frequency; aliasing with its symmetric counterpart. By Hermitian symmetry,\n the value is thus treated as purely real. To avoid losing information, the\n correct length of the real input **must** be given.\n\n Examples\n --------\n >>> import numpy as np\n >>> np.fft.ifft([1, -1j, -1, 1j])\n array([0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]) # may vary\n >>> np.fft.irfft([1, -1j, -1])\n array([0., 1., 0., 0.])\n\n Notice how the last term in the input to the ordinary `ifft` is the\n complex conjugate of the second term, and the output has zero imaginary\n part everywhere. When calling `irfft`, the negative frequencies are not\n specified, and the output array is purely real.\n\n """\n a = asarray(a)\n if n is None:\n n = (a.shape[axis] - 1) * 2\n output = _raw_fft(a, n, axis, True, False, norm, out=out)\n return output\n\n\n@array_function_dispatch(_fft_dispatcher)\ndef hfft(a, n=None, axis=-1, norm=None, out=None):\n """\n Compute the FFT of a signal that has Hermitian symmetry, i.e., a real\n spectrum.\n\n Parameters\n ----------\n a : array_like\n The input array.\n n : int, optional\n Length of the transformed axis of the output. For `n` output\n points, ``n//2 + 1`` input points are necessary. If the input is\n longer than this, it is cropped. If it is shorter than this, it is\n padded with zeros. If `n` is not given, it is taken to be ``2*(m-1)``\n where ``m`` is the length of the input along the axis specified by\n `axis`.\n axis : int, optional\n Axis over which to compute the FFT. 
If not given, the last\n axis is used.\n norm : {"backward", "ortho", "forward"}, optional\n Normalization mode (see `numpy.fft`). Default is "backward".\n Indicates which direction of the forward/backward pair of transforms\n is scaled and with what normalization factor.\n\n .. versionadded:: 1.20.0\n\n The "backward", "forward" values were added.\n\n out : ndarray, optional\n If provided, the result will be placed in this array. It should be\n of the appropriate shape and dtype.\n\n .. versionadded:: 2.0.0\n\n Returns\n -------\n out : ndarray\n The truncated or zero-padded input, transformed along the axis\n indicated by `axis`, or the last one if `axis` is not specified.\n The length of the transformed axis is `n`, or, if `n` is not given,\n ``2*m - 2`` where ``m`` is the length of the transformed axis of\n the input. To get an odd number of output points, `n` must be\n specified, for instance as ``2*m - 1`` in the typical case,\n\n Raises\n ------\n IndexError\n If `axis` is not a valid axis of `a`.\n\n See also\n --------\n rfft : Compute the one-dimensional FFT for real input.\n ihfft : The inverse of `hfft`.\n\n Notes\n -----\n `hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the\n opposite case: here the signal has Hermitian symmetry in the time\n domain and is real in the frequency domain. So here it's `hfft` for\n which you must supply the length of the result if it is to be odd.\n\n * even: ``ihfft(hfft(a, 2*len(a) - 2)) == a``, within roundoff error,\n * odd: ``ihfft(hfft(a, 2*len(a) - 1)) == a``, within roundoff error.\n\n The correct interpretation of the hermitian input depends on the length of\n the original data, as given by `n`. This is because each input shape could\n correspond to either an odd or even length signal. By default, `hfft`\n assumes an even output length which puts the last entry at the Nyquist\n frequency; aliasing with its symmetric counterpart. By Hermitian symmetry,\n the value is thus treated as purely real. 
To avoid losing information, the\n shape of the full signal **must** be given.\n\n Examples\n --------\n >>> import numpy as np\n >>> signal = np.array([1, 2, 3, 4, 3, 2])\n >>> np.fft.fft(signal)\n array([15.+0.j, -4.+0.j, 0.+0.j, -1.-0.j, 0.+0.j, -4.+0.j]) # may vary\n >>> np.fft.hfft(signal[:4]) # Input first half of signal\n array([15., -4., 0., -1., 0., -4.])\n >>> np.fft.hfft(signal, 6) # Input entire signal and truncate\n array([15., -4., 0., -1., 0., -4.])\n\n\n >>> signal = np.array([[1, 1.j], [-1.j, 2]])\n >>> np.conj(signal.T) - signal # check Hermitian symmetry\n array([[ 0.-0.j, -0.+0.j], # may vary\n [ 0.+0.j, 0.-0.j]])\n >>> freq_spectrum = np.fft.hfft(signal)\n >>> freq_spectrum\n array([[ 1., 1.],\n [ 2., -2.]])\n\n """\n a = asarray(a)\n if n is None:\n n = (a.shape[axis] - 1) * 2\n new_norm = _swap_direction(norm)\n output = irfft(conjugate(a), n, axis, norm=new_norm, out=None)\n return output\n\n\n@array_function_dispatch(_fft_dispatcher)\ndef ihfft(a, n=None, axis=-1, norm=None, out=None):\n """\n Compute the inverse FFT of a signal that has Hermitian symmetry.\n\n Parameters\n ----------\n a : array_like\n Input array.\n n : int, optional\n Length of the inverse FFT, the number of points along\n transformation axis in the input to use. If `n` is smaller than\n the length of the input, the input is cropped. If it is larger,\n the input is padded with zeros. If `n` is not given, the length of\n the input along the axis specified by `axis` is used.\n axis : int, optional\n Axis over which to compute the inverse FFT. If not given, the last\n axis is used.\n norm : {"backward", "ortho", "forward"}, optional\n Normalization mode (see `numpy.fft`). Default is "backward".\n Indicates which direction of the forward/backward pair of transforms\n is scaled and with what normalization factor.\n\n .. 
versionadded:: 1.20.0

        The "backward", "forward" values were added.

    out : complex ndarray, optional
        If provided, the result will be placed in this array. It should be
        of the appropriate shape and dtype.

        .. versionadded:: 2.0.0

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the axis
        indicated by `axis`, or the last one if `axis` is not specified.
        The length of the transformed axis is ``n//2 + 1``.

    See also
    --------
    hfft, irfft

    Notes
    -----
    `hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
    opposite case: here the signal has Hermitian symmetry in the time
    domain and is real in the frequency domain. So here it's `hfft` for
    which you must supply the length of the result if it is to be odd:

    * even: ``ihfft(hfft(a, 2*len(a) - 2)) == a``, within roundoff error,
    * odd: ``ihfft(hfft(a, 2*len(a) - 1)) == a``, within roundoff error.

    Examples
    --------
    >>> import numpy as np
    >>> spectrum = np.array([ 15, -4, 0, -1, 0, -4])
    >>> np.fft.ifft(spectrum)
    array([1.+0.j,  2.+0.j,  3.+0.j,  4.+0.j,  3.+0.j,  2.+0.j])  # may vary
    >>> np.fft.ihfft(spectrum)
    array([ 1.-0.j,  2.-0.j,  3.-0.j,  4.-0.j])  # may vary

    """
    a = asarray(a)
    if n is None:
        n = a.shape[axis]
    # ihfft(x) is conj(rfft(x)) with the normalization direction swapped.
    new_norm = _swap_direction(norm)
    out = rfft(a, n, axis, norm=new_norm, out=out)
    # Conjugate in place so no second result array is allocated.
    return conjugate(out, out=out)


def _cook_nd_args(a, s=None, axes=None, invreal=0):
    """
    Normalize the `s` (shape) and `axes` arguments of an N-D transform.

    Returns a ``(s, axes)`` pair of equal length, filling in defaults from
    ``a.shape``, and emits the NumPy 2.0 deprecation warnings for the
    legacy call patterns (``s`` given without ``axes``; ``None`` entries
    inside ``s``).  If `invreal` is true and `s` was not given, the last
    transformed axis size defaults to ``2*(m-1)``, the inverse-real-FFT
    convention.
    """
    if s is None:
        shapeless = True
        if axes is None:
            s = list(a.shape)
        else:
            s = take(a.shape, axes)
    else:
        shapeless = False
        s = list(s)
    if axes is None:
        if not shapeless:
            # Deprecated in NumPy 2.0: passing `s` without explicit `axes`.
            msg = ("`axes` should not be `None` if `s` is not `None` "
                   "(Deprecated in NumPy 2.0). In a future version of NumPy, "
                   "this will raise an error and `s[i]` will correspond to "
                   "the size along the transformed axis specified by "
                   "`axes[i]`. To retain current behaviour, pass a sequence "
                   "[0, ..., k-1] to `axes` for an array of dimension k.")
            warnings.warn(msg, DeprecationWarning, stacklevel=3)
        axes = list(range(-len(s), 0))
    if len(s) != len(axes):
        raise ValueError("Shape and axes have different lengths.")
    if invreal and shapeless:
        # m complex points along the last axis correspond to 2*(m-1) real
        # output points for an inverse real transform.
        s[-1] = (a.shape[axes[-1]] - 1) * 2
    if None in s:
        # Deprecated in NumPy 2.0: `None` entries inside `s`.
        msg = ("Passing an array containing `None` values to `s` is "
               "deprecated in NumPy 2.0 and will raise an error in "
               "a future version of NumPy. To use the default behaviour "
               "of the corresponding 1-D transform, pass the value matching "
               "the default for its `n` parameter. To use the default "
               "behaviour for every axis, the `s` argument can be omitted.")
        warnings.warn(msg, DeprecationWarning, stacklevel=3)
    # use the whole input array along axis `i` if `s[i] == -1`
    s = [a.shape[_a] if _s == -1 else _s for _s, _a in zip(s, axes)]
    return s, axes


def _raw_fftnd(a, s=None, axes=None, function=fft, norm=None, out=None):
    """
    Apply the 1-D transform `function` along each requested axis in turn.

    Axes are processed in reverse order.  The same `out` buffer (if any)
    is passed to every per-axis call.
    """
    a = asarray(a)
    s, axes = _cook_nd_args(a, s, axes)
    itl = list(range(len(axes)))
    itl.reverse()
    for ii in itl:
        a = function(a, n=s[ii], axis=axes[ii], norm=norm, out=out)
    return a


def _fftn_dispatcher(a, s=None, axes=None, norm=None, out=None):
    # Array-like arguments relevant for `array_function_dispatch`.
    return (a, out)


@array_function_dispatch(_fftn_dispatcher)
def fftn(a, s=None, axes=None, norm=None, out=None):
    """
    Compute the N-dimensional discrete Fourier Transform.

    This function computes the *N*-dimensional discrete Fourier Transform over
    any number of axes in an *M*-dimensional array by means of the Fast Fourier
    Transform (FFT).

    Parameters
    ----------
    a : array_like
        Input array, can be complex.
    s : sequence of ints, optional
        Shape (length of each transformed axis) of the output
        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
        This corresponds to ``n`` for ``fft(x, n)``.
        Along any axis, if the given shape is smaller than that of the input,
        the input is cropped.
If it is larger, the input is padded with zeros.\n\n .. versionchanged:: 2.0\n\n If it is ``-1``, the whole input is used (no padding/trimming).\n\n If `s` is not given, the shape of the input along the axes specified\n by `axes` is used.\n\n .. deprecated:: 2.0\n\n If `s` is not ``None``, `axes` must not be ``None`` either.\n\n .. deprecated:: 2.0\n\n `s` must contain only ``int`` s, not ``None`` values. ``None``\n values currently mean that the default value for ``n`` is used\n in the corresponding 1-D transform, but this behaviour is\n deprecated.\n\n axes : sequence of ints, optional\n Axes over which to compute the FFT. If not given, the last ``len(s)``\n axes are used, or all axes if `s` is also not specified.\n Repeated indices in `axes` means that the transform over that axis is\n performed multiple times.\n\n .. deprecated:: 2.0\n\n If `s` is specified, the corresponding `axes` to be transformed\n must be explicitly specified too.\n\n norm : {"backward", "ortho", "forward"}, optional\n Normalization mode (see `numpy.fft`). Default is "backward".\n Indicates which direction of the forward/backward pair of transforms\n is scaled and with what normalization factor.\n\n .. versionadded:: 1.20.0\n\n The "backward", "forward" values were added.\n\n out : complex ndarray, optional\n If provided, the result will be placed in this array. It should be\n of the appropriate shape and dtype for all axes (and hence is\n incompatible with passing in all but the trivial ``s``).\n\n .. 
versionadded:: 2.0.0\n\n Returns\n -------\n out : complex ndarray\n The truncated or zero-padded input, transformed along the axes\n indicated by `axes`, or by a combination of `s` and `a`,\n as explained in the parameters section above.\n\n Raises\n ------\n ValueError\n If `s` and `axes` have different length.\n IndexError\n If an element of `axes` is larger than than the number of axes of `a`.\n\n See Also\n --------\n numpy.fft : Overall view of discrete Fourier transforms, with definitions\n and conventions used.\n ifftn : The inverse of `fftn`, the inverse *n*-dimensional FFT.\n fft : The one-dimensional FFT, with definitions and conventions used.\n rfftn : The *n*-dimensional FFT of real input.\n fft2 : The two-dimensional FFT.\n fftshift : Shifts zero-frequency terms to centre of array\n\n Notes\n -----\n The output, analogously to `fft`, contains the term for zero frequency in\n the low-order corner of all axes, the positive frequency terms in the\n first half of all axes, the term for the Nyquist frequency in the middle\n of all axes and the negative frequency terms in the second half of all\n axes, in order of decreasingly negative frequency.\n\n See `numpy.fft` for details, definitions and conventions used.\n\n Examples\n --------\n >>> import numpy as np\n >>> a = np.mgrid[:3, :3, :3][0]\n >>> np.fft.fftn(a, axes=(1, 2))\n array([[[ 0.+0.j, 0.+0.j, 0.+0.j], # may vary\n [ 0.+0.j, 0.+0.j, 0.+0.j],\n [ 0.+0.j, 0.+0.j, 0.+0.j]],\n [[ 9.+0.j, 0.+0.j, 0.+0.j],\n [ 0.+0.j, 0.+0.j, 0.+0.j],\n [ 0.+0.j, 0.+0.j, 0.+0.j]],\n [[18.+0.j, 0.+0.j, 0.+0.j],\n [ 0.+0.j, 0.+0.j, 0.+0.j],\n [ 0.+0.j, 0.+0.j, 0.+0.j]]])\n >>> np.fft.fftn(a, (2, 2), axes=(0, 1))\n array([[[ 2.+0.j, 2.+0.j, 2.+0.j], # may vary\n [ 0.+0.j, 0.+0.j, 0.+0.j]],\n [[-2.+0.j, -2.+0.j, -2.+0.j],\n [ 0.+0.j, 0.+0.j, 0.+0.j]]])\n\n >>> import matplotlib.pyplot as plt\n >>> [X, Y] = np.meshgrid(2 * np.pi * np.arange(200) / 12,\n ... 
2 * np.pi * np.arange(200) / 34)\n >>> S = np.sin(X) + np.cos(Y) + np.random.uniform(0, 1, X.shape)\n >>> FS = np.fft.fftn(S)\n >>> plt.imshow(np.log(np.abs(np.fft.fftshift(FS))**2))\n <matplotlib.image.AxesImage object at 0x...>\n >>> plt.show()\n\n """\n return _raw_fftnd(a, s, axes, fft, norm, out=out)\n\n\n@array_function_dispatch(_fftn_dispatcher)\ndef ifftn(a, s=None, axes=None, norm=None, out=None):\n """\n Compute the N-dimensional inverse discrete Fourier Transform.\n\n This function computes the inverse of the N-dimensional discrete\n Fourier Transform over any number of axes in an M-dimensional array by\n means of the Fast Fourier Transform (FFT). In other words,\n ``ifftn(fftn(a)) == a`` to within numerical accuracy.\n For a description of the definitions and conventions used, see `numpy.fft`.\n\n The input, analogously to `ifft`, should be ordered in the same way as is\n returned by `fftn`, i.e. it should have the term for zero frequency\n in all axes in the low-order corner, the positive frequency terms in the\n first half of all axes, the term for the Nyquist frequency in the middle\n of all axes and the negative frequency terms in the second half of all\n axes, in order of decreasingly negative frequency.\n\n Parameters\n ----------\n a : array_like\n Input array, can be complex.\n s : sequence of ints, optional\n Shape (length of each transformed axis) of the output\n (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).\n This corresponds to ``n`` for ``ifft(x, n)``.\n Along any axis, if the given shape is smaller than that of the input,\n the input is cropped. If it is larger, the input is padded with zeros.\n\n .. versionchanged:: 2.0\n\n If it is ``-1``, the whole input is used (no padding/trimming).\n\n If `s` is not given, the shape of the input along the axes specified\n by `axes` is used. See notes for issue on `ifft` zero padding.\n\n .. deprecated:: 2.0\n\n If `s` is not ``None``, `axes` must not be ``None`` either.\n\n .. 
deprecated:: 2.0\n\n `s` must contain only ``int`` s, not ``None`` values. ``None``\n values currently mean that the default value for ``n`` is used\n in the corresponding 1-D transform, but this behaviour is\n deprecated.\n\n axes : sequence of ints, optional\n Axes over which to compute the IFFT. If not given, the last ``len(s)``\n axes are used, or all axes if `s` is also not specified.\n Repeated indices in `axes` means that the inverse transform over that\n axis is performed multiple times.\n\n .. deprecated:: 2.0\n\n If `s` is specified, the corresponding `axes` to be transformed\n must be explicitly specified too.\n\n norm : {"backward", "ortho", "forward"}, optional\n Normalization mode (see `numpy.fft`). Default is "backward".\n Indicates which direction of the forward/backward pair of transforms\n is scaled and with what normalization factor.\n\n .. versionadded:: 1.20.0\n\n The "backward", "forward" values were added.\n\n out : complex ndarray, optional\n If provided, the result will be placed in this array. It should be\n of the appropriate shape and dtype for all axes (and hence is\n incompatible with passing in all but the trivial ``s``).\n\n .. 
versionadded:: 2.0.0\n\n Returns\n -------\n out : complex ndarray\n The truncated or zero-padded input, transformed along the axes\n indicated by `axes`, or by a combination of `s` or `a`,\n as explained in the parameters section above.\n\n Raises\n ------\n ValueError\n If `s` and `axes` have different length.\n IndexError\n If an element of `axes` is larger than than the number of axes of `a`.\n\n See Also\n --------\n numpy.fft : Overall view of discrete Fourier transforms, with definitions\n and conventions used.\n fftn : The forward *n*-dimensional FFT, of which `ifftn` is the inverse.\n ifft : The one-dimensional inverse FFT.\n ifft2 : The two-dimensional inverse FFT.\n ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning\n of array.\n\n Notes\n -----\n See `numpy.fft` for definitions and conventions used.\n\n Zero-padding, analogously with `ifft`, is performed by appending zeros to\n the input along the specified dimension. Although this is the common\n approach, it might lead to surprising results. 
If another form of zero\n padding is desired, it must be performed before `ifftn` is called.\n\n Examples\n --------\n >>> import numpy as np\n >>> a = np.eye(4)\n >>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,))\n array([[1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], # may vary\n [0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j],\n [0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],\n [0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])\n\n\n Create and plot an image with band-limited frequency content:\n\n >>> import matplotlib.pyplot as plt\n >>> n = np.zeros((200,200), dtype=complex)\n >>> n[60:80, 20:40] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20, 20)))\n >>> im = np.fft.ifftn(n).real\n >>> plt.imshow(im)\n <matplotlib.image.AxesImage object at 0x...>\n >>> plt.show()\n\n """\n return _raw_fftnd(a, s, axes, ifft, norm, out=out)\n\n\n@array_function_dispatch(_fftn_dispatcher)\ndef fft2(a, s=None, axes=(-2, -1), norm=None, out=None):\n """\n Compute the 2-dimensional discrete Fourier Transform.\n\n This function computes the *n*-dimensional discrete Fourier Transform\n over any axes in an *M*-dimensional array by means of the\n Fast Fourier Transform (FFT). By default, the transform is computed over\n the last two axes of the input array, i.e., a 2-dimensional FFT.\n\n Parameters\n ----------\n a : array_like\n Input array, can be complex\n s : sequence of ints, optional\n Shape (length of each transformed axis) of the output\n (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).\n This corresponds to ``n`` for ``fft(x, n)``.\n Along each axis, if the given shape is smaller than that of the input,\n the input is cropped. If it is larger, the input is padded with zeros.\n\n .. versionchanged:: 2.0\n\n If it is ``-1``, the whole input is used (no padding/trimming).\n\n If `s` is not given, the shape of the input along the axes specified\n by `axes` is used.\n\n .. deprecated:: 2.0\n\n If `s` is not ``None``, `axes` must not be ``None`` either.\n\n .. 
deprecated:: 2.0\n\n `s` must contain only ``int`` s, not ``None`` values. ``None``\n values currently mean that the default value for ``n`` is used\n in the corresponding 1-D transform, but this behaviour is\n deprecated.\n\n axes : sequence of ints, optional\n Axes over which to compute the FFT. If not given, the last two\n axes are used. A repeated index in `axes` means the transform over\n that axis is performed multiple times. A one-element sequence means\n that a one-dimensional FFT is performed. Default: ``(-2, -1)``.\n\n .. deprecated:: 2.0\n\n If `s` is specified, the corresponding `axes` to be transformed\n must not be ``None``.\n\n norm : {"backward", "ortho", "forward"}, optional\n Normalization mode (see `numpy.fft`). Default is "backward".\n Indicates which direction of the forward/backward pair of transforms\n is scaled and with what normalization factor.\n\n .. versionadded:: 1.20.0\n\n The "backward", "forward" values were added.\n\n out : complex ndarray, optional\n If provided, the result will be placed in this array. It should be\n of the appropriate shape and dtype for all axes (and hence only the\n last axis can have ``s`` not equal to the shape at that axis).\n\n .. 
versionadded:: 2.0.0\n\n Returns\n -------\n out : complex ndarray\n The truncated or zero-padded input, transformed along the axes\n indicated by `axes`, or the last two axes if `axes` is not given.\n\n Raises\n ------\n ValueError\n If `s` and `axes` have different length, or `axes` not given and\n ``len(s) != 2``.\n IndexError\n If an element of `axes` is larger than than the number of axes of `a`.\n\n See Also\n --------\n numpy.fft : Overall view of discrete Fourier transforms, with definitions\n and conventions used.\n ifft2 : The inverse two-dimensional FFT.\n fft : The one-dimensional FFT.\n fftn : The *n*-dimensional FFT.\n fftshift : Shifts zero-frequency terms to the center of the array.\n For two-dimensional input, swaps first and third quadrants, and second\n and fourth quadrants.\n\n Notes\n -----\n `fft2` is just `fftn` with a different default for `axes`.\n\n The output, analogously to `fft`, contains the term for zero frequency in\n the low-order corner of the transformed axes, the positive frequency terms\n in the first half of these axes, the term for the Nyquist frequency in the\n middle of the axes and the negative frequency terms in the second half of\n the axes, in order of decreasingly negative frequency.\n\n See `fftn` for details and a plotting example, and `numpy.fft` for\n definitions and conventions used.\n\n\n Examples\n --------\n >>> import numpy as np\n >>> a = np.mgrid[:5, :5][0]\n >>> np.fft.fft2(a)\n array([[ 50. +0.j , 0. +0.j , 0. +0.j , # may vary\n 0. +0.j , 0. +0.j ],\n [-12.5+17.20477401j, 0. +0.j , 0. +0.j ,\n 0. +0.j , 0. +0.j ],\n [-12.5 +4.0614962j , 0. +0.j , 0. +0.j ,\n 0. +0.j , 0. +0.j ],\n [-12.5 -4.0614962j , 0. +0.j , 0. +0.j ,\n 0. +0.j , 0. +0.j ],\n [-12.5-17.20477401j, 0. +0.j , 0. +0.j ,\n 0. +0.j , 0. 
+0.j ]])\n\n """\n return _raw_fftnd(a, s, axes, fft, norm, out=out)\n\n\n@array_function_dispatch(_fftn_dispatcher)\ndef ifft2(a, s=None, axes=(-2, -1), norm=None, out=None):\n """\n Compute the 2-dimensional inverse discrete Fourier Transform.\n\n This function computes the inverse of the 2-dimensional discrete Fourier\n Transform over any number of axes in an M-dimensional array by means of\n the Fast Fourier Transform (FFT). In other words, ``ifft2(fft2(a)) == a``\n to within numerical accuracy. By default, the inverse transform is\n computed over the last two axes of the input array.\n\n The input, analogously to `ifft`, should be ordered in the same way as is\n returned by `fft2`, i.e. it should have the term for zero frequency\n in the low-order corner of the two axes, the positive frequency terms in\n the first half of these axes, the term for the Nyquist frequency in the\n middle of the axes and the negative frequency terms in the second half of\n both axes, in order of decreasingly negative frequency.\n\n Parameters\n ----------\n a : array_like\n Input array, can be complex.\n s : sequence of ints, optional\n Shape (length of each axis) of the output (``s[0]`` refers to axis 0,\n ``s[1]`` to axis 1, etc.). This corresponds to `n` for ``ifft(x, n)``.\n Along each axis, if the given shape is smaller than that of the input,\n the input is cropped. If it is larger, the input is padded with zeros.\n\n .. versionchanged:: 2.0\n\n If it is ``-1``, the whole input is used (no padding/trimming).\n\n If `s` is not given, the shape of the input along the axes specified\n by `axes` is used. See notes for issue on `ifft` zero padding.\n\n .. deprecated:: 2.0\n\n If `s` is not ``None``, `axes` must not be ``None`` either.\n\n .. deprecated:: 2.0\n\n `s` must contain only ``int`` s, not ``None`` values. 
``None``\n values currently mean that the default value for ``n`` is used\n in the corresponding 1-D transform, but this behaviour is\n deprecated.\n\n axes : sequence of ints, optional\n Axes over which to compute the FFT. If not given, the last two\n axes are used. A repeated index in `axes` means the transform over\n that axis is performed multiple times. A one-element sequence means\n that a one-dimensional FFT is performed. Default: ``(-2, -1)``.\n\n .. deprecated:: 2.0\n\n If `s` is specified, the corresponding `axes` to be transformed\n must not be ``None``.\n\n norm : {"backward", "ortho", "forward"}, optional\n Normalization mode (see `numpy.fft`). Default is "backward".\n Indicates which direction of the forward/backward pair of transforms\n is scaled and with what normalization factor.\n\n .. versionadded:: 1.20.0\n\n The "backward", "forward" values were added.\n\n out : complex ndarray, optional\n If provided, the result will be placed in this array. It should be\n of the appropriate shape and dtype for all axes (and hence is\n incompatible with passing in all but the trivial ``s``).\n\n .. 
versionadded:: 2.0.0\n\n Returns\n -------\n out : complex ndarray\n The truncated or zero-padded input, transformed along the axes\n indicated by `axes`, or the last two axes if `axes` is not given.\n\n Raises\n ------\n ValueError\n If `s` and `axes` have different length, or `axes` not given and\n ``len(s) != 2``.\n IndexError\n If an element of `axes` is larger than than the number of axes of `a`.\n\n See Also\n --------\n numpy.fft : Overall view of discrete Fourier transforms, with definitions\n and conventions used.\n fft2 : The forward 2-dimensional FFT, of which `ifft2` is the inverse.\n ifftn : The inverse of the *n*-dimensional FFT.\n fft : The one-dimensional FFT.\n ifft : The one-dimensional inverse FFT.\n\n Notes\n -----\n `ifft2` is just `ifftn` with a different default for `axes`.\n\n See `ifftn` for details and a plotting example, and `numpy.fft` for\n definition and conventions used.\n\n Zero-padding, analogously with `ifft`, is performed by appending zeros to\n the input along the specified dimension. Although this is the common\n approach, it might lead to surprising results. If another form of zero\n padding is desired, it must be performed before `ifft2` is called.\n\n Examples\n --------\n >>> import numpy as np\n >>> a = 4 * np.eye(4)\n >>> np.fft.ifft2(a)\n array([[1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], # may vary\n [0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j],\n [0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],\n [0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]])\n\n """\n return _raw_fftnd(a, s, axes, ifft, norm, out=None)\n\n\n@array_function_dispatch(_fftn_dispatcher)\ndef rfftn(a, s=None, axes=None, norm=None, out=None):\n """\n Compute the N-dimensional discrete Fourier Transform for real input.\n\n This function computes the N-dimensional discrete Fourier Transform over\n any number of axes in an M-dimensional real array by means of the Fast\n Fourier Transform (FFT). 
By default, all axes are transformed, with the\n real transform performed over the last axis, while the remaining\n transforms are complex.\n\n Parameters\n ----------\n a : array_like\n Input array, taken to be real.\n s : sequence of ints, optional\n Shape (length along each transformed axis) to use from the input.\n (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).\n The final element of `s` corresponds to `n` for ``rfft(x, n)``, while\n for the remaining axes, it corresponds to `n` for ``fft(x, n)``.\n Along any axis, if the given shape is smaller than that of the input,\n the input is cropped. If it is larger, the input is padded with zeros.\n\n .. versionchanged:: 2.0\n\n If it is ``-1``, the whole input is used (no padding/trimming).\n\n If `s` is not given, the shape of the input along the axes specified\n by `axes` is used.\n\n .. deprecated:: 2.0\n\n If `s` is not ``None``, `axes` must not be ``None`` either.\n\n .. deprecated:: 2.0\n\n `s` must contain only ``int`` s, not ``None`` values. ``None``\n values currently mean that the default value for ``n`` is used\n in the corresponding 1-D transform, but this behaviour is\n deprecated.\n\n axes : sequence of ints, optional\n Axes over which to compute the FFT. If not given, the last ``len(s)``\n axes are used, or all axes if `s` is also not specified.\n\n .. deprecated:: 2.0\n\n If `s` is specified, the corresponding `axes` to be transformed\n must be explicitly specified too.\n\n norm : {"backward", "ortho", "forward"}, optional\n Normalization mode (see `numpy.fft`). Default is "backward".\n Indicates which direction of the forward/backward pair of transforms\n is scaled and with what normalization factor.\n\n .. versionadded:: 1.20.0\n\n The "backward", "forward" values were added.\n\n out : complex ndarray, optional\n If provided, the result will be placed in this array. 
It should be\n of the appropriate shape and dtype for all axes (and hence is\n incompatible with passing in all but the trivial ``s``).\n\n .. versionadded:: 2.0.0\n\n Returns\n -------\n out : complex ndarray\n The truncated or zero-padded input, transformed along the axes\n indicated by `axes`, or by a combination of `s` and `a`,\n as explained in the parameters section above.\n The length of the last axis transformed will be ``s[-1]//2+1``,\n while the remaining transformed axes will have lengths according to\n `s`, or unchanged from the input.\n\n Raises\n ------\n ValueError\n If `s` and `axes` have different length.\n IndexError\n If an element of `axes` is larger than than the number of axes of `a`.\n\n See Also\n --------\n irfftn : The inverse of `rfftn`, i.e. the inverse of the n-dimensional FFT\n of real input.\n fft : The one-dimensional FFT, with definitions and conventions used.\n rfft : The one-dimensional FFT of real input.\n fftn : The n-dimensional FFT.\n rfft2 : The two-dimensional FFT of real input.\n\n Notes\n -----\n The transform for real input is performed over the last transformation\n axis, as by `rfft`, then the transform over the remaining axes is\n performed as by `fftn`. 
The order of the output is as for `rfft` for the
    final transformation axis, and as for `fftn` for the remaining
    transformation axes.

    See `fft` for details, definitions and conventions used.

    Examples
    --------
    >>> import numpy as np
    >>> a = np.ones((2, 2, 2))
    >>> np.fft.rfftn(a)
    array([[[8.+0.j,  0.+0.j],  # may vary
            [0.+0.j,  0.+0.j]],
           [[0.+0.j,  0.+0.j],
            [0.+0.j,  0.+0.j]]])

    >>> np.fft.rfftn(a, axes=(2, 0))
    array([[[4.+0.j,  0.+0.j],  # may vary
            [4.+0.j,  0.+0.j]],
           [[0.+0.j,  0.+0.j],
            [0.+0.j,  0.+0.j]]])

    """
    a = asarray(a)
    s, axes = _cook_nd_args(a, s, axes)
    # Real-to-complex transform over the last requested axis first ...
    a = rfft(a, s[-1], axes[-1], norm, out=out)
    # ... then ordinary complex transforms over the remaining axes,
    # processed in reverse order.
    for ii in range(len(axes) - 2, -1, -1):
        a = fft(a, s[ii], axes[ii], norm, out=out)
    return a


@array_function_dispatch(_fftn_dispatcher)
def rfft2(a, s=None, axes=(-2, -1), norm=None, out=None):
    """
    Compute the 2-dimensional FFT of a real array.

    Parameters
    ----------
    a : array
        Input array, taken to be real.
    s : sequence of ints, optional
        Shape of the FFT.

        .. versionchanged:: 2.0

            If it is ``-1``, the whole input is used (no padding/trimming).

        .. deprecated:: 2.0

            If `s` is not ``None``, `axes` must not be ``None`` either.

        .. deprecated:: 2.0

            `s` must contain only ``int`` s, not ``None`` values. ``None``
            values currently mean that the default value for ``n`` is used
            in the corresponding 1-D transform, but this behaviour is
            deprecated.

    axes : sequence of ints, optional
        Axes over which to compute the FFT. Default: ``(-2, -1)``.

        .. deprecated:: 2.0

            If `s` is specified, the corresponding `axes` to be transformed
            must not be ``None``.

    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `numpy.fft`). Default is "backward".
        Indicates which direction of the forward/backward pair of transforms
        is scaled and with what normalization factor.

        .. versionadded:: 1.20.0

        The "backward", "forward" values were added.

    out : complex ndarray, optional
        If provided, the result will be placed in this array. It should be
        of the appropriate shape and dtype for the last transformation (and
        hence is incompatible with passing in all but the trivial ``s``).

        .. versionadded:: 2.0.0

    Returns
    -------
    out : ndarray
        The result of the real 2-D FFT.

    See Also
    --------
    rfftn : Compute the N-dimensional discrete Fourier Transform for real
        input.

    Notes
    -----
    This is really just `rfftn` with different default behavior.
    For more details see `rfftn`.

    Examples
    --------
    >>> import numpy as np
    >>> a = np.mgrid[:5, :5][0]
    >>> np.fft.rfft2(a)
    array([[ 50.  +0.j        ,   0.  +0.j        ,   0.  +0.j        ],
           [-12.5+17.20477401j,   0.  +0.j        ,   0.  +0.j        ],
           [-12.5 +4.0614962j ,   0.  +0.j        ,   0.  +0.j        ],
           [-12.5 -4.0614962j ,   0.  +0.j        ,   0.  +0.j        ],
           [-12.5-17.20477401j,   0.  +0.j        ,   0.  +0.j        ]])
    """
    return rfftn(a, s, axes, norm, out=out)


@array_function_dispatch(_fftn_dispatcher)
def irfftn(a, s=None, axes=None, norm=None, out=None):
    """
    Computes the inverse of `rfftn`.

    This function computes the inverse of the N-dimensional discrete
    Fourier Transform for real input over any number of axes in an
    M-dimensional array by means of the Fast Fourier Transform (FFT). In
    other words, ``irfftn(rfftn(a), a.shape) == a`` to within numerical
    accuracy. (The ``a.shape`` is necessary like ``len(a)`` is for `irfft`,
    and for the same reason.)

    The input should be ordered in the same way as is returned by `rfftn`,
    i.e. as for `irfft` for the final transformation axis, and as for `ifftn`
    along all the other axes.

    Parameters
    ----------
    a : array_like
        Input array.
    s : sequence of ints, optional
        Shape (length of each transformed axis) of the output
        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
`s` is also the\n number of input points used along this axis, except for the last axis,\n where ``s[-1]//2+1`` points of the input are used.\n Along any axis, if the shape indicated by `s` is smaller than that of\n the input, the input is cropped. If it is larger, the input is padded\n with zeros.\n\n .. versionchanged:: 2.0\n\n If it is ``-1``, the whole input is used (no padding/trimming).\n\n If `s` is not given, the shape of the input along the axes\n specified by axes is used. Except for the last axis which is taken to\n be ``2*(m-1)`` where ``m`` is the length of the input along that axis.\n\n .. deprecated:: 2.0\n\n If `s` is not ``None``, `axes` must not be ``None`` either.\n\n .. deprecated:: 2.0\n\n `s` must contain only ``int`` s, not ``None`` values. ``None``\n values currently mean that the default value for ``n`` is used\n in the corresponding 1-D transform, but this behaviour is\n deprecated.\n\n axes : sequence of ints, optional\n Axes over which to compute the inverse FFT. If not given, the last\n `len(s)` axes are used, or all axes if `s` is also not specified.\n Repeated indices in `axes` means that the inverse transform over that\n axis is performed multiple times.\n\n .. deprecated:: 2.0\n\n If `s` is specified, the corresponding `axes` to be transformed\n must be explicitly specified too.\n\n norm : {"backward", "ortho", "forward"}, optional\n Normalization mode (see `numpy.fft`). Default is "backward".\n Indicates which direction of the forward/backward pair of transforms\n is scaled and with what normalization factor.\n\n .. versionadded:: 1.20.0\n\n The "backward", "forward" values were added.\n\n out : ndarray, optional\n If provided, the result will be placed in this array. It should be\n of the appropriate shape and dtype for the last transformation.\n\n .. 
versionadded:: 2.0.0\n\n Returns\n -------\n out : ndarray\n The truncated or zero-padded input, transformed along the axes\n indicated by `axes`, or by a combination of `s` or `a`,\n as explained in the parameters section above.\n The length of each transformed axis is as given by the corresponding\n element of `s`, or the length of the input in every axis except for the\n last one if `s` is not given. In the final transformed axis the length\n of the output when `s` is not given is ``2*(m-1)`` where ``m`` is the\n length of the final transformed axis of the input. To get an odd\n number of output points in the final axis, `s` must be specified.\n\n Raises\n ------\n ValueError\n If `s` and `axes` have different length.\n IndexError\n If an element of `axes` is larger than than the number of axes of `a`.\n\n See Also\n --------\n rfftn : The forward n-dimensional FFT of real input,\n of which `ifftn` is the inverse.\n fft : The one-dimensional FFT, with definitions and conventions used.\n irfft : The inverse of the one-dimensional FFT of real input.\n irfft2 : The inverse of the two-dimensional FFT of real input.\n\n Notes\n -----\n See `fft` for definitions and conventions used.\n\n See `rfft` for definitions and conventions used for real input.\n\n The correct interpretation of the hermitian input depends on the shape of\n the original data, as given by `s`. This is because each input shape could\n correspond to either an odd or even length signal. By default, `irfftn`\n assumes an even output length which puts the last entry at the Nyquist\n frequency; aliasing with its symmetric counterpart. When performing the\n final complex to real transform, the last value is thus treated as purely\n real. 
To avoid losing information, the correct shape of the real input\n **must** be given.\n\n Examples\n --------\n >>> import numpy as np\n >>> a = np.zeros((3, 2, 2))\n >>> a[0, 0, 0] = 3 * 2 * 2\n >>> np.fft.irfftn(a)\n array([[[1., 1.],\n [1., 1.]],\n [[1., 1.],\n [1., 1.]],\n [[1., 1.],\n [1., 1.]]])\n\n """\n a = asarray(a)\n s, axes = _cook_nd_args(a, s, axes, invreal=1)\n for ii in range(len(axes) - 1):\n a = ifft(a, s[ii], axes[ii], norm)\n a = irfft(a, s[-1], axes[-1], norm, out=out)\n return a\n\n\n@array_function_dispatch(_fftn_dispatcher)\ndef irfft2(a, s=None, axes=(-2, -1), norm=None, out=None):\n """\n Computes the inverse of `rfft2`.\n\n Parameters\n ----------\n a : array_like\n The input array\n s : sequence of ints, optional\n Shape of the real output to the inverse FFT.\n\n .. versionchanged:: 2.0\n\n If it is ``-1``, the whole input is used (no padding/trimming).\n\n .. deprecated:: 2.0\n\n If `s` is not ``None``, `axes` must not be ``None`` either.\n\n .. deprecated:: 2.0\n\n `s` must contain only ``int`` s, not ``None`` values. ``None``\n values currently mean that the default value for ``n`` is used\n in the corresponding 1-D transform, but this behaviour is\n deprecated.\n\n axes : sequence of ints, optional\n The axes over which to compute the inverse fft.\n Default: ``(-2, -1)``, the last two axes.\n\n .. deprecated:: 2.0\n\n If `s` is specified, the corresponding `axes` to be transformed\n must not be ``None``.\n\n norm : {"backward", "ortho", "forward"}, optional\n Normalization mode (see `numpy.fft`). Default is "backward".\n Indicates which direction of the forward/backward pair of transforms\n is scaled and with what normalization factor.\n\n .. versionadded:: 1.20.0\n\n The "backward", "forward" values were added.\n\n out : ndarray, optional\n If provided, the result will be placed in this array. It should be\n of the appropriate shape and dtype for the last transformation.\n\n .. 
versionadded:: 2.0.0\n\n Returns\n -------\n out : ndarray\n The result of the inverse real 2-D FFT.\n\n See Also\n --------\n rfft2 : The forward two-dimensional FFT of real input,\n of which `irfft2` is the inverse.\n rfft : The one-dimensional FFT for real input.\n irfft : The inverse of the one-dimensional FFT of real input.\n irfftn : Compute the inverse of the N-dimensional FFT of real input.\n\n Notes\n -----\n This is really `irfftn` with different defaults.\n For more details see `irfftn`.\n\n Examples\n --------\n >>> import numpy as np\n >>> a = np.mgrid[:5, :5][0]\n >>> A = np.fft.rfft2(a)\n >>> np.fft.irfft2(A, s=a.shape)\n array([[0., 0., 0., 0., 0.],\n [1., 1., 1., 1., 1.],\n [2., 2., 2., 2., 2.],\n [3., 3., 3., 3., 3.],\n [4., 4., 4., 4., 4.]])\n """\n return irfftn(a, s, axes, norm, out=None)\n | .venv\Lib\site-packages\numpy\fft\_pocketfft.py | _pocketfft.py | Python | 64,291 | 0.75 | 0.100413 | 0.011086 | python-kit | 40 | 2023-09-18T13:55:18.852152 | Apache-2.0 | false | 3fa1f49a2447276e2f7a8fa839c5df0b |
from collections.abc import Sequence\nfrom typing import Literal as L\nfrom typing import TypeAlias\n\nfrom numpy import complex128, float64\nfrom numpy._typing import ArrayLike, NDArray, _ArrayLikeNumber_co\n\n__all__ = [\n "fft",\n "ifft",\n "rfft",\n "irfft",\n "hfft",\n "ihfft",\n "rfftn",\n "irfftn",\n "rfft2",\n "irfft2",\n "fft2",\n "ifft2",\n "fftn",\n "ifftn",\n]\n\n_NormKind: TypeAlias = L["backward", "ortho", "forward"] | None\n\ndef fft(\n a: ArrayLike,\n n: int | None = ...,\n axis: int = ...,\n norm: _NormKind = ...,\n out: NDArray[complex128] | None = ...,\n) -> NDArray[complex128]: ...\n\ndef ifft(\n a: ArrayLike,\n n: int | None = ...,\n axis: int = ...,\n norm: _NormKind = ...,\n out: NDArray[complex128] | None = ...,\n) -> NDArray[complex128]: ...\n\ndef rfft(\n a: ArrayLike,\n n: int | None = ...,\n axis: int = ...,\n norm: _NormKind = ...,\n out: NDArray[complex128] | None = ...,\n) -> NDArray[complex128]: ...\n\ndef irfft(\n a: ArrayLike,\n n: int | None = ...,\n axis: int = ...,\n norm: _NormKind = ...,\n out: NDArray[float64] | None = ...,\n) -> NDArray[float64]: ...\n\n# Input array must be compatible with `np.conjugate`\ndef hfft(\n a: _ArrayLikeNumber_co,\n n: int | None = ...,\n axis: int = ...,\n norm: _NormKind = ...,\n out: NDArray[float64] | None = ...,\n) -> NDArray[float64]: ...\n\ndef ihfft(\n a: ArrayLike,\n n: int | None = ...,\n axis: int = ...,\n norm: _NormKind = ...,\n out: NDArray[complex128] | None = ...,\n) -> NDArray[complex128]: ...\n\ndef fftn(\n a: ArrayLike,\n s: Sequence[int] | None = ...,\n axes: Sequence[int] | None = ...,\n norm: _NormKind = ...,\n out: NDArray[complex128] | None = ...,\n) -> NDArray[complex128]: ...\n\ndef ifftn(\n a: ArrayLike,\n s: Sequence[int] | None = ...,\n axes: Sequence[int] | None = ...,\n norm: _NormKind = ...,\n out: NDArray[complex128] | None = ...,\n) -> NDArray[complex128]: ...\n\ndef rfftn(\n a: ArrayLike,\n s: Sequence[int] | None = ...,\n axes: Sequence[int] | None = ...,\n 
norm: _NormKind = ...,\n out: NDArray[complex128] | None = ...,\n) -> NDArray[complex128]: ...\n\ndef irfftn(\n a: ArrayLike,\n s: Sequence[int] | None = ...,\n axes: Sequence[int] | None = ...,\n norm: _NormKind = ...,\n out: NDArray[float64] | None = ...,\n) -> NDArray[float64]: ...\n\ndef fft2(\n a: ArrayLike,\n s: Sequence[int] | None = ...,\n axes: Sequence[int] | None = ...,\n norm: _NormKind = ...,\n out: NDArray[complex128] | None = ...,\n) -> NDArray[complex128]: ...\n\ndef ifft2(\n a: ArrayLike,\n s: Sequence[int] | None = ...,\n axes: Sequence[int] | None = ...,\n norm: _NormKind = ...,\n out: NDArray[complex128] | None = ...,\n) -> NDArray[complex128]: ...\n\ndef rfft2(\n a: ArrayLike,\n s: Sequence[int] | None = ...,\n axes: Sequence[int] | None = ...,\n norm: _NormKind = ...,\n out: NDArray[complex128] | None = ...,\n) -> NDArray[complex128]: ...\n\ndef irfft2(\n a: ArrayLike,\n s: Sequence[int] | None = ...,\n axes: Sequence[int] | None = ...,\n norm: _NormKind = ...,\n out: NDArray[float64] | None = ...,\n) -> NDArray[float64]: ...\n | .venv\Lib\site-packages\numpy\fft\_pocketfft.pyi | _pocketfft.pyi | Other | 3,312 | 0.95 | 0.101449 | 0.008264 | vue-tools | 305 | 2024-11-01T04:33:39.259867 | BSD-3-Clause | false | b37eca93a5eeed3fa8f3c8bb6a0192dd |
!<arch>\n/ -1 0 206 `\n | .venv\Lib\site-packages\numpy\fft\_pocketfft_umath.cp313-win_amd64.lib | _pocketfft_umath.cp313-win_amd64.lib | Other | 2,176 | 0.8 | 0 | 0 | vue-tools | 789 | 2025-06-06T02:32:07.530046 | GPL-3.0 | false | 0962ba8c17e0105a7730e50cef750b32 |
"""\nDiscrete Fourier Transform\n==========================\n\n.. currentmodule:: numpy.fft\n\nThe SciPy module `scipy.fft` is a more comprehensive superset\nof `numpy.fft`, which includes only a basic set of routines.\n\nStandard FFTs\n-------------\n\n.. autosummary::\n :toctree: generated/\n\n fft Discrete Fourier transform.\n ifft Inverse discrete Fourier transform.\n fft2 Discrete Fourier transform in two dimensions.\n ifft2 Inverse discrete Fourier transform in two dimensions.\n fftn Discrete Fourier transform in N-dimensions.\n ifftn Inverse discrete Fourier transform in N dimensions.\n\nReal FFTs\n---------\n\n.. autosummary::\n :toctree: generated/\n\n rfft Real discrete Fourier transform.\n irfft Inverse real discrete Fourier transform.\n rfft2 Real discrete Fourier transform in two dimensions.\n irfft2 Inverse real discrete Fourier transform in two dimensions.\n rfftn Real discrete Fourier transform in N dimensions.\n irfftn Inverse real discrete Fourier transform in N dimensions.\n\nHermitian FFTs\n--------------\n\n.. autosummary::\n :toctree: generated/\n\n hfft Hermitian discrete Fourier transform.\n ihfft Inverse Hermitian discrete Fourier transform.\n\nHelper routines\n---------------\n\n.. autosummary::\n :toctree: generated/\n\n fftfreq Discrete Fourier Transform sample frequencies.\n rfftfreq DFT sample frequencies (for usage with rfft, irfft).\n fftshift Shift zero-frequency component to center of spectrum.\n ifftshift Inverse of fftshift.\n\n\nBackground information\n----------------------\n\nFourier analysis is fundamentally a method for expressing a function as a\nsum of periodic components, and for recovering the function from those\ncomponents. When both the function and its Fourier transform are\nreplaced with discretized counterparts, it is called the discrete Fourier\ntransform (DFT). 
The DFT has become a mainstay of numerical computing in\npart because of a very fast algorithm for computing it, called the Fast\nFourier Transform (FFT), which was known to Gauss (1805) and was brought\nto light in its current form by Cooley and Tukey [CT]_. Press et al. [NR]_\nprovide an accessible introduction to Fourier analysis and its\napplications.\n\nBecause the discrete Fourier transform separates its input into\ncomponents that contribute at discrete frequencies, it has a great number\nof applications in digital signal processing, e.g., for filtering, and in\nthis context the discretized input to the transform is customarily\nreferred to as a *signal*, which exists in the *time domain*. The output\nis called a *spectrum* or *transform* and exists in the *frequency\ndomain*.\n\nImplementation details\n----------------------\n\nThere are many ways to define the DFT, varying in the sign of the\nexponent, normalization, etc. In this implementation, the DFT is defined\nas\n\n.. math::\n A_k = \\sum_{m=0}^{n-1} a_m \\exp\\left\\{-2\\pi i{mk \\over n}\\right\\}\n \\qquad k = 0,\\ldots,n-1.\n\nThe DFT is in general defined for complex inputs and outputs, and a\nsingle-frequency component at linear frequency :math:`f` is\nrepresented by a complex exponential\n:math:`a_m = \\exp\\{2\\pi i\\,f m\\Delta t\\}`, where :math:`\\Delta t`\nis the sampling interval.\n\nThe values in the result follow so-called "standard" order: If ``A =\nfft(a, n)``, then ``A[0]`` contains the zero-frequency term (the sum of\nthe signal), which is always purely real for real inputs. Then ``A[1:n/2]``\ncontains the positive-frequency terms, and ``A[n/2+1:]`` contains the\nnegative-frequency terms, in order of decreasingly negative frequency.\nFor an even number of input points, ``A[n/2]`` represents both positive and\nnegative Nyquist frequency, and is also purely real for real input. 
For\nan odd number of input points, ``A[(n-1)/2]`` contains the largest positive\nfrequency, while ``A[(n+1)/2]`` contains the largest negative frequency.\nThe routine ``np.fft.fftfreq(n)`` returns an array giving the frequencies\nof corresponding elements in the output. The routine\n``np.fft.fftshift(A)`` shifts transforms and their frequencies to put the\nzero-frequency components in the middle, and ``np.fft.ifftshift(A)`` undoes\nthat shift.\n\nWhen the input `a` is a time-domain signal and ``A = fft(a)``, ``np.abs(A)``\nis its amplitude spectrum and ``np.abs(A)**2`` is its power spectrum.\nThe phase spectrum is obtained by ``np.angle(A)``.\n\nThe inverse DFT is defined as\n\n.. math::\n a_m = \\frac{1}{n}\\sum_{k=0}^{n-1}A_k\\exp\\left\\{2\\pi i{mk\\over n}\\right\\}\n \\qquad m = 0,\\ldots,n-1.\n\nIt differs from the forward transform by the sign of the exponential\nargument and the default normalization by :math:`1/n`.\n\nType Promotion\n--------------\n\n`numpy.fft` promotes ``float32`` and ``complex64`` arrays to ``float64`` and\n``complex128`` arrays respectively. For an FFT implementation that does not\npromote input arrays, see `scipy.fftpack`.\n\nNormalization\n-------------\n\nThe argument ``norm`` indicates which direction of the pair of direct/inverse\ntransforms is scaled and with what normalization factor.\nThe default normalization (``"backward"``) has the direct (forward) transforms\nunscaled and the inverse (backward) transforms scaled by :math:`1/n`. It is\npossible to obtain unitary transforms by setting the keyword argument ``norm``\nto ``"ortho"`` so that both direct and inverse transforms are scaled by\n:math:`1/\\sqrt{n}`. Finally, setting the keyword argument ``norm`` to\n``"forward"`` has the direct transforms scaled by :math:`1/n` and the inverse\ntransforms unscaled (i.e. 
exactly opposite to the default ``"backward"``).\n`None` is an alias of the default option ``"backward"`` for backward\ncompatibility.\n\nReal and Hermitian transforms\n-----------------------------\n\nWhen the input is purely real, its transform is Hermitian, i.e., the\ncomponent at frequency :math:`f_k` is the complex conjugate of the\ncomponent at frequency :math:`-f_k`, which means that for real\ninputs there is no information in the negative frequency components that\nis not already available from the positive frequency components.\nThe family of `rfft` functions is\ndesigned to operate on real inputs, and exploits this symmetry by\ncomputing only the positive frequency components, up to and including the\nNyquist frequency. Thus, ``n`` input points produce ``n/2+1`` complex\noutput points. The inverses of this family assumes the same symmetry of\nits input, and for an output of ``n`` points uses ``n/2+1`` input points.\n\nCorrespondingly, when the spectrum is purely real, the signal is\nHermitian. The `hfft` family of functions exploits this symmetry by\nusing ``n/2+1`` complex points in the input (time) domain for ``n`` real\npoints in the frequency domain.\n\nIn higher dimensions, FFTs are used, e.g., for image analysis and\nfiltering. The computational efficiency of the FFT means that it can\nalso be a faster way to compute large convolutions, using the property\nthat a convolution in the time domain is equivalent to a point-by-point\nmultiplication in the frequency domain.\n\nHigher dimensions\n-----------------\n\nIn two dimensions, the DFT is defined as\n\n.. math::\n A_{kl} = \\sum_{m=0}^{M-1} \\sum_{n=0}^{N-1}\n a_{mn}\\exp\\left\\{-2\\pi i \\left({mk\\over M}+{nl\\over N}\\right)\\right\\}\n \\qquad k = 0, \\ldots, M-1;\\quad l = 0, \\ldots, N-1,\n\nwhich extends in the obvious way to higher dimensions, and the inverses\nin higher dimensions also extend in the same way.\n\nReferences\n----------\n\n.. [CT] Cooley, James W., and John W. 
Tukey, 1965, "An algorithm for the\n machine calculation of complex Fourier series," *Math. Comput.*\n 19: 297-301.\n\n.. [NR] Press, W., Teukolsky, S., Vetterline, W.T., and Flannery, B.P.,\n 2007, *Numerical Recipes: The Art of Scientific Computing*, ch.\n 12-13. Cambridge Univ. Press, Cambridge, UK.\n\nExamples\n--------\n\nFor examples, see the various functions.\n\n"""\n\n# TODO: `numpy.fft.helper`` was deprecated in NumPy 2.0. It should\n# be deleted once downstream libraries move to `numpy.fft`.\nfrom . import _helper, _pocketfft, helper\nfrom ._helper import *\nfrom ._pocketfft import *\n\n__all__ = _pocketfft.__all__.copy() # noqa: PLE0605\n__all__ += _helper.__all__\n\nfrom numpy._pytesttester import PytestTester\n\ntest = PytestTester(__name__)\ndel PytestTester\n | .venv\Lib\site-packages\numpy\fft\__init__.py | __init__.py | Python | 8,506 | 0.95 | 0.083721 | 0.012048 | awesome-app | 60 | 2024-10-04T22:15:39.149395 | Apache-2.0 | false | c94471751c52c78cd0bd265bc36dcc1c |
from ._helper import (\n fftfreq,\n fftshift,\n ifftshift,\n rfftfreq,\n)\nfrom ._pocketfft import (\n fft,\n fft2,\n fftn,\n hfft,\n ifft,\n ifft2,\n ifftn,\n ihfft,\n irfft,\n irfft2,\n irfftn,\n rfft,\n rfft2,\n rfftn,\n)\n\n__all__ = [\n "fft",\n "ifft",\n "rfft",\n "irfft",\n "hfft",\n "ihfft",\n "rfftn",\n "irfftn",\n "rfft2",\n "irfft2",\n "fft2",\n "ifft2",\n "fftn",\n "ifftn",\n "fftshift",\n "ifftshift",\n "fftfreq",\n "rfftfreq",\n]\n | .venv\Lib\site-packages\numpy\fft\__init__.pyi | __init__.pyi | Other | 557 | 0.85 | 0 | 0 | node-utils | 256 | 2023-12-26T18:05:00.696666 | Apache-2.0 | false | e39142f6802fd14b323791247d20f0f6 |
"""Test functions for fftpack.helper module\n\nCopied from fftpack.helper by Pearu Peterson, October 2005\n\n"""\nimport numpy as np\nfrom numpy import fft, pi\nfrom numpy.testing import assert_array_almost_equal\n\n\nclass TestFFTShift:\n\n def test_definition(self):\n x = [0, 1, 2, 3, 4, -4, -3, -2, -1]\n y = [-4, -3, -2, -1, 0, 1, 2, 3, 4]\n assert_array_almost_equal(fft.fftshift(x), y)\n assert_array_almost_equal(fft.ifftshift(y), x)\n x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1]\n y = [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4]\n assert_array_almost_equal(fft.fftshift(x), y)\n assert_array_almost_equal(fft.ifftshift(y), x)\n\n def test_inverse(self):\n for n in [1, 4, 9, 100, 211]:\n x = np.random.random((n,))\n assert_array_almost_equal(fft.ifftshift(fft.fftshift(x)), x)\n\n def test_axes_keyword(self):\n freqs = [[0, 1, 2], [3, 4, -4], [-3, -2, -1]]\n shifted = [[-1, -3, -2], [2, 0, 1], [-4, 3, 4]]\n assert_array_almost_equal(fft.fftshift(freqs, axes=(0, 1)), shifted)\n assert_array_almost_equal(fft.fftshift(freqs, axes=0),\n fft.fftshift(freqs, axes=(0,)))\n assert_array_almost_equal(fft.ifftshift(shifted, axes=(0, 1)), freqs)\n assert_array_almost_equal(fft.ifftshift(shifted, axes=0),\n fft.ifftshift(shifted, axes=(0,)))\n\n assert_array_almost_equal(fft.fftshift(freqs), shifted)\n assert_array_almost_equal(fft.ifftshift(shifted), freqs)\n\n def test_uneven_dims(self):\n """ Test 2D input, which has uneven dimension sizes """\n freqs = [\n [0, 1],\n [2, 3],\n [4, 5]\n ]\n\n # shift in dimension 0\n shift_dim0 = [\n [4, 5],\n [0, 1],\n [2, 3]\n ]\n assert_array_almost_equal(fft.fftshift(freqs, axes=0), shift_dim0)\n assert_array_almost_equal(fft.ifftshift(shift_dim0, axes=0), freqs)\n assert_array_almost_equal(fft.fftshift(freqs, axes=(0,)), shift_dim0)\n assert_array_almost_equal(fft.ifftshift(shift_dim0, axes=[0]), freqs)\n\n # shift in dimension 1\n shift_dim1 = [\n [1, 0],\n [3, 2],\n [5, 4]\n ]\n assert_array_almost_equal(fft.fftshift(freqs, axes=1), shift_dim1)\n 
assert_array_almost_equal(fft.ifftshift(shift_dim1, axes=1), freqs)\n\n # shift in both dimensions\n shift_dim_both = [\n [5, 4],\n [1, 0],\n [3, 2]\n ]\n assert_array_almost_equal(fft.fftshift(freqs, axes=(0, 1)), shift_dim_both)\n assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=(0, 1)), freqs)\n assert_array_almost_equal(fft.fftshift(freqs, axes=[0, 1]), shift_dim_both)\n assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=[0, 1]), freqs)\n\n # axes=None (default) shift in all dimensions\n assert_array_almost_equal(fft.fftshift(freqs, axes=None), shift_dim_both)\n assert_array_almost_equal(fft.ifftshift(shift_dim_both, axes=None), freqs)\n assert_array_almost_equal(fft.fftshift(freqs), shift_dim_both)\n assert_array_almost_equal(fft.ifftshift(shift_dim_both), freqs)\n\n def test_equal_to_original(self):\n """ Test the new (>=v1.15) and old implementations are equal (see #10073) """\n from numpy._core import arange, asarray, concatenate, take\n\n def original_fftshift(x, axes=None):\n """ How fftshift was implemented in v1.14"""\n tmp = asarray(x)\n ndim = tmp.ndim\n if axes is None:\n axes = list(range(ndim))\n elif isinstance(axes, int):\n axes = (axes,)\n y = tmp\n for k in axes:\n n = tmp.shape[k]\n p2 = (n + 1) // 2\n mylist = concatenate((arange(p2, n), arange(p2)))\n y = take(y, mylist, k)\n return y\n\n def original_ifftshift(x, axes=None):\n """ How ifftshift was implemented in v1.14 """\n tmp = asarray(x)\n ndim = tmp.ndim\n if axes is None:\n axes = list(range(ndim))\n elif isinstance(axes, int):\n axes = (axes,)\n y = tmp\n for k in axes:\n n = tmp.shape[k]\n p2 = n - (n + 1) // 2\n mylist = concatenate((arange(p2, n), arange(p2)))\n y = take(y, mylist, k)\n return y\n\n # create possible 2d array combinations and try all possible keywords\n # compare output to original functions\n for i in range(16):\n for j in range(16):\n for axes_keyword in [0, 1, None, (0,), (0, 1)]:\n inp = np.random.rand(i, j)\n\n 
assert_array_almost_equal(fft.fftshift(inp, axes_keyword),\n original_fftshift(inp, axes_keyword))\n\n assert_array_almost_equal(fft.ifftshift(inp, axes_keyword),\n original_ifftshift(inp, axes_keyword))\n\n\nclass TestFFTFreq:\n\n def test_definition(self):\n x = [0, 1, 2, 3, 4, -4, -3, -2, -1]\n assert_array_almost_equal(9 * fft.fftfreq(9), x)\n assert_array_almost_equal(9 * pi * fft.fftfreq(9, pi), x)\n x = [0, 1, 2, 3, 4, -5, -4, -3, -2, -1]\n assert_array_almost_equal(10 * fft.fftfreq(10), x)\n assert_array_almost_equal(10 * pi * fft.fftfreq(10, pi), x)\n\n\nclass TestRFFTFreq:\n\n def test_definition(self):\n x = [0, 1, 2, 3, 4]\n assert_array_almost_equal(9 * fft.rfftfreq(9), x)\n assert_array_almost_equal(9 * pi * fft.rfftfreq(9, pi), x)\n x = [0, 1, 2, 3, 4, 5]\n assert_array_almost_equal(10 * fft.rfftfreq(10), x)\n assert_array_almost_equal(10 * pi * fft.rfftfreq(10, pi), x)\n\n\nclass TestIRFFTN:\n\n def test_not_last_axis_success(self):\n ar, ai = np.random.random((2, 16, 8, 32))\n a = ar + 1j * ai\n\n axes = (-2,)\n\n # Should not raise error\n fft.irfftn(a, axes=axes)\n | .venv\Lib\site-packages\numpy\fft\tests\test_helper.py | test_helper.py | Python | 6,321 | 0.95 | 0.143713 | 0.051095 | node-utils | 499 | 2023-08-10T19:49:28.513738 | GPL-3.0 | true | 2ed1e7034405377d2d33a2245c5e758d |
import queue\nimport threading\n\nimport pytest\n\nimport numpy as np\nfrom numpy.random import random\nfrom numpy.testing import IS_WASM, assert_allclose, assert_array_equal, assert_raises\n\n\ndef fft1(x):\n L = len(x)\n phase = -2j * np.pi * (np.arange(L) / L)\n phase = np.arange(L).reshape(-1, 1) * phase\n return np.sum(x * np.exp(phase), axis=1)\n\n\nclass TestFFTShift:\n\n def test_fft_n(self):\n assert_raises(ValueError, np.fft.fft, [1, 2, 3], 0)\n\n\nclass TestFFT1D:\n\n def test_identity(self):\n maxlen = 512\n x = random(maxlen) + 1j * random(maxlen)\n xr = random(maxlen)\n for i in range(1, maxlen):\n assert_allclose(np.fft.ifft(np.fft.fft(x[0:i])), x[0:i],\n atol=1e-12)\n assert_allclose(np.fft.irfft(np.fft.rfft(xr[0:i]), i),\n xr[0:i], atol=1e-12)\n\n @pytest.mark.parametrize("dtype", [np.single, np.double, np.longdouble])\n def test_identity_long_short(self, dtype):\n # Test with explicitly given number of points, both for n\n # smaller and for n larger than the input size.\n maxlen = 16\n atol = 5 * np.spacing(np.array(1., dtype=dtype))\n x = random(maxlen).astype(dtype) + 1j * random(maxlen).astype(dtype)\n xx = np.concatenate([x, np.zeros_like(x)])\n xr = random(maxlen).astype(dtype)\n xxr = np.concatenate([xr, np.zeros_like(xr)])\n for i in range(1, maxlen * 2):\n check_c = np.fft.ifft(np.fft.fft(x, n=i), n=i)\n assert check_c.real.dtype == dtype\n assert_allclose(check_c, xx[0:i], atol=atol, rtol=0)\n check_r = np.fft.irfft(np.fft.rfft(xr, n=i), n=i)\n assert check_r.dtype == dtype\n assert_allclose(check_r, xxr[0:i], atol=atol, rtol=0)\n\n @pytest.mark.parametrize("dtype", [np.single, np.double, np.longdouble])\n def test_identity_long_short_reversed(self, dtype):\n # Also test explicitly given number of points in reversed order.\n maxlen = 16\n atol = 5 * np.spacing(np.array(1., dtype=dtype))\n x = random(maxlen).astype(dtype) + 1j * random(maxlen).astype(dtype)\n xx = np.concatenate([x, np.zeros_like(x)])\n for i in range(1, maxlen * 2):\n 
check_via_c = np.fft.fft(np.fft.ifft(x, n=i), n=i)\n assert check_via_c.dtype == x.dtype\n assert_allclose(check_via_c, xx[0:i], atol=atol, rtol=0)\n # For irfft, we can neither recover the imaginary part of\n # the first element, nor the imaginary part of the last\n # element if npts is even. So, set to 0 for the comparison.\n y = x.copy()\n n = i // 2 + 1\n y.imag[0] = 0\n if i % 2 == 0:\n y.imag[n - 1:] = 0\n yy = np.concatenate([y, np.zeros_like(y)])\n check_via_r = np.fft.rfft(np.fft.irfft(x, n=i), n=i)\n assert check_via_r.dtype == x.dtype\n assert_allclose(check_via_r, yy[0:n], atol=atol, rtol=0)\n\n def test_fft(self):\n x = random(30) + 1j * random(30)\n assert_allclose(fft1(x), np.fft.fft(x), atol=1e-6)\n assert_allclose(fft1(x), np.fft.fft(x, norm="backward"), atol=1e-6)\n assert_allclose(fft1(x) / np.sqrt(30),\n np.fft.fft(x, norm="ortho"), atol=1e-6)\n assert_allclose(fft1(x) / 30.,\n np.fft.fft(x, norm="forward"), atol=1e-6)\n\n @pytest.mark.parametrize("axis", (0, 1))\n @pytest.mark.parametrize("dtype", (complex, float))\n @pytest.mark.parametrize("transpose", (True, False))\n def test_fft_out_argument(self, dtype, transpose, axis):\n def zeros_like(x):\n if transpose:\n return np.zeros_like(x.T).T\n else:\n return np.zeros_like(x)\n\n # tests below only test the out parameter\n if dtype is complex:\n y = random((10, 20)) + 1j * random((10, 20))\n fft, ifft = np.fft.fft, np.fft.ifft\n else:\n y = random((10, 20))\n fft, ifft = np.fft.rfft, np.fft.irfft\n\n expected = fft(y, axis=axis)\n out = zeros_like(expected)\n result = fft(y, out=out, axis=axis)\n assert result is out\n assert_array_equal(result, expected)\n\n expected2 = ifft(expected, axis=axis)\n out2 = out if dtype is complex else zeros_like(expected2)\n result2 = ifft(out, out=out2, axis=axis)\n assert result2 is out2\n assert_array_equal(result2, expected2)\n\n @pytest.mark.parametrize("axis", [0, 1])\n def test_fft_inplace_out(self, axis):\n # Test some weirder in-place combinations\n y = 
random((20, 20)) + 1j * random((20, 20))\n # Fully in-place.\n y1 = y.copy()\n expected1 = np.fft.fft(y1, axis=axis)\n result1 = np.fft.fft(y1, axis=axis, out=y1)\n assert result1 is y1\n assert_array_equal(result1, expected1)\n # In-place of part of the array; rest should be unchanged.\n y2 = y.copy()\n out2 = y2[:10] if axis == 0 else y2[:, :10]\n expected2 = np.fft.fft(y2, n=10, axis=axis)\n result2 = np.fft.fft(y2, n=10, axis=axis, out=out2)\n assert result2 is out2\n assert_array_equal(result2, expected2)\n if axis == 0:\n assert_array_equal(y2[10:], y[10:])\n else:\n assert_array_equal(y2[:, 10:], y[:, 10:])\n # In-place of another part of the array.\n y3 = y.copy()\n y3_sel = y3[5:] if axis == 0 else y3[:, 5:]\n out3 = y3[5:15] if axis == 0 else y3[:, 5:15]\n expected3 = np.fft.fft(y3_sel, n=10, axis=axis)\n result3 = np.fft.fft(y3_sel, n=10, axis=axis, out=out3)\n assert result3 is out3\n assert_array_equal(result3, expected3)\n if axis == 0:\n assert_array_equal(y3[:5], y[:5])\n assert_array_equal(y3[15:], y[15:])\n else:\n assert_array_equal(y3[:, :5], y[:, :5])\n assert_array_equal(y3[:, 15:], y[:, 15:])\n # In-place with n > nin; rest should be unchanged.\n y4 = y.copy()\n y4_sel = y4[:10] if axis == 0 else y4[:, :10]\n out4 = y4[:15] if axis == 0 else y4[:, :15]\n expected4 = np.fft.fft(y4_sel, n=15, axis=axis)\n result4 = np.fft.fft(y4_sel, n=15, axis=axis, out=out4)\n assert result4 is out4\n assert_array_equal(result4, expected4)\n if axis == 0:\n assert_array_equal(y4[15:], y[15:])\n else:\n assert_array_equal(y4[:, 15:], y[:, 15:])\n # Overwrite in a transpose.\n y5 = y.copy()\n out5 = y5.T\n result5 = np.fft.fft(y5, axis=axis, out=out5)\n assert result5 is out5\n assert_array_equal(result5, expected1)\n # Reverse strides.\n y6 = y.copy()\n out6 = y6[::-1] if axis == 0 else y6[:, ::-1]\n result6 = np.fft.fft(y6, axis=axis, out=out6)\n assert result6 is out6\n assert_array_equal(result6, expected1)\n\n def test_fft_bad_out(self):\n x = 
np.arange(30.)\n with pytest.raises(TypeError, match="must be of ArrayType"):\n np.fft.fft(x, out="")\n with pytest.raises(ValueError, match="has wrong shape"):\n np.fft.fft(x, out=np.zeros_like(x).reshape(5, -1))\n with pytest.raises(TypeError, match="Cannot cast"):\n np.fft.fft(x, out=np.zeros_like(x, dtype=float))\n\n @pytest.mark.parametrize('norm', (None, 'backward', 'ortho', 'forward'))\n def test_ifft(self, norm):\n x = random(30) + 1j * random(30)\n assert_allclose(\n x, np.fft.ifft(np.fft.fft(x, norm=norm), norm=norm),\n atol=1e-6)\n # Ensure we get the correct error message\n with pytest.raises(ValueError,\n match='Invalid number of FFT data points'):\n np.fft.ifft([], norm=norm)\n\n def test_fft2(self):\n x = random((30, 20)) + 1j * random((30, 20))\n assert_allclose(np.fft.fft(np.fft.fft(x, axis=1), axis=0),\n np.fft.fft2(x), atol=1e-6)\n assert_allclose(np.fft.fft2(x),\n np.fft.fft2(x, norm="backward"), atol=1e-6)\n assert_allclose(np.fft.fft2(x) / np.sqrt(30 * 20),\n np.fft.fft2(x, norm="ortho"), atol=1e-6)\n assert_allclose(np.fft.fft2(x) / (30. * 20.),\n np.fft.fft2(x, norm="forward"), atol=1e-6)\n\n def test_ifft2(self):\n x = random((30, 20)) + 1j * random((30, 20))\n assert_allclose(np.fft.ifft(np.fft.ifft(x, axis=1), axis=0),\n np.fft.ifft2(x), atol=1e-6)\n assert_allclose(np.fft.ifft2(x),\n np.fft.ifft2(x, norm="backward"), atol=1e-6)\n assert_allclose(np.fft.ifft2(x) * np.sqrt(30 * 20),\n np.fft.ifft2(x, norm="ortho"), atol=1e-6)\n assert_allclose(np.fft.ifft2(x) * (30. * 20.),\n np.fft.ifft2(x, norm="forward"), atol=1e-6)\n\n def test_fftn(self):\n x = random((30, 20, 10)) + 1j * random((30, 20, 10))\n assert_allclose(\n np.fft.fft(np.fft.fft(np.fft.fft(x, axis=2), axis=1), axis=0),\n np.fft.fftn(x), atol=1e-6)\n assert_allclose(np.fft.fftn(x),\n np.fft.fftn(x, norm="backward"), atol=1e-6)\n assert_allclose(np.fft.fftn(x) / np.sqrt(30 * 20 * 10),\n np.fft.fftn(x, norm="ortho"), atol=1e-6)\n assert_allclose(np.fft.fftn(x) / (30. * 20. 
* 10.),\n np.fft.fftn(x, norm="forward"), atol=1e-6)\n\n def test_ifftn(self):\n x = random((30, 20, 10)) + 1j * random((30, 20, 10))\n assert_allclose(\n np.fft.ifft(np.fft.ifft(np.fft.ifft(x, axis=2), axis=1), axis=0),\n np.fft.ifftn(x), atol=1e-6)\n assert_allclose(np.fft.ifftn(x),\n np.fft.ifftn(x, norm="backward"), atol=1e-6)\n assert_allclose(np.fft.ifftn(x) * np.sqrt(30 * 20 * 10),\n np.fft.ifftn(x, norm="ortho"), atol=1e-6)\n assert_allclose(np.fft.ifftn(x) * (30. * 20. * 10.),\n np.fft.ifftn(x, norm="forward"), atol=1e-6)\n\n def test_rfft(self):\n x = random(30)\n for n in [x.size, 2 * x.size]:\n for norm in [None, 'backward', 'ortho', 'forward']:\n assert_allclose(\n np.fft.fft(x, n=n, norm=norm)[:(n // 2 + 1)],\n np.fft.rfft(x, n=n, norm=norm), atol=1e-6)\n assert_allclose(\n np.fft.rfft(x, n=n),\n np.fft.rfft(x, n=n, norm="backward"), atol=1e-6)\n assert_allclose(\n np.fft.rfft(x, n=n) / np.sqrt(n),\n np.fft.rfft(x, n=n, norm="ortho"), atol=1e-6)\n assert_allclose(\n np.fft.rfft(x, n=n) / n,\n np.fft.rfft(x, n=n, norm="forward"), atol=1e-6)\n\n def test_rfft_even(self):\n x = np.arange(8)\n n = 4\n y = np.fft.rfft(x, n)\n assert_allclose(y, np.fft.fft(x[:n])[:n // 2 + 1], rtol=1e-14)\n\n def test_rfft_odd(self):\n x = np.array([1, 0, 2, 3, -3])\n y = np.fft.rfft(x)\n assert_allclose(y, np.fft.fft(x)[:3], rtol=1e-14)\n\n def test_irfft(self):\n x = random(30)\n assert_allclose(x, np.fft.irfft(np.fft.rfft(x)), atol=1e-6)\n assert_allclose(x, np.fft.irfft(np.fft.rfft(x, norm="backward"),\n norm="backward"), atol=1e-6)\n assert_allclose(x, np.fft.irfft(np.fft.rfft(x, norm="ortho"),\n norm="ortho"), atol=1e-6)\n assert_allclose(x, np.fft.irfft(np.fft.rfft(x, norm="forward"),\n norm="forward"), atol=1e-6)\n\n def test_rfft2(self):\n x = random((30, 20))\n assert_allclose(np.fft.fft2(x)[:, :11], np.fft.rfft2(x), atol=1e-6)\n assert_allclose(np.fft.rfft2(x),\n np.fft.rfft2(x, norm="backward"), atol=1e-6)\n assert_allclose(np.fft.rfft2(x) / np.sqrt(30 * 20),\n 
np.fft.rfft2(x, norm="ortho"), atol=1e-6)\n assert_allclose(np.fft.rfft2(x) / (30. * 20.),\n np.fft.rfft2(x, norm="forward"), atol=1e-6)\n\n def test_irfft2(self):\n x = random((30, 20))\n assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x)), atol=1e-6)\n assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x, norm="backward"),\n norm="backward"), atol=1e-6)\n assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x, norm="ortho"),\n norm="ortho"), atol=1e-6)\n assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x, norm="forward"),\n norm="forward"), atol=1e-6)\n\n def test_rfftn(self):\n x = random((30, 20, 10))\n assert_allclose(np.fft.fftn(x)[:, :, :6], np.fft.rfftn(x), atol=1e-6)\n assert_allclose(np.fft.rfftn(x),\n np.fft.rfftn(x, norm="backward"), atol=1e-6)\n assert_allclose(np.fft.rfftn(x) / np.sqrt(30 * 20 * 10),\n np.fft.rfftn(x, norm="ortho"), atol=1e-6)\n assert_allclose(np.fft.rfftn(x) / (30. * 20. * 10.),\n np.fft.rfftn(x, norm="forward"), atol=1e-6)\n # Regression test for gh-27159\n x = np.ones((2, 3))\n result = np.fft.rfftn(x, axes=(0, 0, 1), s=(10, 20, 40))\n assert result.shape == (10, 21)\n expected = np.fft.fft(np.fft.fft(np.fft.rfft(x, axis=1, n=40),\n axis=0, n=20), axis=0, n=10)\n assert expected.shape == (10, 21)\n assert_allclose(result, expected, atol=1e-6)\n\n def test_irfftn(self):\n x = random((30, 20, 10))\n assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x)), atol=1e-6)\n assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x, norm="backward"),\n norm="backward"), atol=1e-6)\n assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x, norm="ortho"),\n norm="ortho"), atol=1e-6)\n assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x, norm="forward"),\n norm="forward"), atol=1e-6)\n\n def test_hfft(self):\n x = random(14) + 1j * random(14)\n x_herm = np.concatenate((random(1), x, random(1)))\n x = np.concatenate((x_herm, x[::-1].conj()))\n assert_allclose(np.fft.fft(x), np.fft.hfft(x_herm), atol=1e-6)\n assert_allclose(np.fft.hfft(x_herm),\n np.fft.hfft(x_herm, norm="backward"), 
atol=1e-6)\n assert_allclose(np.fft.hfft(x_herm) / np.sqrt(30),\n np.fft.hfft(x_herm, norm="ortho"), atol=1e-6)\n assert_allclose(np.fft.hfft(x_herm) / 30.,\n np.fft.hfft(x_herm, norm="forward"), atol=1e-6)\n\n def test_ihfft(self):\n x = random(14) + 1j * random(14)\n x_herm = np.concatenate((random(1), x, random(1)))\n x = np.concatenate((x_herm, x[::-1].conj()))\n assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm)), atol=1e-6)\n assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm,\n norm="backward"), norm="backward"), atol=1e-6)\n assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm,\n norm="ortho"), norm="ortho"), atol=1e-6)\n assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm,\n norm="forward"), norm="forward"), atol=1e-6)\n\n @pytest.mark.parametrize("op", [np.fft.fftn, np.fft.ifftn,\n np.fft.rfftn, np.fft.irfftn])\n def test_axes(self, op):\n x = random((30, 20, 10))\n axes = [(0, 1, 2), (0, 2, 1), (1, 0, 2), (1, 2, 0), (2, 0, 1), (2, 1, 0)]\n for a in axes:\n op_tr = op(np.transpose(x, a))\n tr_op = np.transpose(op(x, axes=a), a)\n assert_allclose(op_tr, tr_op, atol=1e-6)\n\n @pytest.mark.parametrize("op", [np.fft.fftn, np.fft.ifftn,\n np.fft.fft2, np.fft.ifft2])\n def test_s_negative_1(self, op):\n x = np.arange(100).reshape(10, 10)\n # should use the whole input array along the first axis\n assert op(x, s=(-1, 5), axes=(0, 1)).shape == (10, 5)\n\n @pytest.mark.parametrize("op", [np.fft.fftn, np.fft.ifftn,\n np.fft.rfftn, np.fft.irfftn])\n def test_s_axes_none(self, op):\n x = np.arange(100).reshape(10, 10)\n with pytest.warns(match='`axes` should not be `None` if `s`'):\n op(x, s=(-1, 5))\n\n @pytest.mark.parametrize("op", [np.fft.fft2, np.fft.ifft2])\n def test_s_axes_none_2D(self, op):\n x = np.arange(100).reshape(10, 10)\n with pytest.warns(match='`axes` should not be `None` if `s`'):\n op(x, s=(-1, 5), axes=None)\n\n @pytest.mark.parametrize("op", [np.fft.fftn, np.fft.ifftn,\n np.fft.rfftn, np.fft.irfftn,\n np.fft.fft2, 
np.fft.ifft2])\n def test_s_contains_none(self, op):\n x = random((30, 20, 10))\n with pytest.warns(match='array containing `None` values to `s`'):\n op(x, s=(10, None, 10), axes=(0, 1, 2))\n\n def test_all_1d_norm_preserving(self):\n # verify that round-trip transforms are norm-preserving\n x = random(30)\n x_norm = np.linalg.norm(x)\n n = x.size * 2\n func_pairs = [(np.fft.fft, np.fft.ifft),\n (np.fft.rfft, np.fft.irfft),\n # hfft: order so the first function takes x.size samples\n # (necessary for comparison to x_norm above)\n (np.fft.ihfft, np.fft.hfft),\n ]\n for forw, back in func_pairs:\n for n in [x.size, 2 * x.size]:\n for norm in [None, 'backward', 'ortho', 'forward']:\n tmp = forw(x, n=n, norm=norm)\n tmp = back(tmp, n=n, norm=norm)\n assert_allclose(x_norm,\n np.linalg.norm(tmp), atol=1e-6)\n\n @pytest.mark.parametrize("axes", [(0, 1), (0, 2), None])\n @pytest.mark.parametrize("dtype", (complex, float))\n @pytest.mark.parametrize("transpose", (True, False))\n def test_fftn_out_argument(self, dtype, transpose, axes):\n def zeros_like(x):\n if transpose:\n return np.zeros_like(x.T).T\n else:\n return np.zeros_like(x)\n\n # tests below only test the out parameter\n if dtype is complex:\n x = random((10, 5, 6)) + 1j * random((10, 5, 6))\n fft, ifft = np.fft.fftn, np.fft.ifftn\n else:\n x = random((10, 5, 6))\n fft, ifft = np.fft.rfftn, np.fft.irfftn\n\n expected = fft(x, axes=axes)\n out = zeros_like(expected)\n result = fft(x, out=out, axes=axes)\n assert result is out\n assert_array_equal(result, expected)\n\n expected2 = ifft(expected, axes=axes)\n out2 = out if dtype is complex else zeros_like(expected2)\n result2 = ifft(out, out=out2, axes=axes)\n assert result2 is out2\n assert_array_equal(result2, expected2)\n\n @pytest.mark.parametrize("fft", [np.fft.fftn, np.fft.ifftn, np.fft.rfftn])\n def test_fftn_out_and_s_interaction(self, fft):\n # With s, shape varies, so generally one cannot pass in out.\n if fft is np.fft.rfftn:\n x = random((10, 5, 6))\n 
else:\n x = random((10, 5, 6)) + 1j * random((10, 5, 6))\n with pytest.raises(ValueError, match="has wrong shape"):\n fft(x, out=np.zeros_like(x), s=(3, 3, 3), axes=(0, 1, 2))\n # Except on the first axis done (which is the last of axes).\n s = (10, 5, 5)\n expected = fft(x, s=s, axes=(0, 1, 2))\n out = np.zeros_like(expected)\n result = fft(x, s=s, axes=(0, 1, 2), out=out)\n assert result is out\n assert_array_equal(result, expected)\n\n @pytest.mark.parametrize("s", [(9, 5, 5), (3, 3, 3)])\n def test_irfftn_out_and_s_interaction(self, s):\n # Since for irfftn, the output is real and thus cannot be used for\n # intermediate steps, it should always work.\n x = random((9, 5, 6, 2)) + 1j * random((9, 5, 6, 2))\n expected = np.fft.irfftn(x, s=s, axes=(0, 1, 2))\n out = np.zeros_like(expected)\n result = np.fft.irfftn(x, s=s, axes=(0, 1, 2), out=out)\n assert result is out\n assert_array_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "dtype",\n [np.float32, np.float64, np.complex64, np.complex128])\n@pytest.mark.parametrize("order", ["F", 'non-contiguous'])\n@pytest.mark.parametrize(\n "fft",\n [np.fft.fft, np.fft.fft2, np.fft.fftn,\n np.fft.ifft, np.fft.ifft2, np.fft.ifftn])\ndef test_fft_with_order(dtype, order, fft):\n # Check that FFT/IFFT produces identical results for C, Fortran and\n # non contiguous arrays\n rng = np.random.RandomState(42)\n X = rng.rand(8, 7, 13).astype(dtype, copy=False)\n # See discussion in pull/14178\n _tol = 8.0 * np.sqrt(np.log2(X.size)) * np.finfo(X.dtype).eps\n if order == 'F':\n Y = np.asfortranarray(X)\n else:\n # Make a non contiguous array\n Y = X[::-1]\n X = np.ascontiguousarray(X[::-1])\n\n if fft.__name__.endswith('fft'):\n for axis in range(3):\n X_res = fft(X, axis=axis)\n Y_res = fft(Y, axis=axis)\n assert_allclose(X_res, Y_res, atol=_tol, rtol=_tol)\n elif fft.__name__.endswith(('fft2', 'fftn')):\n axes = [(0, 1), (1, 2), (0, 2)]\n if fft.__name__.endswith('fftn'):\n axes.extend([(0,), (1,), (2,), None])\n for ax 
in axes:\n X_res = fft(X, axes=ax)\n Y_res = fft(Y, axes=ax)\n assert_allclose(X_res, Y_res, atol=_tol, rtol=_tol)\n else:\n raise ValueError\n\n\n@pytest.mark.parametrize("order", ["F", "C"])\n@pytest.mark.parametrize("n", [None, 7, 12])\ndef test_fft_output_order(order, n):\n rng = np.random.RandomState(42)\n x = rng.rand(10)\n x = np.asarray(x, dtype=np.complex64, order=order)\n res = np.fft.fft(x, n=n)\n assert res.flags.c_contiguous == x.flags.c_contiguous\n assert res.flags.f_contiguous == x.flags.f_contiguous\n\n@pytest.mark.skipif(IS_WASM, reason="Cannot start thread")\nclass TestFFTThreadSafe:\n threads = 16\n input_shape = (800, 200)\n\n def _test_mtsame(self, func, *args):\n def worker(args, q):\n q.put(func(*args))\n\n q = queue.Queue()\n expected = func(*args)\n\n # Spin off a bunch of threads to call the same function simultaneously\n t = [threading.Thread(target=worker, args=(args, q))\n for i in range(self.threads)]\n [x.start() for x in t]\n\n [x.join() for x in t]\n # Make sure all threads returned the correct value\n for i in range(self.threads):\n assert_array_equal(q.get(timeout=5), expected,\n 'Function returned wrong value in multithreaded context')\n\n def test_fft(self):\n a = np.ones(self.input_shape) * 1 + 0j\n self._test_mtsame(np.fft.fft, a)\n\n def test_ifft(self):\n a = np.ones(self.input_shape) * 1 + 0j\n self._test_mtsame(np.fft.ifft, a)\n\n def test_rfft(self):\n a = np.ones(self.input_shape)\n self._test_mtsame(np.fft.rfft, a)\n\n def test_irfft(self):\n a = np.ones(self.input_shape) * 1 + 0j\n self._test_mtsame(np.fft.irfft, a)\n\n\ndef test_irfft_with_n_1_regression():\n # Regression test for gh-25661\n x = np.arange(10)\n np.fft.irfft(x, n=1)\n np.fft.hfft(x, n=1)\n np.fft.irfft(np.array([0], complex), n=10)\n\n\ndef test_irfft_with_n_large_regression():\n # Regression test for gh-25679\n x = np.arange(5) * (1 + 1j)\n result = np.fft.hfft(x, n=10)\n expected = np.array([20., 9.91628173, -11.8819096, 7.1048486,\n -6.62459848, 
4., -3.37540152, -0.16057669,\n 1.8819096, -20.86055364])\n assert_allclose(result, expected)\n\n\n@pytest.mark.parametrize("fft", [\n np.fft.fft, np.fft.ifft, np.fft.rfft, np.fft.irfft\n])\n@pytest.mark.parametrize("data", [\n np.array([False, True, False]),\n np.arange(10, dtype=np.uint8),\n np.arange(5, dtype=np.int16),\n])\ndef test_fft_with_integer_or_bool_input(data, fft):\n # Regression test for gh-25819\n result = fft(data)\n float_data = data.astype(np.result_type(data, 1.))\n expected = fft(float_data)\n assert_array_equal(result, expected)\n | .venv\Lib\site-packages\numpy\fft\tests\test_pocketfft.py | test_pocketfft.py | Python | 25,035 | 0.95 | 0.169779 | 0.06501 | awesome-app | 747 | 2025-03-30T05:27:39.888968 | GPL-3.0 | true | 9ae85d31a45366f0d218c34ed518f8dc |
\n\n | .venv\Lib\site-packages\numpy\fft\tests\__pycache__\test_helper.cpython-313.pyc | test_helper.cpython-313.pyc | Other | 9,087 | 0.95 | 0.013158 | 0 | node-utils | 162 | 2023-07-14T03:07:28.658003 | GPL-3.0 | true | 43bd243756274a37d1b3261cf10acd2f |
\n\n | .venv\Lib\site-packages\numpy\fft\tests\__pycache__\test_pocketfft.cpython-313.pyc | test_pocketfft.cpython-313.pyc | Other | 46,319 | 0.8 | 0.002865 | 0.005988 | python-kit | 482 | 2024-10-15T18:27:01.681906 | GPL-3.0 | true | 73b3fe6dcf39b9218c0ab648e6f6a04d |
\n\n | .venv\Lib\site-packages\numpy\fft\tests\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 190 | 0.7 | 0 | 0 | node-utils | 582 | 2024-10-30T10:28:03.253178 | MIT | true | ce7db631f9709e597c22b22e9c23f37b |
\n\n | .venv\Lib\site-packages\numpy\fft\__pycache__\helper.cpython-313.pyc | helper.cpython-313.pyc | Other | 913 | 0.85 | 0 | 0 | python-kit | 704 | 2023-09-08T19:11:54.556154 | GPL-3.0 | false | ec9c4005d490733fe1db97a293e01668 |
\n\n | .venv\Lib\site-packages\numpy\fft\__pycache__\_helper.cpython-313.pyc | _helper.cpython-313.pyc | Other | 7,566 | 0.95 | 0.069652 | 0 | python-kit | 398 | 2024-10-18T01:28:52.178096 | MIT | false | d740aee709d2e6f63977623d26d62aa1 |
\n\n | .venv\Lib\site-packages\numpy\fft\__pycache__\_pocketfft.cpython-313.pyc | _pocketfft.cpython-313.pyc | Other | 60,105 | 0.75 | 0.07788 | 0.006462 | react-lib | 61 | 2024-05-25T03:53:46.834174 | Apache-2.0 | false | d2a46cd6f84713e1faf96ac3e16c9f28 |
\n\n | .venv\Lib\site-packages\numpy\fft\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 8,403 | 0.95 | 0.084112 | 0.006024 | vue-tools | 481 | 2024-11-09T19:30:41.815853 | Apache-2.0 | false | d3abac69afb3112bf4a30fa52514b2ec |
from ._array_utils_impl import ( # noqa: F401\n __all__,\n __doc__,\n byte_bounds,\n normalize_axis_index,\n normalize_axis_tuple,\n)\n | .venv\Lib\site-packages\numpy\lib\array_utils.py | array_utils.py | Python | 151 | 0.95 | 0 | 0 | react-lib | 482 | 2024-11-11T19:17:30.383402 | Apache-2.0 | false | b865d1600d995b85baa34514b29e33c0 |
from ._array_utils_impl import (\n __all__ as __all__,\n)\nfrom ._array_utils_impl import (\n byte_bounds as byte_bounds,\n)\nfrom ._array_utils_impl import (\n normalize_axis_index as normalize_axis_index,\n)\nfrom ._array_utils_impl import (\n normalize_axis_tuple as normalize_axis_tuple,\n)\n | .venv\Lib\site-packages\numpy\lib\array_utils.pyi | array_utils.pyi | Other | 308 | 0.85 | 0 | 0 | vue-tools | 432 | 2024-01-09T19:11:46.814242 | MIT | false | 071982cb93277cf619bb1317ecb98d1b |
from ._format_impl import ( # noqa: F401\n ARRAY_ALIGN,\n BUFFER_SIZE,\n EXPECTED_KEYS,\n GROWTH_AXIS_MAX_DIGITS,\n MAGIC_LEN,\n MAGIC_PREFIX,\n __all__,\n __doc__,\n descr_to_dtype,\n drop_metadata,\n dtype_to_descr,\n header_data_from_array_1_0,\n isfileobj,\n magic,\n open_memmap,\n read_array,\n read_array_header_1_0,\n read_array_header_2_0,\n read_magic,\n write_array,\n write_array_header_1_0,\n write_array_header_2_0,\n)\n | .venv\Lib\site-packages\numpy\lib\format.py | format.py | Python | 501 | 0.95 | 0 | 0 | node-utils | 839 | 2023-11-01T22:31:37.004782 | GPL-3.0 | false | 9f856071e1809634b4114db977895ea4 |
from ._format_impl import (\n ARRAY_ALIGN as ARRAY_ALIGN,\n)\nfrom ._format_impl import (\n BUFFER_SIZE as BUFFER_SIZE,\n)\nfrom ._format_impl import (\n EXPECTED_KEYS as EXPECTED_KEYS,\n)\nfrom ._format_impl import (\n GROWTH_AXIS_MAX_DIGITS as GROWTH_AXIS_MAX_DIGITS,\n)\nfrom ._format_impl import (\n MAGIC_LEN as MAGIC_LEN,\n)\nfrom ._format_impl import (\n MAGIC_PREFIX as MAGIC_PREFIX,\n)\nfrom ._format_impl import (\n __all__ as __all__,\n)\nfrom ._format_impl import (\n __doc__ as __doc__,\n)\nfrom ._format_impl import (\n descr_to_dtype as descr_to_dtype,\n)\nfrom ._format_impl import (\n drop_metadata as drop_metadata,\n)\nfrom ._format_impl import (\n dtype_to_descr as dtype_to_descr,\n)\nfrom ._format_impl import (\n header_data_from_array_1_0 as header_data_from_array_1_0,\n)\nfrom ._format_impl import (\n isfileobj as isfileobj,\n)\nfrom ._format_impl import (\n magic as magic,\n)\nfrom ._format_impl import (\n open_memmap as open_memmap,\n)\nfrom ._format_impl import (\n read_array as read_array,\n)\nfrom ._format_impl import (\n read_array_header_1_0 as read_array_header_1_0,\n)\nfrom ._format_impl import (\n read_array_header_2_0 as read_array_header_2_0,\n)\nfrom ._format_impl import (\n read_magic as read_magic,\n)\nfrom ._format_impl import (\n write_array as write_array,\n)\nfrom ._format_impl import (\n write_array_header_1_0 as write_array_header_1_0,\n)\nfrom ._format_impl import (\n write_array_header_2_0 as write_array_header_2_0,\n)\n | .venv\Lib\site-packages\numpy\lib\format.pyi | format.pyi | Other | 1,548 | 0.85 | 0 | 0 | awesome-app | 86 | 2023-12-29T15:18:31.881558 | GPL-3.0 | false | 7071a53df1876fc7f672838f3628e220 |
"""\nIntrospection helper functions.\n"""\n\n__all__ = ['opt_func_info']\n\n\ndef opt_func_info(func_name=None, signature=None):\n """\n Returns a dictionary containing the currently supported CPU dispatched\n features for all optimized functions.\n\n Parameters\n ----------\n func_name : str (optional)\n Regular expression to filter by function name.\n\n signature : str (optional)\n Regular expression to filter by data type.\n\n Returns\n -------\n dict\n A dictionary where keys are optimized function names and values are\n nested dictionaries indicating supported targets based on data types.\n\n Examples\n --------\n Retrieve dispatch information for functions named 'add' or 'sub' and\n data types 'float64' or 'float32':\n\n >>> import numpy as np\n >>> dict = np.lib.introspect.opt_func_info(\n ... func_name="add|abs", signature="float64|complex64"\n ... )\n >>> import json\n >>> print(json.dumps(dict, indent=2))\n {\n "absolute": {\n "dd": {\n "current": "SSE41",\n "available": "SSE41 baseline(SSE SSE2 SSE3)"\n },\n "Ff": {\n "current": "FMA3__AVX2",\n "available": "AVX512F FMA3__AVX2 baseline(SSE SSE2 SSE3)"\n },\n "Dd": {\n "current": "FMA3__AVX2",\n "available": "AVX512F FMA3__AVX2 baseline(SSE SSE2 SSE3)"\n }\n },\n "add": {\n "ddd": {\n "current": "FMA3__AVX2",\n "available": "FMA3__AVX2 baseline(SSE SSE2 SSE3)"\n },\n "FFF": {\n "current": "FMA3__AVX2",\n "available": "FMA3__AVX2 baseline(SSE SSE2 SSE3)"\n }\n }\n }\n\n """\n import re\n\n from numpy._core._multiarray_umath import __cpu_targets_info__ as targets\n from numpy._core._multiarray_umath import dtype\n\n if func_name is not None:\n func_pattern = re.compile(func_name)\n matching_funcs = {\n k: v for k, v in targets.items()\n if func_pattern.search(k)\n }\n else:\n matching_funcs = targets\n\n if signature is not None:\n sig_pattern = re.compile(signature)\n matching_sigs = {}\n for k, v in matching_funcs.items():\n matching_chars = {}\n for chars, targets in v.items():\n if any(\n 
sig_pattern.search(c) or sig_pattern.search(dtype(c).name)\n for c in chars\n ):\n matching_chars[chars] = targets\n if matching_chars:\n matching_sigs[k] = matching_chars\n else:\n matching_sigs = matching_funcs\n return matching_sigs\n | .venv\Lib\site-packages\numpy\lib\introspect.py | introspect.py | Python | 2,844 | 0.85 | 0.147368 | 0 | node-utils | 641 | 2025-02-17T21:50:41.994515 | Apache-2.0 | false | f884dc39bae045204843f2f9e8ab2f79 |
__all__ = ["opt_func_info"]\n\ndef opt_func_info(func_name: str | None = None, signature: str | None = None) -> dict[str, dict[str, dict[str, str]]]: ...\n | .venv\Lib\site-packages\numpy\lib\introspect.pyi | introspect.pyi | Other | 155 | 0.85 | 0.333333 | 0 | python-kit | 698 | 2023-10-16T06:56:42.093774 | MIT | false | 9d1949f2e4e3e6f90bd7bc6563d8634f |
"""\nMixin classes for custom array types that don't inherit from ndarray.\n"""\n\n__all__ = ['NDArrayOperatorsMixin']\n\n\ndef _disables_array_ufunc(obj):\n """True when __array_ufunc__ is set to None."""\n try:\n return obj.__array_ufunc__ is None\n except AttributeError:\n return False\n\n\ndef _binary_method(ufunc, name):\n """Implement a forward binary method with a ufunc, e.g., __add__."""\n def func(self, other):\n if _disables_array_ufunc(other):\n return NotImplemented\n return ufunc(self, other)\n func.__name__ = f'__{name}__'\n return func\n\n\ndef _reflected_binary_method(ufunc, name):\n """Implement a reflected binary method with a ufunc, e.g., __radd__."""\n def func(self, other):\n if _disables_array_ufunc(other):\n return NotImplemented\n return ufunc(other, self)\n func.__name__ = f'__r{name}__'\n return func\n\n\ndef _inplace_binary_method(ufunc, name):\n """Implement an in-place binary method with a ufunc, e.g., __iadd__."""\n def func(self, other):\n return ufunc(self, other, out=(self,))\n func.__name__ = f'__i{name}__'\n return func\n\n\ndef _numeric_methods(ufunc, name):\n """Implement forward, reflected and inplace binary methods with a ufunc."""\n return (_binary_method(ufunc, name),\n _reflected_binary_method(ufunc, name),\n _inplace_binary_method(ufunc, name))\n\n\ndef _unary_method(ufunc, name):\n """Implement a unary special method with a ufunc."""\n def func(self):\n return ufunc(self)\n func.__name__ = f'__{name}__'\n return func\n\n\nclass NDArrayOperatorsMixin:\n """Mixin defining all operator special methods using __array_ufunc__.\n\n This class implements the special methods for almost all of Python's\n builtin operators defined in the `operator` module, including comparisons\n (``==``, ``>``, etc.) 
and arithmetic (``+``, ``*``, ``-``, etc.), by\n deferring to the ``__array_ufunc__`` method, which subclasses must\n implement.\n\n It is useful for writing classes that do not inherit from `numpy.ndarray`,\n but that should support arithmetic and numpy universal functions like\n arrays as described in :external+neps:doc:`nep-0013-ufunc-overrides`.\n\n As an trivial example, consider this implementation of an ``ArrayLike``\n class that simply wraps a NumPy array and ensures that the result of any\n arithmetic operation is also an ``ArrayLike`` object:\n\n >>> import numbers\n >>> class ArrayLike(np.lib.mixins.NDArrayOperatorsMixin):\n ... def __init__(self, value):\n ... self.value = np.asarray(value)\n ...\n ... # One might also consider adding the built-in list type to this\n ... # list, to support operations like np.add(array_like, list)\n ... _HANDLED_TYPES = (np.ndarray, numbers.Number)\n ...\n ... def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):\n ... out = kwargs.get('out', ())\n ... for x in inputs + out:\n ... # Only support operations with instances of\n ... # _HANDLED_TYPES. Use ArrayLike instead of type(self)\n ... # for isinstance to allow subclasses that don't\n ... # override __array_ufunc__ to handle ArrayLike objects.\n ... if not isinstance(\n ... x, self._HANDLED_TYPES + (ArrayLike,)\n ... ):\n ... return NotImplemented\n ...\n ... # Defer to the implementation of the ufunc\n ... # on unwrapped values.\n ... inputs = tuple(x.value if isinstance(x, ArrayLike) else x\n ... for x in inputs)\n ... if out:\n ... kwargs['out'] = tuple(\n ... x.value if isinstance(x, ArrayLike) else x\n ... for x in out)\n ... result = getattr(ufunc, method)(*inputs, **kwargs)\n ...\n ... if type(result) is tuple:\n ... # multiple return values\n ... return tuple(type(self)(x) for x in result)\n ... elif method == 'at':\n ... # no return value\n ... return None\n ... else:\n ... # one return value\n ... return type(self)(result)\n ...\n ... 
def __repr__(self):\n ... return '%s(%r)' % (type(self).__name__, self.value)\n\n In interactions between ``ArrayLike`` objects and numbers or numpy arrays,\n the result is always another ``ArrayLike``:\n\n >>> x = ArrayLike([1, 2, 3])\n >>> x - 1\n ArrayLike(array([0, 1, 2]))\n >>> 1 - x\n ArrayLike(array([ 0, -1, -2]))\n >>> np.arange(3) - x\n ArrayLike(array([-1, -1, -1]))\n >>> x - np.arange(3)\n ArrayLike(array([1, 1, 1]))\n\n Note that unlike ``numpy.ndarray``, ``ArrayLike`` does not allow operations\n with arbitrary, unrecognized types. This ensures that interactions with\n ArrayLike preserve a well-defined casting hierarchy.\n\n """\n from numpy._core import umath as um\n\n __slots__ = ()\n # Like np.ndarray, this mixin class implements "Option 1" from the ufunc\n # overrides NEP.\n\n # comparisons don't have reflected and in-place versions\n __lt__ = _binary_method(um.less, 'lt')\n __le__ = _binary_method(um.less_equal, 'le')\n __eq__ = _binary_method(um.equal, 'eq')\n __ne__ = _binary_method(um.not_equal, 'ne')\n __gt__ = _binary_method(um.greater, 'gt')\n __ge__ = _binary_method(um.greater_equal, 'ge')\n\n # numeric methods\n __add__, __radd__, __iadd__ = _numeric_methods(um.add, 'add')\n __sub__, __rsub__, __isub__ = _numeric_methods(um.subtract, 'sub')\n __mul__, __rmul__, __imul__ = _numeric_methods(um.multiply, 'mul')\n __matmul__, __rmatmul__, __imatmul__ = _numeric_methods(\n um.matmul, 'matmul')\n __truediv__, __rtruediv__, __itruediv__ = _numeric_methods(\n um.true_divide, 'truediv')\n __floordiv__, __rfloordiv__, __ifloordiv__ = _numeric_methods(\n um.floor_divide, 'floordiv')\n __mod__, __rmod__, __imod__ = _numeric_methods(um.remainder, 'mod')\n __divmod__ = _binary_method(um.divmod, 'divmod')\n __rdivmod__ = _reflected_binary_method(um.divmod, 'divmod')\n # __idivmod__ does not exist\n # TODO: handle the optional third argument for __pow__?\n __pow__, __rpow__, __ipow__ = _numeric_methods(um.power, 'pow')\n __lshift__, __rlshift__, 
__ilshift__ = _numeric_methods(\n um.left_shift, 'lshift')\n __rshift__, __rrshift__, __irshift__ = _numeric_methods(\n um.right_shift, 'rshift')\n __and__, __rand__, __iand__ = _numeric_methods(um.bitwise_and, 'and')\n __xor__, __rxor__, __ixor__ = _numeric_methods(um.bitwise_xor, 'xor')\n __or__, __ror__, __ior__ = _numeric_methods(um.bitwise_or, 'or')\n\n # unary methods\n __neg__ = _unary_method(um.negative, 'neg')\n __pos__ = _unary_method(um.positive, 'pos')\n __abs__ = _unary_method(um.absolute, 'abs')\n __invert__ = _unary_method(um.invert, 'invert')\n | .venv\Lib\site-packages\numpy\lib\mixins.py | mixins.py | Python | 7,380 | 0.95 | 0.194444 | 0.045752 | python-kit | 643 | 2023-10-10T21:32:13.021448 | Apache-2.0 | false | 4f5d88357e980305823d6d9c780f7cc8 |
from abc import ABC, abstractmethod\nfrom typing import Any\nfrom typing import Literal as L\n\nfrom numpy import ufunc\n\n__all__ = ["NDArrayOperatorsMixin"]\n\n# NOTE: `NDArrayOperatorsMixin` is not formally an abstract baseclass,\n# even though it's reliant on subclasses implementing `__array_ufunc__`\n\n# NOTE: The accepted input- and output-types of the various dunders are\n# completely dependent on how `__array_ufunc__` is implemented.\n# As such, only little type safety can be provided here.\n\nclass NDArrayOperatorsMixin(ABC):\n @abstractmethod\n def __array_ufunc__(\n self,\n ufunc: ufunc,\n method: L["__call__", "reduce", "reduceat", "accumulate", "outer", "at"],\n *inputs: Any,\n **kwargs: Any,\n ) -> Any: ...\n def __lt__(self, other: Any) -> Any: ...\n def __le__(self, other: Any) -> Any: ...\n def __eq__(self, other: Any) -> Any: ...\n def __ne__(self, other: Any) -> Any: ...\n def __gt__(self, other: Any) -> Any: ...\n def __ge__(self, other: Any) -> Any: ...\n def __add__(self, other: Any) -> Any: ...\n def __radd__(self, other: Any) -> Any: ...\n def __iadd__(self, other: Any) -> Any: ...\n def __sub__(self, other: Any) -> Any: ...\n def __rsub__(self, other: Any) -> Any: ...\n def __isub__(self, other: Any) -> Any: ...\n def __mul__(self, other: Any) -> Any: ...\n def __rmul__(self, other: Any) -> Any: ...\n def __imul__(self, other: Any) -> Any: ...\n def __matmul__(self, other: Any) -> Any: ...\n def __rmatmul__(self, other: Any) -> Any: ...\n def __imatmul__(self, other: Any) -> Any: ...\n def __truediv__(self, other: Any) -> Any: ...\n def __rtruediv__(self, other: Any) -> Any: ...\n def __itruediv__(self, other: Any) -> Any: ...\n def __floordiv__(self, other: Any) -> Any: ...\n def __rfloordiv__(self, other: Any) -> Any: ...\n def __ifloordiv__(self, other: Any) -> Any: ...\n def __mod__(self, other: Any) -> Any: ...\n def __rmod__(self, other: Any) -> Any: ...\n def __imod__(self, other: Any) -> Any: ...\n def __divmod__(self, other: Any) 
-> Any: ...\n def __rdivmod__(self, other: Any) -> Any: ...\n def __pow__(self, other: Any) -> Any: ...\n def __rpow__(self, other: Any) -> Any: ...\n def __ipow__(self, other: Any) -> Any: ...\n def __lshift__(self, other: Any) -> Any: ...\n def __rlshift__(self, other: Any) -> Any: ...\n def __ilshift__(self, other: Any) -> Any: ...\n def __rshift__(self, other: Any) -> Any: ...\n def __rrshift__(self, other: Any) -> Any: ...\n def __irshift__(self, other: Any) -> Any: ...\n def __and__(self, other: Any) -> Any: ...\n def __rand__(self, other: Any) -> Any: ...\n def __iand__(self, other: Any) -> Any: ...\n def __xor__(self, other: Any) -> Any: ...\n def __rxor__(self, other: Any) -> Any: ...\n def __ixor__(self, other: Any) -> Any: ...\n def __or__(self, other: Any) -> Any: ...\n def __ror__(self, other: Any) -> Any: ...\n def __ior__(self, other: Any) -> Any: ...\n def __neg__(self) -> Any: ...\n def __pos__(self) -> Any: ...\n def __abs__(self) -> Any: ...\n def __invert__(self) -> Any: ...\n | .venv\Lib\site-packages\numpy\lib\mixins.pyi | mixins.pyi | Other | 3,206 | 0.95 | 0.706667 | 0.1 | vue-tools | 871 | 2025-01-16T05:37:59.367676 | Apache-2.0 | false | 6d996ab341f6e5a870bbb8ea8c038d2b |
from ._npyio_impl import DataSource, NpzFile, __doc__ # noqa: F401\n | .venv\Lib\site-packages\numpy\lib\npyio.py | npyio.py | Python | 69 | 0.75 | 0 | 0 | node-utils | 261 | 2025-02-18T05:22:18.875627 | BSD-3-Clause | false | 30a65bcc4eac6a4d60b70ef70480c69b |
from numpy.lib._npyio_impl import (\n DataSource as DataSource,\n)\nfrom numpy.lib._npyio_impl import (\n NpzFile as NpzFile,\n)\nfrom numpy.lib._npyio_impl import (\n __doc__ as __doc__,\n)\n | .venv\Lib\site-packages\numpy\lib\npyio.pyi | npyio.pyi | Other | 201 | 0.85 | 0 | 0 | vue-tools | 107 | 2023-10-24T11:24:50.530267 | MIT | false | 248241c0b44b55b97adfa6ce0d0366b8 |
"""\nCollection of utilities to manipulate structured arrays.\n\nMost of these functions were initially implemented by John Hunter for\nmatplotlib. They have been rewritten and extended for convenience.\n\n"""\nimport itertools\n\nimport numpy as np\nimport numpy.ma as ma\nimport numpy.ma.mrecords as mrec\nfrom numpy._core.overrides import array_function_dispatch\nfrom numpy.lib._iotools import _is_string_like\n\n__all__ = [\n 'append_fields', 'apply_along_fields', 'assign_fields_by_name',\n 'drop_fields', 'find_duplicates', 'flatten_descr',\n 'get_fieldstructure', 'get_names', 'get_names_flat',\n 'join_by', 'merge_arrays', 'rec_append_fields',\n 'rec_drop_fields', 'rec_join', 'recursive_fill_fields',\n 'rename_fields', 'repack_fields', 'require_fields',\n 'stack_arrays', 'structured_to_unstructured', 'unstructured_to_structured',\n ]\n\n\ndef _recursive_fill_fields_dispatcher(input, output):\n return (input, output)\n\n\n@array_function_dispatch(_recursive_fill_fields_dispatcher)\ndef recursive_fill_fields(input, output):\n """\n Fills fields from output with fields from input,\n with support for nested structures.\n\n Parameters\n ----------\n input : ndarray\n Input array.\n output : ndarray\n Output array.\n\n Notes\n -----\n * `output` should be at least the same size as `input`\n\n Examples\n --------\n >>> import numpy as np\n >>> from numpy.lib import recfunctions as rfn\n >>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', np.int64), ('B', np.float64)])\n >>> b = np.zeros((3,), dtype=a.dtype)\n >>> rfn.recursive_fill_fields(a, b)\n array([(1, 10.), (2, 20.), (0, 0.)], dtype=[('A', '<i8'), ('B', '<f8')])\n\n """\n newdtype = output.dtype\n for field in newdtype.names:\n try:\n current = input[field]\n except ValueError:\n continue\n if current.dtype.names is not None:\n recursive_fill_fields(current, output[field])\n else:\n output[field][:len(current)] = current\n return output\n\n\ndef _get_fieldspec(dtype):\n """\n Produce a list of name/dtype pairs 
corresponding to the dtype fields\n\n Similar to dtype.descr, but the second item of each tuple is a dtype, not a\n string. As a result, this handles subarray dtypes\n\n Can be passed to the dtype constructor to reconstruct the dtype, noting that\n this (deliberately) discards field offsets.\n\n Examples\n --------\n >>> import numpy as np\n >>> dt = np.dtype([(('a', 'A'), np.int64), ('b', np.double, 3)])\n >>> dt.descr\n [(('a', 'A'), '<i8'), ('b', '<f8', (3,))]\n >>> _get_fieldspec(dt)\n [(('a', 'A'), dtype('int64')), ('b', dtype(('<f8', (3,))))]\n\n """\n if dtype.names is None:\n # .descr returns a nameless field, so we should too\n return [('', dtype)]\n else:\n fields = ((name, dtype.fields[name]) for name in dtype.names)\n # keep any titles, if present\n return [\n (name if len(f) == 2 else (f[2], name), f[0])\n for name, f in fields\n ]\n\n\ndef get_names(adtype):\n """\n Returns the field names of the input datatype as a tuple. Input datatype\n must have fields otherwise error is raised.\n\n Parameters\n ----------\n adtype : dtype\n Input datatype\n\n Examples\n --------\n >>> import numpy as np\n >>> from numpy.lib import recfunctions as rfn\n >>> rfn.get_names(np.empty((1,), dtype=[('A', int)]).dtype)\n ('A',)\n >>> rfn.get_names(np.empty((1,), dtype=[('A',int), ('B', float)]).dtype)\n ('A', 'B')\n >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])\n >>> rfn.get_names(adtype)\n ('a', ('b', ('ba', 'bb')))\n """\n listnames = []\n names = adtype.names\n for name in names:\n current = adtype[name]\n if current.names is not None:\n listnames.append((name, tuple(get_names(current))))\n else:\n listnames.append(name)\n return tuple(listnames)\n\n\ndef get_names_flat(adtype):\n """\n Returns the field names of the input datatype as a tuple. 
Input datatype\n must have fields otherwise error is raised.\n Nested structure are flattened beforehand.\n\n Parameters\n ----------\n adtype : dtype\n Input datatype\n\n Examples\n --------\n >>> import numpy as np\n >>> from numpy.lib import recfunctions as rfn\n >>> rfn.get_names_flat(np.empty((1,), dtype=[('A', int)]).dtype) is None\n False\n >>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', str)]).dtype)\n ('A', 'B')\n >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])\n >>> rfn.get_names_flat(adtype)\n ('a', 'b', 'ba', 'bb')\n """\n listnames = []\n names = adtype.names\n for name in names:\n listnames.append(name)\n current = adtype[name]\n if current.names is not None:\n listnames.extend(get_names_flat(current))\n return tuple(listnames)\n\n\ndef flatten_descr(ndtype):\n """\n Flatten a structured data-type description.\n\n Examples\n --------\n >>> import numpy as np\n >>> from numpy.lib import recfunctions as rfn\n >>> ndtype = np.dtype([('a', '<i4'), ('b', [('ba', '<f8'), ('bb', '<i4')])])\n >>> rfn.flatten_descr(ndtype)\n (('a', dtype('int32')), ('ba', dtype('float64')), ('bb', dtype('int32')))\n\n """\n names = ndtype.names\n if names is None:\n return (('', ndtype),)\n else:\n descr = []\n for field in names:\n (typ, _) = ndtype.fields[field]\n if typ.names is not None:\n descr.extend(flatten_descr(typ))\n else:\n descr.append((field, typ))\n return tuple(descr)\n\n\ndef _zip_dtype(seqarrays, flatten=False):\n newdtype = []\n if flatten:\n for a in seqarrays:\n newdtype.extend(flatten_descr(a.dtype))\n else:\n for a in seqarrays:\n current = a.dtype\n if current.names is not None and len(current.names) == 1:\n # special case - dtypes of 1 field are flattened\n newdtype.extend(_get_fieldspec(current))\n else:\n newdtype.append(('', current))\n return np.dtype(newdtype)\n\n\ndef _zip_descr(seqarrays, flatten=False):\n """\n Combine the dtype description of a series of arrays.\n\n Parameters\n ----------\n seqarrays : 
sequence of arrays\n Sequence of arrays\n flatten : {boolean}, optional\n Whether to collapse nested descriptions.\n """\n return _zip_dtype(seqarrays, flatten=flatten).descr\n\n\ndef get_fieldstructure(adtype, lastname=None, parents=None,):\n """\n Returns a dictionary with fields indexing lists of their parent fields.\n\n This function is used to simplify access to fields nested in other fields.\n\n Parameters\n ----------\n adtype : np.dtype\n Input datatype\n lastname : optional\n Last processed field name (used internally during recursion).\n parents : dictionary\n Dictionary of parent fields (used internally during recursion).\n\n Examples\n --------\n >>> import numpy as np\n >>> from numpy.lib import recfunctions as rfn\n >>> ndtype = np.dtype([('A', int),\n ... ('B', [('BA', int),\n ... ('BB', [('BBA', int), ('BBB', int)])])])\n >>> rfn.get_fieldstructure(ndtype)\n ... # XXX: possible regression, order of BBA and BBB is swapped\n {'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}\n\n """\n if parents is None:\n parents = {}\n names = adtype.names\n for name in names:\n current = adtype[name]\n if current.names is not None:\n if lastname:\n parents[name] = [lastname, ]\n else:\n parents[name] = []\n parents.update(get_fieldstructure(current, name, parents))\n else:\n lastparent = list(parents.get(lastname, []) or [])\n if lastparent:\n lastparent.append(lastname)\n elif lastname:\n lastparent = [lastname, ]\n parents[name] = lastparent or []\n return parents\n\n\ndef _izip_fields_flat(iterable):\n """\n Returns an iterator of concatenated fields from a sequence of arrays,\n collapsing any nested structure.\n\n """\n for element in iterable:\n if isinstance(element, np.void):\n yield from _izip_fields_flat(tuple(element))\n else:\n yield element\n\n\ndef _izip_fields(iterable):\n """\n Returns an iterator of concatenated fields from a sequence of arrays.\n\n """\n for element in iterable:\n if (hasattr(element, '__iter__') 
and\n not isinstance(element, str)):\n yield from _izip_fields(element)\n elif isinstance(element, np.void) and len(tuple(element)) == 1:\n # this statement is the same from the previous expression\n yield from _izip_fields(element)\n else:\n yield element\n\n\ndef _izip_records(seqarrays, fill_value=None, flatten=True):\n """\n Returns an iterator of concatenated items from a sequence of arrays.\n\n Parameters\n ----------\n seqarrays : sequence of arrays\n Sequence of arrays.\n fill_value : {None, integer}\n Value used to pad shorter iterables.\n flatten : {True, False},\n Whether to\n """\n\n # Should we flatten the items, or just use a nested approach\n if flatten:\n zipfunc = _izip_fields_flat\n else:\n zipfunc = _izip_fields\n\n for tup in itertools.zip_longest(*seqarrays, fillvalue=fill_value):\n yield tuple(zipfunc(tup))\n\n\ndef _fix_output(output, usemask=True, asrecarray=False):\n """\n Private function: return a recarray, a ndarray, a MaskedArray\n or a MaskedRecords depending on the input parameters\n """\n if not isinstance(output, ma.MaskedArray):\n usemask = False\n if usemask:\n if asrecarray:\n output = output.view(mrec.MaskedRecords)\n else:\n output = ma.filled(output)\n if asrecarray:\n output = output.view(np.recarray)\n return output\n\n\ndef _fix_defaults(output, defaults=None):\n """\n Update the fill_value and masked data of `output`\n from the default given in a dictionary defaults.\n """\n names = output.dtype.names\n (data, mask, fill_value) = (output.data, output.mask, output.fill_value)\n for (k, v) in (defaults or {}).items():\n if k in names:\n fill_value[k] = v\n data[k][mask[k]] = v\n return output\n\n\ndef _merge_arrays_dispatcher(seqarrays, fill_value=None, flatten=None,\n usemask=None, asrecarray=None):\n return seqarrays\n\n\n@array_function_dispatch(_merge_arrays_dispatcher)\ndef merge_arrays(seqarrays, fill_value=-1, flatten=False,\n usemask=False, asrecarray=False):\n """\n Merge arrays field by field.\n\n Parameters\n 
----------\n seqarrays : sequence of ndarrays\n Sequence of arrays\n fill_value : {float}, optional\n Filling value used to pad missing data on the shorter arrays.\n flatten : {False, True}, optional\n Whether to collapse nested fields.\n usemask : {False, True}, optional\n Whether to return a masked array or not.\n asrecarray : {False, True}, optional\n Whether to return a recarray (MaskedRecords) or not.\n\n Examples\n --------\n >>> import numpy as np\n >>> from numpy.lib import recfunctions as rfn\n >>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])))\n array([( 1, 10.), ( 2, 20.), (-1, 30.)],\n dtype=[('f0', '<i8'), ('f1', '<f8')])\n\n >>> rfn.merge_arrays((np.array([1, 2], dtype=np.int64),\n ... np.array([10., 20., 30.])), usemask=False)\n array([(1, 10.0), (2, 20.0), (-1, 30.0)],\n dtype=[('f0', '<i8'), ('f1', '<f8')])\n >>> rfn.merge_arrays((np.array([1, 2]).view([('a', np.int64)]),\n ... np.array([10., 20., 30.])),\n ... usemask=False, asrecarray=True)\n rec.array([( 1, 10.), ( 2, 20.), (-1, 30.)],\n dtype=[('a', '<i8'), ('f1', '<f8')])\n\n Notes\n -----\n * Without a mask, the missing value will be filled with something,\n depending on what its corresponding type:\n\n * ``-1`` for integers\n * ``-1.0`` for floating point numbers\n * ``'-'`` for characters\n * ``'-1'`` for strings\n * ``True`` for boolean values\n * XXX: I just obtained these values empirically\n """\n # Only one item in the input sequence ?\n if (len(seqarrays) == 1):\n seqarrays = np.asanyarray(seqarrays[0])\n # Do we have a single ndarray as input ?\n if isinstance(seqarrays, (np.ndarray, np.void)):\n seqdtype = seqarrays.dtype\n # Make sure we have named fields\n if seqdtype.names is None:\n seqdtype = np.dtype([('', seqdtype)])\n if not flatten or _zip_dtype((seqarrays,), flatten=True) == seqdtype:\n # Minimal processing needed: just make sure everything's a-ok\n seqarrays = seqarrays.ravel()\n # Find what type of array we must return\n if usemask:\n if asrecarray:\n 
seqtype = mrec.MaskedRecords\n else:\n seqtype = ma.MaskedArray\n elif asrecarray:\n seqtype = np.recarray\n else:\n seqtype = np.ndarray\n return seqarrays.view(dtype=seqdtype, type=seqtype)\n else:\n seqarrays = (seqarrays,)\n else:\n # Make sure we have arrays in the input sequence\n seqarrays = [np.asanyarray(_m) for _m in seqarrays]\n # Find the sizes of the inputs and their maximum\n sizes = tuple(a.size for a in seqarrays)\n maxlength = max(sizes)\n # Get the dtype of the output (flattening if needed)\n newdtype = _zip_dtype(seqarrays, flatten=flatten)\n # Initialize the sequences for data and mask\n seqdata = []\n seqmask = []\n # If we expect some kind of MaskedArray, make a special loop.\n if usemask:\n for (a, n) in zip(seqarrays, sizes):\n nbmissing = (maxlength - n)\n # Get the data and mask\n data = a.ravel().__array__()\n mask = ma.getmaskarray(a).ravel()\n # Get the filling value (if needed)\n if nbmissing:\n fval = mrec._check_fill_value(fill_value, a.dtype)\n if isinstance(fval, (np.ndarray, np.void)):\n if len(fval.dtype) == 1:\n fval = fval.item()[0]\n fmsk = True\n else:\n fval = np.array(fval, dtype=a.dtype, ndmin=1)\n fmsk = np.ones((1,), dtype=mask.dtype)\n else:\n fval = None\n fmsk = True\n # Store an iterator padding the input to the expected length\n seqdata.append(itertools.chain(data, [fval] * nbmissing))\n seqmask.append(itertools.chain(mask, [fmsk] * nbmissing))\n # Create an iterator for the data\n data = tuple(_izip_records(seqdata, flatten=flatten))\n output = ma.array(np.fromiter(data, dtype=newdtype, count=maxlength),\n mask=list(_izip_records(seqmask, flatten=flatten)))\n if asrecarray:\n output = output.view(mrec.MaskedRecords)\n else:\n # Same as before, without the mask we don't need...\n for (a, n) in zip(seqarrays, sizes):\n nbmissing = (maxlength - n)\n data = a.ravel().__array__()\n if nbmissing:\n fval = mrec._check_fill_value(fill_value, a.dtype)\n if isinstance(fval, (np.ndarray, np.void)):\n if len(fval.dtype) == 
1:\n fval = fval.item()[0]\n else:\n fval = np.array(fval, dtype=a.dtype, ndmin=1)\n else:\n fval = None\n seqdata.append(itertools.chain(data, [fval] * nbmissing))\n output = np.fromiter(tuple(_izip_records(seqdata, flatten=flatten)),\n dtype=newdtype, count=maxlength)\n if asrecarray:\n output = output.view(np.recarray)\n # And we're done...\n return output\n\n\ndef _drop_fields_dispatcher(base, drop_names, usemask=None, asrecarray=None):\n return (base,)\n\n\n@array_function_dispatch(_drop_fields_dispatcher)\ndef drop_fields(base, drop_names, usemask=True, asrecarray=False):\n """\n Return a new array with fields in `drop_names` dropped.\n\n Nested fields are supported.\n\n Parameters\n ----------\n base : array\n Input array\n drop_names : string or sequence\n String or sequence of strings corresponding to the names of the\n fields to drop.\n usemask : {False, True}, optional\n Whether to return a masked array or not.\n asrecarray : string or sequence, optional\n Whether to return a recarray or a mrecarray (`asrecarray=True`) or\n a plain ndarray or masked array with flexible dtype. The default\n is False.\n\n Examples\n --------\n >>> import numpy as np\n >>> from numpy.lib import recfunctions as rfn\n >>> a = np.array([(1, (2, 3.0)), (4, (5, 6.0))],\n ... 
dtype=[('a', np.int64), ('b', [('ba', np.double), ('bb', np.int64)])])\n >>> rfn.drop_fields(a, 'a')\n array([((2., 3),), ((5., 6),)],\n dtype=[('b', [('ba', '<f8'), ('bb', '<i8')])])\n >>> rfn.drop_fields(a, 'ba')\n array([(1, (3,)), (4, (6,))], dtype=[('a', '<i8'), ('b', [('bb', '<i8')])])\n >>> rfn.drop_fields(a, ['ba', 'bb'])\n array([(1,), (4,)], dtype=[('a', '<i8')])\n """\n if _is_string_like(drop_names):\n drop_names = [drop_names]\n else:\n drop_names = set(drop_names)\n\n def _drop_descr(ndtype, drop_names):\n names = ndtype.names\n newdtype = []\n for name in names:\n current = ndtype[name]\n if name in drop_names:\n continue\n if current.names is not None:\n descr = _drop_descr(current, drop_names)\n if descr:\n newdtype.append((name, descr))\n else:\n newdtype.append((name, current))\n return newdtype\n\n newdtype = _drop_descr(base.dtype, drop_names)\n\n output = np.empty(base.shape, dtype=newdtype)\n output = recursive_fill_fields(base, output)\n return _fix_output(output, usemask=usemask, asrecarray=asrecarray)\n\n\ndef _keep_fields(base, keep_names, usemask=True, asrecarray=False):\n """\n Return a new array keeping only the fields in `keep_names`,\n and preserving the order of those fields.\n\n Parameters\n ----------\n base : array\n Input array\n keep_names : string or sequence\n String or sequence of strings corresponding to the names of the\n fields to keep. Order of the names will be preserved.\n usemask : {False, True}, optional\n Whether to return a masked array or not.\n asrecarray : string or sequence, optional\n Whether to return a recarray or a mrecarray (`asrecarray=True`) or\n a plain ndarray or masked array with flexible dtype. 
The default\n is False.\n """\n newdtype = [(n, base.dtype[n]) for n in keep_names]\n output = np.empty(base.shape, dtype=newdtype)\n output = recursive_fill_fields(base, output)\n return _fix_output(output, usemask=usemask, asrecarray=asrecarray)\n\n\ndef _rec_drop_fields_dispatcher(base, drop_names):\n return (base,)\n\n\n@array_function_dispatch(_rec_drop_fields_dispatcher)\ndef rec_drop_fields(base, drop_names):\n """\n Returns a new numpy.recarray with fields in `drop_names` dropped.\n """\n return drop_fields(base, drop_names, usemask=False, asrecarray=True)\n\n\ndef _rename_fields_dispatcher(base, namemapper):\n return (base,)\n\n\n@array_function_dispatch(_rename_fields_dispatcher)\ndef rename_fields(base, namemapper):\n """\n Rename the fields from a flexible-datatype ndarray or recarray.\n\n Nested fields are supported.\n\n Parameters\n ----------\n base : ndarray\n Input array whose fields must be modified.\n namemapper : dictionary\n Dictionary mapping old field names to their new version.\n\n Examples\n --------\n >>> import numpy as np\n >>> from numpy.lib import recfunctions as rfn\n >>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],\n ... 
dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])])\n >>> rfn.rename_fields(a, {'a':'A', 'bb':'BB'})\n array([(1, (2., [ 3., 30.])), (4, (5., [ 6., 60.]))],\n dtype=[('A', '<i8'), ('b', [('ba', '<f8'), ('BB', '<f8', (2,))])])\n\n """\n def _recursive_rename_fields(ndtype, namemapper):\n newdtype = []\n for name in ndtype.names:\n newname = namemapper.get(name, name)\n current = ndtype[name]\n if current.names is not None:\n newdtype.append(\n (newname, _recursive_rename_fields(current, namemapper))\n )\n else:\n newdtype.append((newname, current))\n return newdtype\n newdtype = _recursive_rename_fields(base.dtype, namemapper)\n return base.view(newdtype)\n\n\ndef _append_fields_dispatcher(base, names, data, dtypes=None,\n fill_value=None, usemask=None, asrecarray=None):\n yield base\n yield from data\n\n\n@array_function_dispatch(_append_fields_dispatcher)\ndef append_fields(base, names, data, dtypes=None,\n fill_value=-1, usemask=True, asrecarray=False):\n """\n Add new fields to an existing array.\n\n The names of the fields are given with the `names` arguments,\n the corresponding values with the `data` arguments.\n If a single field is appended, `names`, `data` and `dtypes` do not have\n to be lists but just values.\n\n Parameters\n ----------\n base : array\n Input array to extend.\n names : string, sequence\n String or sequence of strings corresponding to the names\n of the new fields.\n data : array or sequence of arrays\n Array or sequence of arrays storing the fields to add to the base.\n dtypes : sequence of datatypes, optional\n Datatype or sequence of datatypes.\n If None, the datatypes are estimated from the `data`.\n fill_value : {float}, optional\n Filling value used to pad missing data on the shorter arrays.\n usemask : {False, True}, optional\n Whether to return a masked array or not.\n asrecarray : {False, True}, optional\n Whether to return a recarray (MaskedRecords) or not.\n\n """\n # Check the names\n if isinstance(names, (tuple, 
list)):\n if len(names) != len(data):\n msg = "The number of arrays does not match the number of names"\n raise ValueError(msg)\n elif isinstance(names, str):\n names = [names, ]\n data = [data, ]\n #\n if dtypes is None:\n data = [np.array(a, copy=None, subok=True) for a in data]\n data = [a.view([(name, a.dtype)]) for (name, a) in zip(names, data)]\n else:\n if not isinstance(dtypes, (tuple, list)):\n dtypes = [dtypes, ]\n if len(data) != len(dtypes):\n if len(dtypes) == 1:\n dtypes = dtypes * len(data)\n else:\n msg = "The dtypes argument must be None, a dtype, or a list."\n raise ValueError(msg)\n data = [np.array(a, copy=None, subok=True, dtype=d).view([(n, d)])\n for (a, n, d) in zip(data, names, dtypes)]\n #\n base = merge_arrays(base, usemask=usemask, fill_value=fill_value)\n if len(data) > 1:\n data = merge_arrays(data, flatten=True, usemask=usemask,\n fill_value=fill_value)\n else:\n data = data.pop()\n #\n output = ma.masked_all(\n max(len(base), len(data)),\n dtype=_get_fieldspec(base.dtype) + _get_fieldspec(data.dtype))\n output = recursive_fill_fields(base, output)\n output = recursive_fill_fields(data, output)\n #\n return _fix_output(output, usemask=usemask, asrecarray=asrecarray)\n\n\ndef _rec_append_fields_dispatcher(base, names, data, dtypes=None):\n yield base\n yield from data\n\n\n@array_function_dispatch(_rec_append_fields_dispatcher)\ndef rec_append_fields(base, names, data, dtypes=None):\n """\n Add new fields to an existing array.\n\n The names of the fields are given with the `names` arguments,\n the corresponding values with the `data` arguments.\n If a single field is appended, `names`, `data` and `dtypes` do not have\n to be lists but just values.\n\n Parameters\n ----------\n base : array\n Input array to extend.\n names : string, sequence\n String or sequence of strings corresponding to the names\n of the new fields.\n data : array or sequence of arrays\n Array or sequence of arrays storing the fields to add to the base.\n dtypes : 
sequence of datatypes, optional\n Datatype or sequence of datatypes.\n If None, the datatypes are estimated from the `data`.\n\n See Also\n --------\n append_fields\n\n Returns\n -------\n appended_array : np.recarray\n """\n return append_fields(base, names, data=data, dtypes=dtypes,\n asrecarray=True, usemask=False)\n\n\ndef _repack_fields_dispatcher(a, align=None, recurse=None):\n return (a,)\n\n\n@array_function_dispatch(_repack_fields_dispatcher)\ndef repack_fields(a, align=False, recurse=False):\n """\n Re-pack the fields of a structured array or dtype in memory.\n\n The memory layout of structured datatypes allows fields at arbitrary\n byte offsets. This means the fields can be separated by padding bytes,\n their offsets can be non-monotonically increasing, and they can overlap.\n\n This method removes any overlaps and reorders the fields in memory so they\n have increasing byte offsets, and adds or removes padding bytes depending\n on the `align` option, which behaves like the `align` option to\n `numpy.dtype`.\n\n If `align=False`, this method produces a "packed" memory layout in which\n each field starts at the byte the previous field ended, and any padding\n bytes are removed.\n\n If `align=True`, this methods produces an "aligned" memory layout in which\n each field's offset is a multiple of its alignment, and the total itemsize\n is a multiple of the largest alignment, by adding padding bytes as needed.\n\n Parameters\n ----------\n a : ndarray or dtype\n array or dtype for which to repack the fields.\n align : boolean\n If true, use an "aligned" memory layout, otherwise use a "packed" layout.\n recurse : boolean\n If True, also repack nested structures.\n\n Returns\n -------\n repacked : ndarray or dtype\n Copy of `a` with fields repacked, or `a` itself if no repacking was\n needed.\n\n Examples\n --------\n >>> import numpy as np\n\n >>> from numpy.lib import recfunctions as rfn\n >>> def print_offsets(d):\n ... 
print("offsets:", [d.fields[name][1] for name in d.names])\n ... print("itemsize:", d.itemsize)\n ...\n >>> dt = np.dtype('u1, <i8, <f8', align=True)\n >>> dt\n dtype({'names': ['f0', 'f1', 'f2'], 'formats': ['u1', '<i8', '<f8'], \\n'offsets': [0, 8, 16], 'itemsize': 24}, align=True)\n >>> print_offsets(dt)\n offsets: [0, 8, 16]\n itemsize: 24\n >>> packed_dt = rfn.repack_fields(dt)\n >>> packed_dt\n dtype([('f0', 'u1'), ('f1', '<i8'), ('f2', '<f8')])\n >>> print_offsets(packed_dt)\n offsets: [0, 1, 9]\n itemsize: 17\n\n """\n if not isinstance(a, np.dtype):\n dt = repack_fields(a.dtype, align=align, recurse=recurse)\n return a.astype(dt, copy=False)\n\n if a.names is None:\n return a\n\n fieldinfo = []\n for name in a.names:\n tup = a.fields[name]\n if recurse:\n fmt = repack_fields(tup[0], align=align, recurse=True)\n else:\n fmt = tup[0]\n\n if len(tup) == 3:\n name = (tup[2], name)\n\n fieldinfo.append((name, fmt))\n\n dt = np.dtype(fieldinfo, align=align)\n return np.dtype((a.type, dt))\n\ndef _get_fields_and_offsets(dt, offset=0):\n """\n Returns a flat list of (dtype, count, offset) tuples of all the\n scalar fields in the dtype "dt", including nested fields, in left\n to right order.\n """\n\n # counts up elements in subarrays, including nested subarrays, and returns\n # base dtype and count\n def count_elem(dt):\n count = 1\n while dt.shape != ():\n for size in dt.shape:\n count *= size\n dt = dt.base\n return dt, count\n\n fields = []\n for name in dt.names:\n field = dt.fields[name]\n f_dt, f_offset = field[0], field[1]\n f_dt, n = count_elem(f_dt)\n\n if f_dt.names is None:\n fields.append((np.dtype((f_dt, (n,))), n, f_offset + offset))\n else:\n subfields = _get_fields_and_offsets(f_dt, f_offset + offset)\n size = f_dt.itemsize\n\n for i in range(n):\n if i == 0:\n # optimization: avoid list comprehension if no subarray\n fields.extend(subfields)\n else:\n fields.extend([(d, c, o + i * size) for d, c, o in subfields])\n return fields\n\ndef 
_common_stride(offsets, counts, itemsize):\n """\n Returns the stride between the fields, or None if the stride is not\n constant. The values in "counts" designate the lengths of\n subarrays. Subarrays are treated as many contiguous fields, with\n always positive stride.\n """\n if len(offsets) <= 1:\n return itemsize\n\n negative = offsets[1] < offsets[0] # negative stride\n if negative:\n # reverse, so offsets will be ascending\n it = zip(reversed(offsets), reversed(counts))\n else:\n it = zip(offsets, counts)\n\n prev_offset = None\n stride = None\n for offset, count in it:\n if count != 1: # subarray: always c-contiguous\n if negative:\n return None # subarrays can never have a negative stride\n if stride is None:\n stride = itemsize\n if stride != itemsize:\n return None\n end_offset = offset + (count - 1) * itemsize\n else:\n end_offset = offset\n\n if prev_offset is not None:\n new_stride = offset - prev_offset\n if stride is None:\n stride = new_stride\n if stride != new_stride:\n return None\n\n prev_offset = end_offset\n\n if negative:\n return -stride\n return stride\n\n\ndef _structured_to_unstructured_dispatcher(arr, dtype=None, copy=None,\n casting=None):\n return (arr,)\n\n@array_function_dispatch(_structured_to_unstructured_dispatcher)\ndef structured_to_unstructured(arr, dtype=None, copy=False, casting='unsafe'):\n """\n Converts an n-D structured array into an (n+1)-D unstructured array.\n\n The new array will have a new last dimension equal in size to the\n number of field-elements of the input array. If not supplied, the output\n datatype is determined from the numpy type promotion rules applied to all\n the field datatypes.\n\n Nested fields, as well as each element of any subarray fields, all count\n as a single field-elements.\n\n Parameters\n ----------\n arr : ndarray\n Structured array or dtype to convert. 
Cannot contain object datatype.\n dtype : dtype, optional\n The dtype of the output unstructured array.\n copy : bool, optional\n If true, always return a copy. If false, a view is returned if\n possible, such as when the `dtype` and strides of the fields are\n suitable and the array subtype is one of `numpy.ndarray`,\n `numpy.recarray` or `numpy.memmap`.\n\n .. versionchanged:: 1.25.0\n A view can now be returned if the fields are separated by a\n uniform stride.\n\n casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional\n See casting argument of `numpy.ndarray.astype`. Controls what kind of\n data casting may occur.\n\n Returns\n -------\n unstructured : ndarray\n Unstructured array with one more dimension.\n\n Examples\n --------\n >>> import numpy as np\n\n >>> from numpy.lib import recfunctions as rfn\n >>> a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)])\n >>> a\n array([(0, (0., 0), [0., 0.]), (0, (0., 0), [0., 0.]),\n (0, (0., 0), [0., 0.]), (0, (0., 0), [0., 0.])],\n dtype=[('a', '<i4'), ('b', [('f0', '<f4'), ('f1', '<u2')]), ('c', '<f4', (2,))])\n >>> rfn.structured_to_unstructured(a)\n array([[0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0.],\n [0., 0., 0., 0., 0.]])\n\n >>> b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],\n ... dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])\n >>> np.mean(rfn.structured_to_unstructured(b[['x', 'z']]), axis=-1)\n array([ 3. , 5.5, 9. , 11. ])\n\n """ # noqa: E501\n if arr.dtype.names is None:\n raise ValueError('arr must be a structured array')\n\n fields = _get_fields_and_offsets(arr.dtype)\n n_fields = len(fields)\n if n_fields == 0 and dtype is None:\n raise ValueError("arr has no fields. 
Unable to guess dtype")\n elif n_fields == 0:\n # too many bugs elsewhere for this to work now\n raise NotImplementedError("arr with no fields is not supported")\n\n dts, counts, offsets = zip(*fields)\n names = [f'f{n}' for n in range(n_fields)]\n\n if dtype is None:\n out_dtype = np.result_type(*[dt.base for dt in dts])\n else:\n out_dtype = np.dtype(dtype)\n\n # Use a series of views and casts to convert to an unstructured array:\n\n # first view using flattened fields (doesn't work for object arrays)\n # Note: dts may include a shape for subarrays\n flattened_fields = np.dtype({'names': names,\n 'formats': dts,\n 'offsets': offsets,\n 'itemsize': arr.dtype.itemsize})\n arr = arr.view(flattened_fields)\n\n # we only allow a few types to be unstructured by manipulating the\n # strides, because we know it won't work with, for example, np.matrix nor\n # np.ma.MaskedArray.\n can_view = type(arr) in (np.ndarray, np.recarray, np.memmap)\n if (not copy) and can_view and all(dt.base == out_dtype for dt in dts):\n # all elements have the right dtype already; if they have a common\n # stride, we can just return a view\n common_stride = _common_stride(offsets, counts, out_dtype.itemsize)\n if common_stride is not None:\n wrap = arr.__array_wrap__\n\n new_shape = arr.shape + (sum(counts), out_dtype.itemsize)\n new_strides = arr.strides + (abs(common_stride), 1)\n\n arr = arr[..., np.newaxis].view(np.uint8) # view as bytes\n arr = arr[..., min(offsets):] # remove the leading unused data\n arr = np.lib.stride_tricks.as_strided(arr,\n new_shape,\n new_strides,\n subok=True)\n\n # cast and drop the last dimension again\n arr = arr.view(out_dtype)[..., 0]\n\n if common_stride < 0:\n arr = arr[..., ::-1] # reverse, if the stride was negative\n if type(arr) is not type(wrap.__self__):\n # Some types (e.g. 
recarray) turn into an ndarray along the\n # way, so we have to wrap it again in order to match the\n # behavior with copy=True.\n arr = wrap(arr)\n return arr\n\n # next cast to a packed format with all fields converted to new dtype\n packed_fields = np.dtype({'names': names,\n 'formats': [(out_dtype, dt.shape) for dt in dts]})\n arr = arr.astype(packed_fields, copy=copy, casting=casting)\n\n # finally is it safe to view the packed fields as the unstructured type\n return arr.view((out_dtype, (sum(counts),)))\n\n\ndef _unstructured_to_structured_dispatcher(arr, dtype=None, names=None,\n align=None, copy=None, casting=None):\n return (arr,)\n\n@array_function_dispatch(_unstructured_to_structured_dispatcher)\ndef unstructured_to_structured(arr, dtype=None, names=None, align=False,\n copy=False, casting='unsafe'):\n """\n Converts an n-D unstructured array into an (n-1)-D structured array.\n\n The last dimension of the input array is converted into a structure, with\n number of field-elements equal to the size of the last dimension of the\n input array. By default all output fields have the input array's dtype, but\n an output structured dtype with an equal number of fields-elements can be\n supplied instead.\n\n Nested fields, as well as each element of any subarray fields, all count\n towards the number of field-elements.\n\n Parameters\n ----------\n arr : ndarray\n Unstructured array or dtype to convert.\n dtype : dtype, optional\n The structured dtype of the output array\n names : list of strings, optional\n If dtype is not supplied, this specifies the field names for the output\n dtype, in order. The field dtypes will be the same as the input array.\n align : boolean, optional\n Whether to create an aligned memory layout.\n copy : bool, optional\n See copy argument to `numpy.ndarray.astype`. If true, always return a\n copy. 
If false, and `dtype` requirements are satisfied, a view is\n returned.\n casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional\n See casting argument of `numpy.ndarray.astype`. Controls what kind of\n data casting may occur.\n\n Returns\n -------\n structured : ndarray\n Structured array with fewer dimensions.\n\n Examples\n --------\n >>> import numpy as np\n\n >>> from numpy.lib import recfunctions as rfn\n >>> dt = np.dtype([('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)])\n >>> a = np.arange(20).reshape((4,5))\n >>> a\n array([[ 0, 1, 2, 3, 4],\n [ 5, 6, 7, 8, 9],\n [10, 11, 12, 13, 14],\n [15, 16, 17, 18, 19]])\n >>> rfn.unstructured_to_structured(a, dt)\n array([( 0, ( 1., 2), [ 3., 4.]), ( 5, ( 6., 7), [ 8., 9.]),\n (10, (11., 12), [13., 14.]), (15, (16., 17), [18., 19.])],\n dtype=[('a', '<i4'), ('b', [('f0', '<f4'), ('f1', '<u2')]), ('c', '<f4', (2,))])\n\n """ # noqa: E501\n if arr.shape == ():\n raise ValueError('arr must have at least one dimension')\n n_elem = arr.shape[-1]\n if n_elem == 0:\n # too many bugs elsewhere for this to work now\n raise NotImplementedError("last axis with size 0 is not supported")\n\n if dtype is None:\n if names is None:\n names = [f'f{n}' for n in range(n_elem)]\n out_dtype = np.dtype([(n, arr.dtype) for n in names], align=align)\n fields = _get_fields_and_offsets(out_dtype)\n dts, counts, offsets = zip(*fields)\n else:\n if names is not None:\n raise ValueError("don't supply both dtype and names")\n # if dtype is the args of np.dtype, construct it\n dtype = np.dtype(dtype)\n # sanity check of the input dtype\n fields = _get_fields_and_offsets(dtype)\n if len(fields) == 0:\n dts, counts, offsets = [], [], []\n else:\n dts, counts, offsets = zip(*fields)\n\n if n_elem != sum(counts):\n raise ValueError('The length of the last dimension of arr must '\n 'be equal to the number of fields in dtype')\n out_dtype = dtype\n if align and not out_dtype.isalignedstruct:\n raise ValueError("align was True but dtype is not 
aligned")\n\n names = [f'f{n}' for n in range(len(fields))]\n\n # Use a series of views and casts to convert to a structured array:\n\n # first view as a packed structured array of one dtype\n packed_fields = np.dtype({'names': names,\n 'formats': [(arr.dtype, dt.shape) for dt in dts]})\n arr = np.ascontiguousarray(arr).view(packed_fields)\n\n # next cast to an unpacked but flattened format with varied dtypes\n flattened_fields = np.dtype({'names': names,\n 'formats': dts,\n 'offsets': offsets,\n 'itemsize': out_dtype.itemsize})\n arr = arr.astype(flattened_fields, copy=copy, casting=casting)\n\n # finally view as the final nested dtype and remove the last axis\n return arr.view(out_dtype)[..., 0]\n\ndef _apply_along_fields_dispatcher(func, arr):\n return (arr,)\n\n@array_function_dispatch(_apply_along_fields_dispatcher)\ndef apply_along_fields(func, arr):\n """\n Apply function 'func' as a reduction across fields of a structured array.\n\n This is similar to `numpy.apply_along_axis`, but treats the fields of a\n structured array as an extra axis. The fields are all first cast to a\n common type following the type-promotion rules from `numpy.result_type`\n applied to the field's dtypes.\n\n Parameters\n ----------\n func : function\n Function to apply on the "field" dimension. This function must\n support an `axis` argument, like `numpy.mean`, `numpy.sum`, etc.\n arr : ndarray\n Structured array for which to apply func.\n\n Returns\n -------\n out : ndarray\n Result of the reduction operation\n\n Examples\n --------\n >>> import numpy as np\n\n >>> from numpy.lib import recfunctions as rfn\n >>> b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],\n ... dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])\n >>> rfn.apply_along_fields(np.mean, b)\n array([ 2.66666667, 5.33333333, 8.66666667, 11. ])\n >>> rfn.apply_along_fields(np.mean, b[['x', 'z']])\n array([ 3. , 5.5, 9. , 11. 
])\n\n """\n if arr.dtype.names is None:\n raise ValueError('arr must be a structured array')\n\n uarr = structured_to_unstructured(arr)\n return func(uarr, axis=-1)\n # works and avoids axis requirement, but very, very slow:\n #return np.apply_along_axis(func, -1, uarr)\n\ndef _assign_fields_by_name_dispatcher(dst, src, zero_unassigned=None):\n return dst, src\n\n@array_function_dispatch(_assign_fields_by_name_dispatcher)\ndef assign_fields_by_name(dst, src, zero_unassigned=True):\n """\n Assigns values from one structured array to another by field name.\n\n Normally in numpy >= 1.14, assignment of one structured array to another\n copies fields "by position", meaning that the first field from the src is\n copied to the first field of the dst, and so on, regardless of field name.\n\n This function instead copies "by field name", such that fields in the dst\n are assigned from the identically named field in the src. This applies\n recursively for nested structures. This is how structure assignment worked\n in numpy >= 1.6 to <= 1.13.\n\n Parameters\n ----------\n dst : ndarray\n src : ndarray\n The source and destination arrays during assignment.\n zero_unassigned : bool, optional\n If True, fields in the dst for which there was no matching\n field in the src are filled with the value 0 (zero). This\n was the behavior of numpy <= 1.13. If False, those fields\n are not modified.\n """\n\n if dst.dtype.names is None:\n dst[...] 
= src\n return\n\n for name in dst.dtype.names:\n if name not in src.dtype.names:\n if zero_unassigned:\n dst[name] = 0\n else:\n assign_fields_by_name(dst[name], src[name],\n zero_unassigned)\n\ndef _require_fields_dispatcher(array, required_dtype):\n return (array,)\n\n@array_function_dispatch(_require_fields_dispatcher)\ndef require_fields(array, required_dtype):\n """\n Casts a structured array to a new dtype using assignment by field-name.\n\n This function assigns from the old to the new array by name, so the\n value of a field in the output array is the value of the field with the\n same name in the source array. This has the effect of creating a new\n ndarray containing only the fields "required" by the required_dtype.\n\n If a field name in the required_dtype does not exist in the\n input array, that field is created and set to 0 in the output array.\n\n Parameters\n ----------\n a : ndarray\n array to cast\n required_dtype : dtype\n datatype for output array\n\n Returns\n -------\n out : ndarray\n array with the new dtype, with field values copied from the fields in\n the input array with the same name\n\n Examples\n --------\n >>> import numpy as np\n\n >>> from numpy.lib import recfunctions as rfn\n >>> a = np.ones(4, dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'u1')])\n >>> rfn.require_fields(a, [('b', 'f4'), ('c', 'u1')])\n array([(1., 1), (1., 1), (1., 1), (1., 1)],\n dtype=[('b', '<f4'), ('c', 'u1')])\n >>> rfn.require_fields(a, [('b', 'f4'), ('newf', 'u1')])\n array([(1., 0), (1., 0), (1., 0), (1., 0)],\n dtype=[('b', '<f4'), ('newf', 'u1')])\n\n """\n out = np.empty(array.shape, dtype=required_dtype)\n assign_fields_by_name(out, array)\n return out\n\n\ndef _stack_arrays_dispatcher(arrays, defaults=None, usemask=None,\n asrecarray=None, autoconvert=None):\n return arrays\n\n\n@array_function_dispatch(_stack_arrays_dispatcher)\ndef stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,\n autoconvert=False):\n """\n Superposes arrays fields 
by fields\n\n Parameters\n ----------\n arrays : array or sequence\n Sequence of input arrays.\n defaults : dictionary, optional\n Dictionary mapping field names to the corresponding default values.\n usemask : {True, False}, optional\n Whether to return a MaskedArray (or MaskedRecords is\n `asrecarray==True`) or a ndarray.\n asrecarray : {False, True}, optional\n Whether to return a recarray (or MaskedRecords if `usemask==True`)\n or just a flexible-type ndarray.\n autoconvert : {False, True}, optional\n Whether automatically cast the type of the field to the maximum.\n\n Examples\n --------\n >>> import numpy as np\n >>> from numpy.lib import recfunctions as rfn\n >>> x = np.array([1, 2,])\n >>> rfn.stack_arrays(x) is x\n True\n >>> z = np.array([('A', 1), ('B', 2)], dtype=[('A', '|S3'), ('B', float)])\n >>> zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],\n ... dtype=[('A', '|S3'), ('B', np.double), ('C', np.double)])\n >>> test = rfn.stack_arrays((z,zz))\n >>> test\n masked_array(data=[(b'A', 1.0, --), (b'B', 2.0, --), (b'a', 10.0, 100.0),\n (b'b', 20.0, 200.0), (b'c', 30.0, 300.0)],\n mask=[(False, False, True), (False, False, True),\n (False, False, False), (False, False, False),\n (False, False, False)],\n fill_value=(b'N/A', 1e+20, 1e+20),\n dtype=[('A', 'S3'), ('B', '<f8'), ('C', '<f8')])\n\n """\n if isinstance(arrays, np.ndarray):\n return arrays\n elif len(arrays) == 1:\n return arrays[0]\n seqarrays = [np.asanyarray(a).ravel() for a in arrays]\n nrecords = [len(a) for a in seqarrays]\n ndtype = [a.dtype for a in seqarrays]\n fldnames = [d.names for d in ndtype]\n #\n dtype_l = ndtype[0]\n newdescr = _get_fieldspec(dtype_l)\n names = [n for n, d in newdescr]\n for dtype_n in ndtype[1:]:\n for fname, fdtype in _get_fieldspec(dtype_n):\n if fname not in names:\n newdescr.append((fname, fdtype))\n names.append(fname)\n else:\n nameidx = names.index(fname)\n _, cdtype = newdescr[nameidx]\n if autoconvert:\n newdescr[nameidx] = (fname, 
max(fdtype, cdtype))\n elif fdtype != cdtype:\n raise TypeError(f"Incompatible type '{cdtype}' <> '{fdtype}'")\n # Only one field: use concatenate\n if len(newdescr) == 1:\n output = ma.concatenate(seqarrays)\n else:\n #\n output = ma.masked_all((np.sum(nrecords),), newdescr)\n offset = np.cumsum(np.r_[0, nrecords])\n seen = []\n for (a, n, i, j) in zip(seqarrays, fldnames, offset[:-1], offset[1:]):\n names = a.dtype.names\n if names is None:\n output[f'f{len(seen)}'][i:j] = a\n else:\n for name in n:\n output[name][i:j] = a[name]\n if name not in seen:\n seen.append(name)\n #\n return _fix_output(_fix_defaults(output, defaults),\n usemask=usemask, asrecarray=asrecarray)\n\n\ndef _find_duplicates_dispatcher(\n a, key=None, ignoremask=None, return_index=None):\n return (a,)\n\n\n@array_function_dispatch(_find_duplicates_dispatcher)\ndef find_duplicates(a, key=None, ignoremask=True, return_index=False):\n """\n Find the duplicates in a structured array along a given key\n\n Parameters\n ----------\n a : array-like\n Input array\n key : {string, None}, optional\n Name of the fields along which to check the duplicates.\n If None, the search is performed by records\n ignoremask : {True, False}, optional\n Whether masked data should be discarded or considered as duplicates.\n return_index : {False, True}, optional\n Whether to return the indices of the duplicated values.\n\n Examples\n --------\n >>> import numpy as np\n >>> from numpy.lib import recfunctions as rfn\n >>> ndtype = [('a', int)]\n >>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3],\n ... 
mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)\n >>> rfn.find_duplicates(a, ignoremask=True, return_index=True)\n (masked_array(data=[(1,), (1,), (2,), (2,)],\n mask=[(False,), (False,), (False,), (False,)],\n fill_value=(999999,),\n dtype=[('a', '<i8')]), array([0, 1, 3, 4]))\n """\n a = np.asanyarray(a).ravel()\n # Get a dictionary of fields\n fields = get_fieldstructure(a.dtype)\n # Get the sorting data (by selecting the corresponding field)\n base = a\n if key:\n for f in fields[key]:\n base = base[f]\n base = base[key]\n # Get the sorting indices and the sorted data\n sortidx = base.argsort()\n sortedbase = base[sortidx]\n sorteddata = sortedbase.filled()\n # Compare the sorting data\n flag = (sorteddata[:-1] == sorteddata[1:])\n # If masked data must be ignored, set the flag to false where needed\n if ignoremask:\n sortedmask = sortedbase.recordmask\n flag[sortedmask[1:]] = False\n flag = np.concatenate(([False], flag))\n # We need to take the point on the left as well (else we're missing it)\n flag[:-1] = flag[:-1] + flag[1:]\n duplicates = a[sortidx][flag]\n if return_index:\n return (duplicates, sortidx[flag])\n else:\n return duplicates\n\n\ndef _join_by_dispatcher(\n key, r1, r2, jointype=None, r1postfix=None, r2postfix=None,\n defaults=None, usemask=None, asrecarray=None):\n return (r1, r2)\n\n\n@array_function_dispatch(_join_by_dispatcher)\ndef join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',\n defaults=None, usemask=True, asrecarray=False):\n """\n Join arrays `r1` and `r2` on key `key`.\n\n The key should be either a string or a sequence of string corresponding\n to the fields used to join the array. An exception is raised if the\n `key` field cannot be found in the two input arrays. Neither `r1` nor\n `r2` should have any duplicates along `key`: the presence of duplicates\n will make the output quite unreliable. 
Note that duplicates are not\n looked for by the algorithm.\n\n Parameters\n ----------\n key : {string, sequence}\n A string or a sequence of strings corresponding to the fields used\n for comparison.\n r1, r2 : arrays\n Structured arrays.\n jointype : {'inner', 'outer', 'leftouter'}, optional\n If 'inner', returns the elements common to both r1 and r2.\n If 'outer', returns the common elements as well as the elements of\n r1 not in r2 and the elements of not in r2.\n If 'leftouter', returns the common elements and the elements of r1\n not in r2.\n r1postfix : string, optional\n String appended to the names of the fields of r1 that are present\n in r2 but absent of the key.\n r2postfix : string, optional\n String appended to the names of the fields of r2 that are present\n in r1 but absent of the key.\n defaults : {dictionary}, optional\n Dictionary mapping field names to the corresponding default values.\n usemask : {True, False}, optional\n Whether to return a MaskedArray (or MaskedRecords is\n `asrecarray==True`) or a ndarray.\n asrecarray : {False, True}, optional\n Whether to return a recarray (or MaskedRecords if `usemask==True`)\n or just a flexible-type ndarray.\n\n Notes\n -----\n * The output is sorted along the key.\n * A temporary array is formed by dropping the fields not in the key for\n the two arrays and concatenating the result. This array is then\n sorted, and the common entries selected. The output is constructed by\n filling the fields with the selected entries. 
Matching is not\n preserved if there are some duplicates...\n\n """\n # Check jointype\n if jointype not in ('inner', 'outer', 'leftouter'):\n raise ValueError(\n "The 'jointype' argument should be in 'inner', "\n "'outer' or 'leftouter' (got '%s' instead)" % jointype\n )\n # If we have a single key, put it in a tuple\n if isinstance(key, str):\n key = (key,)\n\n # Check the keys\n if len(set(key)) != len(key):\n dup = next(x for n, x in enumerate(key) if x in key[n + 1:])\n raise ValueError(f"duplicate join key {dup!r}")\n for name in key:\n if name not in r1.dtype.names:\n raise ValueError(f'r1 does not have key field {name!r}')\n if name not in r2.dtype.names:\n raise ValueError(f'r2 does not have key field {name!r}')\n\n # Make sure we work with ravelled arrays\n r1 = r1.ravel()\n r2 = r2.ravel()\n (nb1, nb2) = (len(r1), len(r2))\n (r1names, r2names) = (r1.dtype.names, r2.dtype.names)\n\n # Check the names for collision\n collisions = (set(r1names) & set(r2names)) - set(key)\n if collisions and not (r1postfix or r2postfix):\n msg = "r1 and r2 contain common names, r1postfix and r2postfix "\n msg += "can't both be empty"\n raise ValueError(msg)\n\n # Make temporary arrays of just the keys\n # (use order of keys in `r1` for back-compatibility)\n key1 = [n for n in r1names if n in key]\n r1k = _keep_fields(r1, key1)\n r2k = _keep_fields(r2, key1)\n\n # Concatenate the two arrays for comparison\n aux = ma.concatenate((r1k, r2k))\n idx_sort = aux.argsort(order=key)\n aux = aux[idx_sort]\n #\n # Get the common keys\n flag_in = ma.concatenate(([False], aux[1:] == aux[:-1]))\n flag_in[:-1] = flag_in[1:] + flag_in[:-1]\n idx_in = idx_sort[flag_in]\n idx_1 = idx_in[(idx_in < nb1)]\n idx_2 = idx_in[(idx_in >= nb1)] - nb1\n (r1cmn, r2cmn) = (len(idx_1), len(idx_2))\n if jointype == 'inner':\n (r1spc, r2spc) = (0, 0)\n elif jointype == 'outer':\n idx_out = idx_sort[~flag_in]\n idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))\n idx_2 = np.concatenate((idx_2, 
idx_out[(idx_out >= nb1)] - nb1))\n (r1spc, r2spc) = (len(idx_1) - r1cmn, len(idx_2) - r2cmn)\n elif jointype == 'leftouter':\n idx_out = idx_sort[~flag_in]\n idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))\n (r1spc, r2spc) = (len(idx_1) - r1cmn, 0)\n # Select the entries from each input\n (s1, s2) = (r1[idx_1], r2[idx_2])\n #\n # Build the new description of the output array .......\n # Start with the key fields\n ndtype = _get_fieldspec(r1k.dtype)\n\n # Add the fields from r1\n for fname, fdtype in _get_fieldspec(r1.dtype):\n if fname not in key:\n ndtype.append((fname, fdtype))\n\n # Add the fields from r2\n for fname, fdtype in _get_fieldspec(r2.dtype):\n # Have we seen the current name already ?\n # we need to rebuild this list every time\n names = [name for name, dtype in ndtype]\n try:\n nameidx = names.index(fname)\n except ValueError:\n #... we haven't: just add the description to the current list\n ndtype.append((fname, fdtype))\n else:\n # collision\n _, cdtype = ndtype[nameidx]\n if fname in key:\n # The current field is part of the key: take the largest dtype\n ndtype[nameidx] = (fname, max(fdtype, cdtype))\n else:\n # The current field is not part of the key: add the suffixes,\n # and place the new field adjacent to the old one\n ndtype[nameidx:nameidx + 1] = [\n (fname + r1postfix, cdtype),\n (fname + r2postfix, fdtype)\n ]\n # Rebuild a dtype from the new fields\n ndtype = np.dtype(ndtype)\n # Find the largest nb of common fields :\n # r1cmn and r2cmn should be equal, but...\n cmn = max(r1cmn, r2cmn)\n # Construct an empty array\n output = ma.masked_all((cmn + r1spc + r2spc,), dtype=ndtype)\n names = output.dtype.names\n for f in r1names:\n selected = s1[f]\n if f not in names or (f in r2names and not r2postfix and f not in key):\n f += r1postfix\n current = output[f]\n current[:r1cmn] = selected[:r1cmn]\n if jointype in ('outer', 'leftouter'):\n current[cmn:cmn + r1spc] = selected[r1cmn:]\n for f in r2names:\n selected = s2[f]\n if f not 
in names or (f in r1names and not r1postfix and f not in key):\n f += r2postfix\n current = output[f]\n current[:r2cmn] = selected[:r2cmn]\n if (jointype == 'outer') and r2spc:\n current[-r2spc:] = selected[r2cmn:]\n # Sort and finalize the output\n output.sort(order=key)\n kwargs = {'usemask': usemask, 'asrecarray': asrecarray}\n return _fix_output(_fix_defaults(output, defaults), **kwargs)\n\n\ndef _rec_join_dispatcher(\n key, r1, r2, jointype=None, r1postfix=None, r2postfix=None,\n defaults=None):\n return (r1, r2)\n\n\n@array_function_dispatch(_rec_join_dispatcher)\ndef rec_join(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',\n defaults=None):\n """\n Join arrays `r1` and `r2` on keys.\n Alternative to join_by, that always returns a np.recarray.\n\n See Also\n --------\n join_by : equivalent function\n """\n kwargs = {'jointype': jointype, 'r1postfix': r1postfix, 'r2postfix': r2postfix,\n 'defaults': defaults, 'usemask': False, 'asrecarray': True}\n return join_by(key, r1, r2, **kwargs)\n\n\ndel array_function_dispatch\n | .venv\Lib\site-packages\numpy\lib\recfunctions.py | recfunctions.py | Python | 61,220 | 0.75 | 0.160619 | 0.070588 | python-kit | 861 | 2024-03-04T16:17:41.456932 | GPL-3.0 | false | 208d59521b7cc33e36a46fafc26c8359 |
from collections.abc import Callable, Iterable, Mapping, Sequence\nfrom typing import Any, Literal, TypeAlias, overload\n\nfrom _typeshed import Incomplete\nfrom typing_extensions import TypeVar\n\nimport numpy as np\nimport numpy.typing as npt\nfrom numpy._typing import _AnyShape, _DTypeLike, _DTypeLikeVoid\nfrom numpy.ma.mrecords import MaskedRecords\n\n__all__ = [\n "append_fields",\n "apply_along_fields",\n "assign_fields_by_name",\n "drop_fields",\n "find_duplicates",\n "flatten_descr",\n "get_fieldstructure",\n "get_names",\n "get_names_flat",\n "join_by",\n "merge_arrays",\n "rec_append_fields",\n "rec_drop_fields",\n "rec_join",\n "recursive_fill_fields",\n "rename_fields",\n "repack_fields",\n "require_fields",\n "stack_arrays",\n "structured_to_unstructured",\n "unstructured_to_structured",\n]\n\n_T = TypeVar("_T")\n_ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...])\n_ScalarT = TypeVar("_ScalarT", bound=np.generic)\n_DTypeT = TypeVar("_DTypeT", bound=np.dtype)\n_ArrayT = TypeVar("_ArrayT", bound=npt.NDArray[Any])\n_VoidArrayT = TypeVar("_VoidArrayT", bound=npt.NDArray[np.void])\n_NonVoidDTypeT = TypeVar("_NonVoidDTypeT", bound=_NonVoidDType)\n\n_OneOrMany: TypeAlias = _T | Iterable[_T]\n_BuiltinSequence: TypeAlias = tuple[_T, ...] 
| list[_T]\n\n_NestedNames: TypeAlias = tuple[str | _NestedNames, ...]\n_NonVoid: TypeAlias = np.bool | np.number | np.character | np.datetime64 | np.timedelta64 | np.object_\n_NonVoidDType: TypeAlias = np.dtype[_NonVoid] | np.dtypes.StringDType\n\n_JoinType: TypeAlias = Literal["inner", "outer", "leftouter"]\n\n###\n\ndef recursive_fill_fields(input: npt.NDArray[np.void], output: _VoidArrayT) -> _VoidArrayT: ...\n\n#\ndef get_names(adtype: np.dtype[np.void]) -> _NestedNames: ...\ndef get_names_flat(adtype: np.dtype[np.void]) -> tuple[str, ...]: ...\n\n#\n@overload\ndef flatten_descr(ndtype: _NonVoidDTypeT) -> tuple[tuple[Literal[""], _NonVoidDTypeT]]: ...\n@overload\ndef flatten_descr(ndtype: np.dtype[np.void]) -> tuple[tuple[str, np.dtype]]: ...\n\n#\ndef get_fieldstructure(\n adtype: np.dtype[np.void],\n lastname: str | None = None,\n parents: dict[str, list[str]] | None = None,\n) -> dict[str, list[str]]: ...\n\n#\n@overload\ndef merge_arrays(\n seqarrays: Sequence[np.ndarray[_ShapeT, np.dtype]] | np.ndarray[_ShapeT, np.dtype],\n fill_value: float = -1,\n flatten: bool = False,\n usemask: bool = False,\n asrecarray: bool = False,\n) -> np.recarray[_ShapeT, np.dtype[np.void]]: ...\n@overload\ndef merge_arrays(\n seqarrays: Sequence[npt.ArrayLike] | np.void,\n fill_value: float = -1,\n flatten: bool = False,\n usemask: bool = False,\n asrecarray: bool = False,\n) -> np.recarray[_AnyShape, np.dtype[np.void]]: ...\n\n#\n@overload\ndef drop_fields(\n base: np.ndarray[_ShapeT, np.dtype[np.void]],\n drop_names: str | Iterable[str],\n usemask: bool = True,\n asrecarray: Literal[False] = False,\n) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ...\n@overload\ndef drop_fields(\n base: np.ndarray[_ShapeT, np.dtype[np.void]],\n drop_names: str | Iterable[str],\n usemask: bool,\n asrecarray: Literal[True],\n) -> np.recarray[_ShapeT, np.dtype[np.void]]: ...\n@overload\ndef drop_fields(\n base: np.ndarray[_ShapeT, np.dtype[np.void]],\n drop_names: str | Iterable[str],\n usemask: 
bool = True,\n *,\n asrecarray: Literal[True],\n) -> np.recarray[_ShapeT, np.dtype[np.void]]: ...\n\n#\n@overload\ndef rename_fields(\n base: MaskedRecords[_ShapeT, np.dtype[np.void]],\n namemapper: Mapping[str, str],\n) -> MaskedRecords[_ShapeT, np.dtype[np.void]]: ...\n@overload\ndef rename_fields(\n base: np.ma.MaskedArray[_ShapeT, np.dtype[np.void]],\n namemapper: Mapping[str, str],\n) -> np.ma.MaskedArray[_ShapeT, np.dtype[np.void]]: ...\n@overload\ndef rename_fields(\n base: np.recarray[_ShapeT, np.dtype[np.void]],\n namemapper: Mapping[str, str],\n) -> np.recarray[_ShapeT, np.dtype[np.void]]: ...\n@overload\ndef rename_fields(\n base: np.ndarray[_ShapeT, np.dtype[np.void]],\n namemapper: Mapping[str, str],\n) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ...\n\n#\n@overload\ndef append_fields(\n base: np.ndarray[_ShapeT, np.dtype[np.void]],\n names: _OneOrMany[str],\n data: _OneOrMany[npt.NDArray[Any]],\n dtypes: _BuiltinSequence[np.dtype] | None,\n fill_value: int,\n usemask: Literal[False],\n asrecarray: Literal[False] = False,\n) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ...\n@overload\ndef append_fields(\n base: np.ndarray[_ShapeT, np.dtype[np.void]],\n names: _OneOrMany[str],\n data: _OneOrMany[npt.NDArray[Any]],\n dtypes: _BuiltinSequence[np.dtype] | None = None,\n fill_value: int = -1,\n *,\n usemask: Literal[False],\n asrecarray: Literal[False] = False,\n) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ...\n@overload\ndef append_fields(\n base: np.ndarray[_ShapeT, np.dtype[np.void]],\n names: _OneOrMany[str],\n data: _OneOrMany[npt.NDArray[Any]],\n dtypes: _BuiltinSequence[np.dtype] | None,\n fill_value: int,\n usemask: Literal[False],\n asrecarray: Literal[True],\n) -> np.recarray[_ShapeT, np.dtype[np.void]]: ...\n@overload\ndef append_fields(\n base: np.ndarray[_ShapeT, np.dtype[np.void]],\n names: _OneOrMany[str],\n data: _OneOrMany[npt.NDArray[Any]],\n dtypes: _BuiltinSequence[np.dtype] | None = None,\n fill_value: int = -1,\n *,\n usemask: 
Literal[False],\n asrecarray: Literal[True],\n) -> np.recarray[_ShapeT, np.dtype[np.void]]: ...\n@overload\ndef append_fields(\n base: np.ndarray[_ShapeT, np.dtype[np.void]],\n names: _OneOrMany[str],\n data: _OneOrMany[npt.NDArray[Any]],\n dtypes: _BuiltinSequence[np.dtype] | None = None,\n fill_value: int = -1,\n usemask: Literal[True] = True,\n asrecarray: Literal[False] = False,\n) -> np.ma.MaskedArray[_ShapeT, np.dtype[np.void]]: ...\n@overload\ndef append_fields(\n base: np.ndarray[_ShapeT, np.dtype[np.void]],\n names: _OneOrMany[str],\n data: _OneOrMany[npt.NDArray[Any]],\n dtypes: _BuiltinSequence[np.dtype] | None,\n fill_value: int,\n usemask: Literal[True],\n asrecarray: Literal[True],\n) -> MaskedRecords[_ShapeT, np.dtype[np.void]]: ...\n@overload\ndef append_fields(\n base: np.ndarray[_ShapeT, np.dtype[np.void]],\n names: _OneOrMany[str],\n data: _OneOrMany[npt.NDArray[Any]],\n dtypes: _BuiltinSequence[np.dtype] | None = None,\n fill_value: int = -1,\n usemask: Literal[True] = True,\n *,\n asrecarray: Literal[True],\n) -> MaskedRecords[_ShapeT, np.dtype[np.void]]: ...\n\n#\ndef rec_drop_fields(\n base: np.ndarray[_ShapeT, np.dtype[np.void]],\n drop_names: str | Iterable[str],\n) -> np.recarray[_ShapeT, np.dtype[np.void]]: ...\n\n#\ndef rec_append_fields(\n base: np.ndarray[_ShapeT, np.dtype[np.void]],\n names: _OneOrMany[str],\n data: _OneOrMany[npt.NDArray[Any]],\n dtypes: _BuiltinSequence[np.dtype] | None = None,\n) -> np.ma.MaskedArray[_ShapeT, np.dtype[np.void]]: ...\n\n# TODO(jorenham): Stop passing `void` directly once structured dtypes are implemented,\n# e.g. 
using a `TypeVar` with constraints.\n# https://github.com/numpy/numtype/issues/92\n@overload\ndef repack_fields(a: _DTypeT, align: bool = False, recurse: bool = False) -> _DTypeT: ...\n@overload\ndef repack_fields(a: _ScalarT, align: bool = False, recurse: bool = False) -> _ScalarT: ...\n@overload\ndef repack_fields(a: _ArrayT, align: bool = False, recurse: bool = False) -> _ArrayT: ...\n\n# TODO(jorenham): Attempt shape-typing (return type has ndim == arr.ndim + 1)\n@overload\ndef structured_to_unstructured(\n arr: npt.NDArray[np.void],\n dtype: _DTypeLike[_ScalarT],\n copy: bool = False,\n casting: np._CastingKind = "unsafe",\n) -> npt.NDArray[_ScalarT]: ...\n@overload\ndef structured_to_unstructured(\n arr: npt.NDArray[np.void],\n dtype: npt.DTypeLike | None = None,\n copy: bool = False,\n casting: np._CastingKind = "unsafe",\n) -> npt.NDArray[Any]: ...\n\n#\n@overload\ndef unstructured_to_structured(\n arr: npt.NDArray[Any],\n dtype: npt.DTypeLike,\n names: None = None,\n align: bool = False,\n copy: bool = False,\n casting: str = "unsafe",\n) -> npt.NDArray[np.void]: ...\n@overload\ndef unstructured_to_structured(\n arr: npt.NDArray[Any],\n dtype: None,\n names: _OneOrMany[str],\n align: bool = False,\n copy: bool = False,\n casting: str = "unsafe",\n) -> npt.NDArray[np.void]: ...\n\n#\ndef apply_along_fields(\n func: Callable[[np.ndarray[_ShapeT, Any]], npt.NDArray[Any]],\n arr: np.ndarray[_ShapeT, np.dtype[np.void]],\n) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ...\n\n#\ndef assign_fields_by_name(dst: npt.NDArray[np.void], src: npt.NDArray[np.void], zero_unassigned: bool = True) -> None: ...\n\n#\ndef require_fields(\n array: np.ndarray[_ShapeT, np.dtype[np.void]],\n required_dtype: _DTypeLikeVoid,\n) -> np.ndarray[_ShapeT, np.dtype[np.void]]: ...\n\n# TODO(jorenham): Attempt shape-typing\n@overload\ndef stack_arrays(\n arrays: _ArrayT,\n defaults: Mapping[str, object] | None = None,\n usemask: bool = True,\n asrecarray: bool = False,\n autoconvert: bool = 
False,\n) -> _ArrayT: ...\n@overload\ndef stack_arrays(\n arrays: Sequence[npt.NDArray[Any]],\n defaults: Mapping[str, Incomplete] | None,\n usemask: Literal[False],\n asrecarray: Literal[False] = False,\n autoconvert: bool = False,\n) -> npt.NDArray[np.void]: ...\n@overload\ndef stack_arrays(\n arrays: Sequence[npt.NDArray[Any]],\n defaults: Mapping[str, Incomplete] | None = None,\n *,\n usemask: Literal[False],\n asrecarray: Literal[False] = False,\n autoconvert: bool = False,\n) -> npt.NDArray[np.void]: ...\n@overload\ndef stack_arrays(\n arrays: Sequence[npt.NDArray[Any]],\n defaults: Mapping[str, Incomplete] | None = None,\n *,\n usemask: Literal[False],\n asrecarray: Literal[True],\n autoconvert: bool = False,\n) -> np.recarray[_AnyShape, np.dtype[np.void]]: ...\n@overload\ndef stack_arrays(\n arrays: Sequence[npt.NDArray[Any]],\n defaults: Mapping[str, Incomplete] | None = None,\n usemask: Literal[True] = True,\n asrecarray: Literal[False] = False,\n autoconvert: bool = False,\n) -> np.ma.MaskedArray[_AnyShape, np.dtype[np.void]]: ...\n@overload\ndef stack_arrays(\n arrays: Sequence[npt.NDArray[Any]],\n defaults: Mapping[str, Incomplete] | None,\n usemask: Literal[True],\n asrecarray: Literal[True],\n autoconvert: bool = False,\n) -> MaskedRecords[_AnyShape, np.dtype[np.void]]: ...\n@overload\ndef stack_arrays(\n arrays: Sequence[npt.NDArray[Any]],\n defaults: Mapping[str, Incomplete] | None = None,\n usemask: Literal[True] = True,\n *,\n asrecarray: Literal[True],\n autoconvert: bool = False,\n) -> MaskedRecords[_AnyShape, np.dtype[np.void]]: ...\n\n#\n@overload\ndef find_duplicates(\n a: np.ma.MaskedArray[_ShapeT, np.dtype[np.void]],\n key: str | None = None,\n ignoremask: bool = True,\n return_index: Literal[False] = False,\n) -> np.ma.MaskedArray[_ShapeT, np.dtype[np.void]]: ...\n@overload\ndef find_duplicates(\n a: np.ma.MaskedArray[_ShapeT, np.dtype[np.void]],\n key: str | None,\n ignoremask: bool,\n return_index: Literal[True],\n) -> 
tuple[np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], np.ndarray[_ShapeT, np.dtype[np.int_]]]: ...\n@overload\ndef find_duplicates(\n a: np.ma.MaskedArray[_ShapeT, np.dtype[np.void]],\n key: str | None = None,\n ignoremask: bool = True,\n *,\n return_index: Literal[True],\n) -> tuple[np.ma.MaskedArray[_ShapeT, np.dtype[np.void]], np.ndarray[_ShapeT, np.dtype[np.int_]]]: ...\n\n#\n@overload\ndef join_by(\n key: str | Sequence[str],\n r1: npt.NDArray[np.void],\n r2: npt.NDArray[np.void],\n jointype: _JoinType = "inner",\n r1postfix: str = "1",\n r2postfix: str = "2",\n defaults: Mapping[str, object] | None = None,\n *,\n usemask: Literal[False],\n asrecarray: Literal[False] = False,\n) -> np.ndarray[tuple[int], np.dtype[np.void]]: ...\n@overload\ndef join_by(\n key: str | Sequence[str],\n r1: npt.NDArray[np.void],\n r2: npt.NDArray[np.void],\n jointype: _JoinType = "inner",\n r1postfix: str = "1",\n r2postfix: str = "2",\n defaults: Mapping[str, object] | None = None,\n *,\n usemask: Literal[False],\n asrecarray: Literal[True],\n) -> np.recarray[tuple[int], np.dtype[np.void]]: ...\n@overload\ndef join_by(\n key: str | Sequence[str],\n r1: npt.NDArray[np.void],\n r2: npt.NDArray[np.void],\n jointype: _JoinType = "inner",\n r1postfix: str = "1",\n r2postfix: str = "2",\n defaults: Mapping[str, object] | None = None,\n usemask: Literal[True] = True,\n asrecarray: Literal[False] = False,\n) -> np.ma.MaskedArray[tuple[int], np.dtype[np.void]]: ...\n@overload\ndef join_by(\n key: str | Sequence[str],\n r1: npt.NDArray[np.void],\n r2: npt.NDArray[np.void],\n jointype: _JoinType = "inner",\n r1postfix: str = "1",\n r2postfix: str = "2",\n defaults: Mapping[str, object] | None = None,\n usemask: Literal[True] = True,\n *,\n asrecarray: Literal[True],\n) -> MaskedRecords[tuple[int], np.dtype[np.void]]: ...\n\n#\ndef rec_join(\n key: str | Sequence[str],\n r1: npt.NDArray[np.void],\n r2: npt.NDArray[np.void],\n jointype: _JoinType = "inner",\n r1postfix: str = "1",\n r2postfix: 
str = "2",\n defaults: Mapping[str, object] | None = None,\n) -> np.recarray[tuple[int], np.dtype[np.void]]: ...\n | .venv\Lib\site-packages\numpy\lib\recfunctions.pyi | recfunctions.pyi | Other | 13,651 | 0.95 | 0.112644 | 0.081081 | node-utils | 323 | 2025-01-31T23:49:04.980862 | BSD-3-Clause | false | b7c0511b46a1cb8f88f6bdb61f7f3b99 |
from ._scimath_impl import ( # noqa: F401\n __all__,\n __doc__,\n arccos,\n arcsin,\n arctanh,\n log,\n log2,\n log10,\n logn,\n power,\n sqrt,\n)\n | .venv\Lib\site-packages\numpy\lib\scimath.py | scimath.py | Python | 182 | 0.95 | 0 | 0 | node-utils | 813 | 2025-05-01T01:22:32.527419 | MIT | false | db4b3c6289e11aae867569d4409c6c7d |
from ._scimath_impl import (\n __all__ as __all__,\n)\nfrom ._scimath_impl import (\n arccos as arccos,\n)\nfrom ._scimath_impl import (\n arcsin as arcsin,\n)\nfrom ._scimath_impl import (\n arctanh as arctanh,\n)\nfrom ._scimath_impl import (\n log as log,\n)\nfrom ._scimath_impl import (\n log2 as log2,\n)\nfrom ._scimath_impl import (\n log10 as log10,\n)\nfrom ._scimath_impl import (\n logn as logn,\n)\nfrom ._scimath_impl import (\n power as power,\n)\nfrom ._scimath_impl import (\n sqrt as sqrt,\n)\n | .venv\Lib\site-packages\numpy\lib\scimath.pyi | scimath.pyi | Other | 542 | 0.85 | 0 | 0 | node-utils | 381 | 2024-06-17T04:04:40.729317 | MIT | false | 6c808b33f491c90a511fb526bb13b546 |
from ._stride_tricks_impl import __doc__, as_strided, sliding_window_view # noqa: F401\n | .venv\Lib\site-packages\numpy\lib\stride_tricks.py | stride_tricks.py | Python | 89 | 0.75 | 0 | 0 | python-kit | 133 | 2024-08-31T09:14:16.278216 | Apache-2.0 | false | a7ec66cbea0f4fcc4bb3f4462e85d787 |
from numpy.lib._stride_tricks_impl import (\n as_strided as as_strided,\n)\nfrom numpy.lib._stride_tricks_impl import (\n sliding_window_view as sliding_window_view,\n)\n | .venv\Lib\site-packages\numpy\lib\stride_tricks.pyi | stride_tricks.pyi | Other | 176 | 0.85 | 0 | 0 | react-lib | 386 | 2025-04-03T02:05:28.173541 | Apache-2.0 | false | e62d5c7c69c36b4f9d7298e9513432e6 |
from ._user_array_impl import __doc__, container # noqa: F401\n | .venv\Lib\site-packages\numpy\lib\user_array.py | user_array.py | Python | 64 | 0.75 | 0 | 0 | vue-tools | 911 | 2025-05-29T07:00:43.080599 | GPL-3.0 | false | 1cf31a900ad92ca6073dc2ac37becc9f |
from ._user_array_impl import container as container\n | .venv\Lib\site-packages\numpy\lib\user_array.pyi | user_array.pyi | Other | 54 | 0.65 | 0 | 0 | react-lib | 422 | 2024-02-18T13:37:54.188915 | Apache-2.0 | false | e68c9a2d9b222abafdeead2978bb5b63 |
"""\nThe arraypad module contains a group of functions to pad values onto the edges\nof an n-dimensional array.\n\n"""\nimport numpy as np\nfrom numpy._core.overrides import array_function_dispatch\nfrom numpy.lib._index_tricks_impl import ndindex\n\n__all__ = ['pad']\n\n\n###############################################################################\n# Private utility functions.\n\n\ndef _round_if_needed(arr, dtype):\n """\n Rounds arr inplace if destination dtype is integer.\n\n Parameters\n ----------\n arr : ndarray\n Input array.\n dtype : dtype\n The dtype of the destination array.\n """\n if np.issubdtype(dtype, np.integer):\n arr.round(out=arr)\n\n\ndef _slice_at_axis(sl, axis):\n """\n Construct tuple of slices to slice an array in the given dimension.\n\n Parameters\n ----------\n sl : slice\n The slice for the given dimension.\n axis : int\n The axis to which `sl` is applied. All other dimensions are left\n "unsliced".\n\n Returns\n -------\n sl : tuple of slices\n A tuple with slices matching `shape` in length.\n\n Examples\n --------\n >>> np._slice_at_axis(slice(None, 3, -1), 1)\n (slice(None, None, None), slice(None, 3, -1), (...,))\n """\n return (slice(None),) * axis + (sl,) + (...,)\n\n\ndef _view_roi(array, original_area_slice, axis):\n """\n Get a view of the current region of interest during iterative padding.\n\n When padding multiple dimensions iteratively corner values are\n unnecessarily overwritten multiple times. 
This function reduces the\n working area for the first dimensions so that corners are excluded.\n\n Parameters\n ----------\n array : ndarray\n The array with the region of interest.\n original_area_slice : tuple of slices\n Denotes the area with original values of the unpadded array.\n axis : int\n The currently padded dimension assuming that `axis` is padded before\n `axis` + 1.\n\n Returns\n -------\n roi : ndarray\n The region of interest of the original `array`.\n """\n axis += 1\n sl = (slice(None),) * axis + original_area_slice[axis:]\n return array[sl]\n\n\ndef _pad_simple(array, pad_width, fill_value=None):\n """\n Pad array on all sides with either a single value or undefined values.\n\n Parameters\n ----------\n array : ndarray\n Array to grow.\n pad_width : sequence of tuple[int, int]\n Pad width on both sides for each dimension in `arr`.\n fill_value : scalar, optional\n If provided the padded area is filled with this value, otherwise\n the pad area left undefined.\n\n Returns\n -------\n padded : ndarray\n The padded array with the same dtype as`array`. 
Its order will default\n to C-style if `array` is not F-contiguous.\n original_area_slice : tuple\n A tuple of slices pointing to the area of the original array.\n """\n # Allocate grown array\n new_shape = tuple(\n left + size + right\n for size, (left, right) in zip(array.shape, pad_width)\n )\n order = 'F' if array.flags.fnc else 'C' # Fortran and not also C-order\n padded = np.empty(new_shape, dtype=array.dtype, order=order)\n\n if fill_value is not None:\n padded.fill(fill_value)\n\n # Copy old array into correct space\n original_area_slice = tuple(\n slice(left, left + size)\n for size, (left, right) in zip(array.shape, pad_width)\n )\n padded[original_area_slice] = array\n\n return padded, original_area_slice\n\n\ndef _set_pad_area(padded, axis, width_pair, value_pair):\n """\n Set empty-padded area in given dimension.\n\n Parameters\n ----------\n padded : ndarray\n Array with the pad area which is modified inplace.\n axis : int\n Dimension with the pad area to set.\n width_pair : (int, int)\n Pair of widths that mark the pad area on both sides in the given\n dimension.\n value_pair : tuple of scalars or ndarrays\n Values inserted into the pad area on each side. It must match or be\n broadcastable to the shape of `arr`.\n """\n left_slice = _slice_at_axis(slice(None, width_pair[0]), axis)\n padded[left_slice] = value_pair[0]\n\n right_slice = _slice_at_axis(\n slice(padded.shape[axis] - width_pair[1], None), axis)\n padded[right_slice] = value_pair[1]\n\n\ndef _get_edges(padded, axis, width_pair):\n """\n Retrieve edge values from empty-padded array in given dimension.\n\n Parameters\n ----------\n padded : ndarray\n Empty-padded array.\n axis : int\n Dimension in which the edges are considered.\n width_pair : (int, int)\n Pair of widths that mark the pad area on both sides in the given\n dimension.\n\n Returns\n -------\n left_edge, right_edge : ndarray\n Edge values of the valid area in `padded` in the given dimension. 
Its\n shape will always match `padded` except for the dimension given by\n `axis` which will have a length of 1.\n """\n left_index = width_pair[0]\n left_slice = _slice_at_axis(slice(left_index, left_index + 1), axis)\n left_edge = padded[left_slice]\n\n right_index = padded.shape[axis] - width_pair[1]\n right_slice = _slice_at_axis(slice(right_index - 1, right_index), axis)\n right_edge = padded[right_slice]\n\n return left_edge, right_edge\n\n\ndef _get_linear_ramps(padded, axis, width_pair, end_value_pair):\n """\n Construct linear ramps for empty-padded array in given dimension.\n\n Parameters\n ----------\n padded : ndarray\n Empty-padded array.\n axis : int\n Dimension in which the ramps are constructed.\n width_pair : (int, int)\n Pair of widths that mark the pad area on both sides in the given\n dimension.\n end_value_pair : (scalar, scalar)\n End values for the linear ramps which form the edge of the fully padded\n array. These values are included in the linear ramps.\n\n Returns\n -------\n left_ramp, right_ramp : ndarray\n Linear ramps to set on both sides of `padded`.\n """\n edge_pair = _get_edges(padded, axis, width_pair)\n\n left_ramp, right_ramp = (\n np.linspace(\n start=end_value,\n stop=edge.squeeze(axis), # Dimension is replaced by linspace\n num=width,\n endpoint=False,\n dtype=padded.dtype,\n axis=axis\n )\n for end_value, edge, width in zip(\n end_value_pair, edge_pair, width_pair\n )\n )\n\n # Reverse linear space in appropriate dimension\n right_ramp = right_ramp[_slice_at_axis(slice(None, None, -1), axis)]\n\n return left_ramp, right_ramp\n\n\ndef _get_stats(padded, axis, width_pair, length_pair, stat_func):\n """\n Calculate statistic for the empty-padded array in given dimension.\n\n Parameters\n ----------\n padded : ndarray\n Empty-padded array.\n axis : int\n Dimension in which the statistic is calculated.\n width_pair : (int, int)\n Pair of widths that mark the pad area on both sides in the given\n dimension.\n length_pair : 
2-element sequence of None or int\n Gives the number of values in valid area from each side that is\n taken into account when calculating the statistic. If None the entire\n valid area in `padded` is considered.\n stat_func : function\n Function to compute statistic. The expected signature is\n ``stat_func(x: ndarray, axis: int, keepdims: bool) -> ndarray``.\n\n Returns\n -------\n left_stat, right_stat : ndarray\n Calculated statistic for both sides of `padded`.\n """\n # Calculate indices of the edges of the area with original values\n left_index = width_pair[0]\n right_index = padded.shape[axis] - width_pair[1]\n # as well as its length\n max_length = right_index - left_index\n\n # Limit stat_lengths to max_length\n left_length, right_length = length_pair\n if left_length is None or max_length < left_length:\n left_length = max_length\n if right_length is None or max_length < right_length:\n right_length = max_length\n\n if (left_length == 0 or right_length == 0) \\n and stat_func in {np.amax, np.amin}:\n # amax and amin can't operate on an empty array,\n # raise a more descriptive warning here instead of the default one\n raise ValueError("stat_length of 0 yields no value for padding")\n\n # Calculate statistic for the left side\n left_slice = _slice_at_axis(\n slice(left_index, left_index + left_length), axis)\n left_chunk = padded[left_slice]\n left_stat = stat_func(left_chunk, axis=axis, keepdims=True)\n _round_if_needed(left_stat, padded.dtype)\n\n if left_length == right_length == max_length:\n # return early as right_stat must be identical to left_stat\n return left_stat, left_stat\n\n # Calculate statistic for the right side\n right_slice = _slice_at_axis(\n slice(right_index - right_length, right_index), axis)\n right_chunk = padded[right_slice]\n right_stat = stat_func(right_chunk, axis=axis, keepdims=True)\n _round_if_needed(right_stat, padded.dtype)\n\n return left_stat, right_stat\n\n\ndef _set_reflect_both(padded, axis, width_pair, method,\n 
original_period, include_edge=False):\n """\n Pad `axis` of `arr` with reflection.\n\n Parameters\n ----------\n padded : ndarray\n Input array of arbitrary shape.\n axis : int\n Axis along which to pad `arr`.\n width_pair : (int, int)\n Pair of widths that mark the pad area on both sides in the given\n dimension.\n method : str\n Controls method of reflection; options are 'even' or 'odd'.\n original_period : int\n Original length of data on `axis` of `arr`.\n include_edge : bool\n If true, edge value is included in reflection, otherwise the edge\n value forms the symmetric axis to the reflection.\n\n Returns\n -------\n pad_amt : tuple of ints, length 2\n New index positions of padding to do along the `axis`. If these are\n both 0, padding is done in this dimension.\n """\n left_pad, right_pad = width_pair\n old_length = padded.shape[axis] - right_pad - left_pad\n\n if include_edge:\n # Avoid wrapping with only a subset of the original area\n # by ensuring period can only be a multiple of the original\n # area's length.\n old_length = old_length // original_period * original_period\n # Edge is included, we need to offset the pad amount by 1\n edge_offset = 1\n else:\n # Avoid wrapping with only a subset of the original area\n # by ensuring period can only be a multiple of the original\n # area's length.\n old_length = ((old_length - 1) // (original_period - 1)\n * (original_period - 1) + 1)\n edge_offset = 0 # Edge is not included, no need to offset pad amount\n old_length -= 1 # but must be omitted from the chunk\n\n if left_pad > 0:\n # Pad with reflected values on left side:\n # First limit chunk size which can't be larger than pad area\n chunk_length = min(old_length, left_pad)\n # Slice right to left, stop on or next to edge, start relative to stop\n stop = left_pad - edge_offset\n start = stop + chunk_length\n left_slice = _slice_at_axis(slice(start, stop, -1), axis)\n left_chunk = padded[left_slice]\n\n if method == "odd":\n # Negate chunk and align with 
edge\n edge_slice = _slice_at_axis(slice(left_pad, left_pad + 1), axis)\n left_chunk = 2 * padded[edge_slice] - left_chunk\n\n # Insert chunk into padded area\n start = left_pad - chunk_length\n stop = left_pad\n pad_area = _slice_at_axis(slice(start, stop), axis)\n padded[pad_area] = left_chunk\n # Adjust pointer to left edge for next iteration\n left_pad -= chunk_length\n\n if right_pad > 0:\n # Pad with reflected values on right side:\n # First limit chunk size which can't be larger than pad area\n chunk_length = min(old_length, right_pad)\n # Slice right to left, start on or next to edge, stop relative to start\n start = -right_pad + edge_offset - 2\n stop = start - chunk_length\n right_slice = _slice_at_axis(slice(start, stop, -1), axis)\n right_chunk = padded[right_slice]\n\n if method == "odd":\n # Negate chunk and align with edge\n edge_slice = _slice_at_axis(\n slice(-right_pad - 1, -right_pad), axis)\n right_chunk = 2 * padded[edge_slice] - right_chunk\n\n # Insert chunk into padded area\n start = padded.shape[axis] - right_pad\n stop = start + chunk_length\n pad_area = _slice_at_axis(slice(start, stop), axis)\n padded[pad_area] = right_chunk\n # Adjust pointer to right edge for next iteration\n right_pad -= chunk_length\n\n return left_pad, right_pad\n\n\ndef _set_wrap_both(padded, axis, width_pair, original_period):\n """\n Pad `axis` of `arr` with wrapped values.\n\n Parameters\n ----------\n padded : ndarray\n Input array of arbitrary shape.\n axis : int\n Axis along which to pad `arr`.\n width_pair : (int, int)\n Pair of widths that mark the pad area on both sides in the given\n dimension.\n original_period : int\n Original length of data on `axis` of `arr`.\n\n Returns\n -------\n pad_amt : tuple of ints, length 2\n New index positions of padding to do along the `axis`. 
If these are\n both 0, padding is done in this dimension.\n """\n left_pad, right_pad = width_pair\n period = padded.shape[axis] - right_pad - left_pad\n # Avoid wrapping with only a subset of the original area by ensuring period\n # can only be a multiple of the original area's length.\n period = period // original_period * original_period\n\n # If the current dimension of `arr` doesn't contain enough valid values\n # (not part of the undefined pad area) we need to pad multiple times.\n # Each time the pad area shrinks on both sides which is communicated with\n # these variables.\n new_left_pad = 0\n new_right_pad = 0\n\n if left_pad > 0:\n # Pad with wrapped values on left side\n # First slice chunk from left side of the non-pad area.\n # Use min(period, left_pad) to ensure that chunk is not larger than\n # pad area.\n slice_end = left_pad + period\n slice_start = slice_end - min(period, left_pad)\n right_slice = _slice_at_axis(slice(slice_start, slice_end), axis)\n right_chunk = padded[right_slice]\n\n if left_pad > period:\n # Chunk is smaller than pad area\n pad_area = _slice_at_axis(slice(left_pad - period, left_pad), axis)\n new_left_pad = left_pad - period\n else:\n # Chunk matches pad area\n pad_area = _slice_at_axis(slice(None, left_pad), axis)\n padded[pad_area] = right_chunk\n\n if right_pad > 0:\n # Pad with wrapped values on right side\n # First slice chunk from right side of the non-pad area.\n # Use min(period, right_pad) to ensure that chunk is not larger than\n # pad area.\n slice_start = -right_pad - period\n slice_end = slice_start + min(period, right_pad)\n left_slice = _slice_at_axis(slice(slice_start, slice_end), axis)\n left_chunk = padded[left_slice]\n\n if right_pad > period:\n # Chunk is smaller than pad area\n pad_area = _slice_at_axis(\n slice(-right_pad, -right_pad + period), axis)\n new_right_pad = right_pad - period\n else:\n # Chunk matches pad area\n pad_area = _slice_at_axis(slice(-right_pad, None), axis)\n padded[pad_area] = 
left_chunk\n\n return new_left_pad, new_right_pad\n\n\ndef _as_pairs(x, ndim, as_index=False):\n """\n Broadcast `x` to an array with the shape (`ndim`, 2).\n\n A helper function for `pad` that prepares and validates arguments like\n `pad_width` for iteration in pairs.\n\n Parameters\n ----------\n x : {None, scalar, array-like}\n The object to broadcast to the shape (`ndim`, 2).\n ndim : int\n Number of pairs the broadcasted `x` will have.\n as_index : bool, optional\n If `x` is not None, try to round each element of `x` to an integer\n (dtype `np.intp`) and ensure every element is positive.\n\n Returns\n -------\n pairs : nested iterables, shape (`ndim`, 2)\n The broadcasted version of `x`.\n\n Raises\n ------\n ValueError\n If `as_index` is True and `x` contains negative elements.\n Or if `x` is not broadcastable to the shape (`ndim`, 2).\n """\n if x is None:\n # Pass through None as a special case, otherwise np.round(x) fails\n # with an AttributeError\n return ((None, None),) * ndim\n\n x = np.array(x)\n if as_index:\n x = np.round(x).astype(np.intp, copy=False)\n\n if x.ndim < 3:\n # Optimization: Possibly use faster paths for cases where `x` has\n # only 1 or 2 elements. `np.broadcast_to` could handle these as well\n # but is currently slower\n\n if x.size == 1:\n # x was supplied as a single value\n x = x.ravel() # Ensure x[0] works for x.ndim == 0, 1, 2\n if as_index and x < 0:\n raise ValueError("index can't contain negative values")\n return ((x[0], x[0]),) * ndim\n\n if x.size == 2 and x.shape != (2, 1):\n # x was supplied with a single value for each side\n # but except case when each dimension has a single value\n # which should be broadcasted to a pair,\n # e.g. 
[[1], [2]] -> [[1, 1], [2, 2]] not [[1, 2], [1, 2]]\n x = x.ravel() # Ensure x[0], x[1] works\n if as_index and (x[0] < 0 or x[1] < 0):\n raise ValueError("index can't contain negative values")\n return ((x[0], x[1]),) * ndim\n\n if as_index and x.min() < 0:\n raise ValueError("index can't contain negative values")\n\n # Converting the array with `tolist` seems to improve performance\n # when iterating and indexing the result (see usage in `pad`)\n return np.broadcast_to(x, (ndim, 2)).tolist()\n\n\ndef _pad_dispatcher(array, pad_width, mode=None, **kwargs):\n return (array,)\n\n\n###############################################################################\n# Public functions\n\n\n@array_function_dispatch(_pad_dispatcher, module='numpy')\ndef pad(array, pad_width, mode='constant', **kwargs):\n """\n Pad an array.\n\n Parameters\n ----------\n array : array_like of rank N\n The array to pad.\n pad_width : {sequence, array_like, int}\n Number of values padded to the edges of each axis.\n ``((before_1, after_1), ... 
(before_N, after_N))`` unique pad widths\n for each axis.\n ``(before, after)`` or ``((before, after),)`` yields same before\n and after pad for each axis.\n ``(pad,)`` or ``int`` is a shortcut for before = after = pad width\n for all axes.\n mode : str or function, optional\n One of the following string values or a user supplied function.\n\n 'constant' (default)\n Pads with a constant value.\n 'edge'\n Pads with the edge values of array.\n 'linear_ramp'\n Pads with the linear ramp between end_value and the\n array edge value.\n 'maximum'\n Pads with the maximum value of all or part of the\n vector along each axis.\n 'mean'\n Pads with the mean value of all or part of the\n vector along each axis.\n 'median'\n Pads with the median value of all or part of the\n vector along each axis.\n 'minimum'\n Pads with the minimum value of all or part of the\n vector along each axis.\n 'reflect'\n Pads with the reflection of the vector mirrored on\n the first and last values of the vector along each\n axis.\n 'symmetric'\n Pads with the reflection of the vector mirrored\n along the edge of the array.\n 'wrap'\n Pads with the wrap of the vector along the axis.\n The first values are used to pad the end and the\n end values are used to pad the beginning.\n 'empty'\n Pads with undefined values.\n\n <function>\n Padding function, see Notes.\n stat_length : sequence or int, optional\n Used in 'maximum', 'mean', 'median', and 'minimum'. Number of\n values at edge of each axis used to calculate the statistic value.\n\n ``((before_1, after_1), ... (before_N, after_N))`` unique statistic\n lengths for each axis.\n\n ``(before, after)`` or ``((before, after),)`` yields same before\n and after statistic lengths for each axis.\n\n ``(stat_length,)`` or ``int`` is a shortcut for\n ``before = after = statistic`` length for all axes.\n\n Default is ``None``, to use the entire axis.\n constant_values : sequence or scalar, optional\n Used in 'constant'. 
The values to set the padded values for each\n axis.\n\n ``((before_1, after_1), ... (before_N, after_N))`` unique pad constants\n for each axis.\n\n ``(before, after)`` or ``((before, after),)`` yields same before\n and after constants for each axis.\n\n ``(constant,)`` or ``constant`` is a shortcut for\n ``before = after = constant`` for all axes.\n\n Default is 0.\n end_values : sequence or scalar, optional\n Used in 'linear_ramp'. The values used for the ending value of the\n linear_ramp and that will form the edge of the padded array.\n\n ``((before_1, after_1), ... (before_N, after_N))`` unique end values\n for each axis.\n\n ``(before, after)`` or ``((before, after),)`` yields same before\n and after end values for each axis.\n\n ``(constant,)`` or ``constant`` is a shortcut for\n ``before = after = constant`` for all axes.\n\n Default is 0.\n reflect_type : {'even', 'odd'}, optional\n Used in 'reflect', and 'symmetric'. The 'even' style is the\n default with an unaltered reflection around the edge value. For\n the 'odd' style, the extended part of the array is created by\n subtracting the reflected values from two times the edge value.\n\n Returns\n -------\n pad : ndarray\n Padded array of rank equal to `array` with shape increased\n according to `pad_width`.\n\n Notes\n -----\n For an array with rank greater than 1, some of the padding of later\n axes is calculated from padding of previous axes. This is easiest to\n think about with a rank 2 array where the corners of the padded array\n are calculated by using padded values from the first axis.\n\n The padding function, if used, should modify a rank 1 array in-place. It\n has the following signature::\n\n padding_func(vector, iaxis_pad_width, iaxis, kwargs)\n\n where\n\n vector : ndarray\n A rank 1 array already padded with zeros. 
Padded values are\n vector[:iaxis_pad_width[0]] and vector[-iaxis_pad_width[1]:].\n iaxis_pad_width : tuple\n A 2-tuple of ints, iaxis_pad_width[0] represents the number of\n values padded at the beginning of vector where\n iaxis_pad_width[1] represents the number of values padded at\n the end of vector.\n iaxis : int\n The axis currently being calculated.\n kwargs : dict\n Any keyword arguments the function requires.\n\n Examples\n --------\n >>> import numpy as np\n >>> a = [1, 2, 3, 4, 5]\n >>> np.pad(a, (2, 3), 'constant', constant_values=(4, 6))\n array([4, 4, 1, ..., 6, 6, 6])\n\n >>> np.pad(a, (2, 3), 'edge')\n array([1, 1, 1, ..., 5, 5, 5])\n\n >>> np.pad(a, (2, 3), 'linear_ramp', end_values=(5, -4))\n array([ 5, 3, 1, 2, 3, 4, 5, 2, -1, -4])\n\n >>> np.pad(a, (2,), 'maximum')\n array([5, 5, 1, 2, 3, 4, 5, 5, 5])\n\n >>> np.pad(a, (2,), 'mean')\n array([3, 3, 1, 2, 3, 4, 5, 3, 3])\n\n >>> np.pad(a, (2,), 'median')\n array([3, 3, 1, 2, 3, 4, 5, 3, 3])\n\n >>> a = [[1, 2], [3, 4]]\n >>> np.pad(a, ((3, 2), (2, 3)), 'minimum')\n array([[1, 1, 1, 2, 1, 1, 1],\n [1, 1, 1, 2, 1, 1, 1],\n [1, 1, 1, 2, 1, 1, 1],\n [1, 1, 1, 2, 1, 1, 1],\n [3, 3, 3, 4, 3, 3, 3],\n [1, 1, 1, 2, 1, 1, 1],\n [1, 1, 1, 2, 1, 1, 1]])\n\n >>> a = [1, 2, 3, 4, 5]\n >>> np.pad(a, (2, 3), 'reflect')\n array([3, 2, 1, 2, 3, 4, 5, 4, 3, 2])\n\n >>> np.pad(a, (2, 3), 'reflect', reflect_type='odd')\n array([-1, 0, 1, 2, 3, 4, 5, 6, 7, 8])\n\n >>> np.pad(a, (2, 3), 'symmetric')\n array([2, 1, 1, 2, 3, 4, 5, 5, 4, 3])\n\n >>> np.pad(a, (2, 3), 'symmetric', reflect_type='odd')\n array([0, 1, 1, 2, 3, 4, 5, 5, 6, 7])\n\n >>> np.pad(a, (2, 3), 'wrap')\n array([4, 5, 1, 2, 3, 4, 5, 1, 2, 3])\n\n >>> def pad_with(vector, pad_width, iaxis, kwargs):\n ... pad_value = kwargs.get('padder', 10)\n ... vector[:pad_width[0]] = pad_value\n ... 
vector[-pad_width[1]:] = pad_value\n >>> a = np.arange(6)\n >>> a = a.reshape((2, 3))\n >>> np.pad(a, 2, pad_with)\n array([[10, 10, 10, 10, 10, 10, 10],\n [10, 10, 10, 10, 10, 10, 10],\n [10, 10, 0, 1, 2, 10, 10],\n [10, 10, 3, 4, 5, 10, 10],\n [10, 10, 10, 10, 10, 10, 10],\n [10, 10, 10, 10, 10, 10, 10]])\n >>> np.pad(a, 2, pad_with, padder=100)\n array([[100, 100, 100, 100, 100, 100, 100],\n [100, 100, 100, 100, 100, 100, 100],\n [100, 100, 0, 1, 2, 100, 100],\n [100, 100, 3, 4, 5, 100, 100],\n [100, 100, 100, 100, 100, 100, 100],\n [100, 100, 100, 100, 100, 100, 100]])\n """\n array = np.asarray(array)\n pad_width = np.asarray(pad_width)\n\n if not pad_width.dtype.kind == 'i':\n raise TypeError('`pad_width` must be of integral type.')\n\n # Broadcast to shape (array.ndim, 2)\n pad_width = _as_pairs(pad_width, array.ndim, as_index=True)\n\n if callable(mode):\n # Old behavior: Use user-supplied function with np.apply_along_axis\n function = mode\n # Create a new zero padded array\n padded, _ = _pad_simple(array, pad_width, fill_value=0)\n # And apply along each axis\n\n for axis in range(padded.ndim):\n # Iterate using ndindex as in apply_along_axis, but assuming that\n # function operates inplace on the padded array.\n\n # view with the iteration axis at the end\n view = np.moveaxis(padded, axis, -1)\n\n # compute indices for the iteration axes, and append a trailing\n # ellipsis to prevent 0d arrays decaying to scalars (gh-8642)\n inds = ndindex(view.shape[:-1])\n inds = (ind + (Ellipsis,) for ind in inds)\n for ind in inds:\n function(view[ind], pad_width[axis], axis, kwargs)\n\n return padded\n\n # Make sure that no unsupported keywords were passed for the current mode\n allowed_kwargs = {\n 'empty': [], 'edge': [], 'wrap': [],\n 'constant': ['constant_values'],\n 'linear_ramp': ['end_values'],\n 'maximum': ['stat_length'],\n 'mean': ['stat_length'],\n 'median': ['stat_length'],\n 'minimum': ['stat_length'],\n 'reflect': ['reflect_type'],\n 'symmetric': 
['reflect_type'],\n }\n try:\n unsupported_kwargs = set(kwargs) - set(allowed_kwargs[mode])\n except KeyError:\n raise ValueError(f"mode '{mode}' is not supported") from None\n if unsupported_kwargs:\n raise ValueError("unsupported keyword arguments for mode "\n f"'{mode}': {unsupported_kwargs}")\n\n stat_functions = {"maximum": np.amax, "minimum": np.amin,\n "mean": np.mean, "median": np.median}\n\n # Create array with final shape and original values\n # (padded area is undefined)\n padded, original_area_slice = _pad_simple(array, pad_width)\n # And prepare iteration over all dimensions\n # (zipping may be more readable than using enumerate)\n axes = range(padded.ndim)\n\n if mode == "constant":\n values = kwargs.get("constant_values", 0)\n values = _as_pairs(values, padded.ndim)\n for axis, width_pair, value_pair in zip(axes, pad_width, values):\n roi = _view_roi(padded, original_area_slice, axis)\n _set_pad_area(roi, axis, width_pair, value_pair)\n\n elif mode == "empty":\n pass # Do nothing as _pad_simple already returned the correct result\n\n elif array.size == 0:\n # Only modes "constant" and "empty" can extend empty axes, all other\n # modes depend on `array` not being empty\n # -> ensure every empty axis is only "padded with 0"\n for axis, width_pair in zip(axes, pad_width):\n if array.shape[axis] == 0 and any(width_pair):\n raise ValueError(\n f"can't extend empty axis {axis} using modes other than "\n "'constant' or 'empty'"\n )\n # passed, don't need to do anything more as _pad_simple already\n # returned the correct result\n\n elif mode == "edge":\n for axis, width_pair in zip(axes, pad_width):\n roi = _view_roi(padded, original_area_slice, axis)\n edge_pair = _get_edges(roi, axis, width_pair)\n _set_pad_area(roi, axis, width_pair, edge_pair)\n\n elif mode == "linear_ramp":\n end_values = kwargs.get("end_values", 0)\n end_values = _as_pairs(end_values, padded.ndim)\n for axis, width_pair, value_pair in zip(axes, pad_width, end_values):\n roi = 
_view_roi(padded, original_area_slice, axis)\n ramp_pair = _get_linear_ramps(roi, axis, width_pair, value_pair)\n _set_pad_area(roi, axis, width_pair, ramp_pair)\n\n elif mode in stat_functions:\n func = stat_functions[mode]\n length = kwargs.get("stat_length")\n length = _as_pairs(length, padded.ndim, as_index=True)\n for axis, width_pair, length_pair in zip(axes, pad_width, length):\n roi = _view_roi(padded, original_area_slice, axis)\n stat_pair = _get_stats(roi, axis, width_pair, length_pair, func)\n _set_pad_area(roi, axis, width_pair, stat_pair)\n\n elif mode in {"reflect", "symmetric"}:\n method = kwargs.get("reflect_type", "even")\n include_edge = mode == "symmetric"\n for axis, (left_index, right_index) in zip(axes, pad_width):\n if array.shape[axis] == 1 and (left_index > 0 or right_index > 0):\n # Extending singleton dimension for 'reflect' is legacy\n # behavior; it really should raise an error.\n edge_pair = _get_edges(padded, axis, (left_index, right_index))\n _set_pad_area(\n padded, axis, (left_index, right_index), edge_pair)\n continue\n\n roi = _view_roi(padded, original_area_slice, axis)\n while left_index > 0 or right_index > 0:\n # Iteratively pad until dimension is filled with reflected\n # values. This is necessary if the pad area is larger than\n # the length of the original values in the current dimension.\n left_index, right_index = _set_reflect_both(\n roi, axis, (left_index, right_index),\n method, array.shape[axis], include_edge\n )\n\n elif mode == "wrap":\n for axis, (left_index, right_index) in zip(axes, pad_width):\n roi = _view_roi(padded, original_area_slice, axis)\n original_period = padded.shape[axis] - right_index - left_index\n while left_index > 0 or right_index > 0:\n # Iteratively pad until dimension is filled with wrapped\n # values. 
This is necessary if the pad area is larger than\n # the length of the original values in the current dimension.\n left_index, right_index = _set_wrap_both(\n roi, axis, (left_index, right_index), original_period)\n\n return padded\n | .venv\Lib\site-packages\numpy\lib\_arraypad_impl.py | _arraypad_impl.py | Python | 33,186 | 0.95 | 0.134831 | 0.12349 | node-utils | 578 | 2024-11-10T19:27:33.415410 | Apache-2.0 | false | f78d8950de430ec4003c370226fe2e03 |
from typing import (\n Any,\n Protocol,\n TypeAlias,\n TypeVar,\n overload,\n type_check_only,\n)\nfrom typing import (\n Literal as L,\n)\n\nfrom numpy import generic\nfrom numpy._typing import (\n ArrayLike,\n NDArray,\n _ArrayLike,\n _ArrayLikeInt,\n)\n\n__all__ = ["pad"]\n\n_ScalarT = TypeVar("_ScalarT", bound=generic)\n\n@type_check_only\nclass _ModeFunc(Protocol):\n def __call__(\n self,\n vector: NDArray[Any],\n iaxis_pad_width: tuple[int, int],\n iaxis: int,\n kwargs: dict[str, Any],\n /,\n ) -> None: ...\n\n_ModeKind: TypeAlias = L[\n "constant",\n "edge",\n "linear_ramp",\n "maximum",\n "mean",\n "median",\n "minimum",\n "reflect",\n "symmetric",\n "wrap",\n "empty",\n]\n\n# TODO: In practice each keyword argument is exclusive to one or more\n# specific modes. Consider adding more overloads to express this in the future.\n\n# Expand `**kwargs` into explicit keyword-only arguments\n@overload\ndef pad(\n array: _ArrayLike[_ScalarT],\n pad_width: _ArrayLikeInt,\n mode: _ModeKind = ...,\n *,\n stat_length: _ArrayLikeInt | None = ...,\n constant_values: ArrayLike = ...,\n end_values: ArrayLike = ...,\n reflect_type: L["odd", "even"] = ...,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef pad(\n array: ArrayLike,\n pad_width: _ArrayLikeInt,\n mode: _ModeKind = ...,\n *,\n stat_length: _ArrayLikeInt | None = ...,\n constant_values: ArrayLike = ...,\n end_values: ArrayLike = ...,\n reflect_type: L["odd", "even"] = ...,\n) -> NDArray[Any]: ...\n@overload\ndef pad(\n array: _ArrayLike[_ScalarT],\n pad_width: _ArrayLikeInt,\n mode: _ModeFunc,\n **kwargs: Any,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef pad(\n array: ArrayLike,\n pad_width: _ArrayLikeInt,\n mode: _ModeFunc,\n **kwargs: Any,\n) -> NDArray[Any]: ...\n | .venv\Lib\site-packages\numpy\lib\_arraypad_impl.pyi | _arraypad_impl.pyi | Other | 1,926 | 0.95 | 0.067416 | 0.085366 | node-utils | 8 | 2025-04-01T10:44:41.568286 | Apache-2.0 | false | 6d71c8490b1af7566ee81c80da852f26 |
"""\nSet operations for arrays based on sorting.\n\nNotes\n-----\n\nFor floating point arrays, inaccurate results may appear due to usual round-off\nand floating point comparison issues.\n\nSpeed could be gained in some operations by an implementation of\n`numpy.sort`, that can provide directly the permutation vectors, thus avoiding\ncalls to `numpy.argsort`.\n\nOriginal author: Robert Cimrman\n\n"""\nimport functools\nimport warnings\nfrom typing import NamedTuple\n\nimport numpy as np\nfrom numpy._core import overrides\nfrom numpy._core._multiarray_umath import _array_converter, _unique_hash\n\narray_function_dispatch = functools.partial(\n overrides.array_function_dispatch, module='numpy')\n\n\n__all__ = [\n "ediff1d", "in1d", "intersect1d", "isin", "setdiff1d", "setxor1d",\n "union1d", "unique", "unique_all", "unique_counts", "unique_inverse",\n "unique_values"\n]\n\n\ndef _ediff1d_dispatcher(ary, to_end=None, to_begin=None):\n return (ary, to_end, to_begin)\n\n\n@array_function_dispatch(_ediff1d_dispatcher)\ndef ediff1d(ary, to_end=None, to_begin=None):\n """\n The differences between consecutive elements of an array.\n\n Parameters\n ----------\n ary : array_like\n If necessary, will be flattened before the differences are taken.\n to_end : array_like, optional\n Number(s) to append at the end of the returned differences.\n to_begin : array_like, optional\n Number(s) to prepend at the beginning of the returned differences.\n\n Returns\n -------\n ediff1d : ndarray\n The differences. 
Loosely, this is ``ary.flat[1:] - ary.flat[:-1]``.\n\n See Also\n --------\n diff, gradient\n\n Notes\n -----\n When applied to masked arrays, this function drops the mask information\n if the `to_begin` and/or `to_end` parameters are used.\n\n Examples\n --------\n >>> import numpy as np\n >>> x = np.array([1, 2, 4, 7, 0])\n >>> np.ediff1d(x)\n array([ 1, 2, 3, -7])\n\n >>> np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99]))\n array([-99, 1, 2, ..., -7, 88, 99])\n\n The returned array is always 1D.\n\n >>> y = [[1, 2, 4], [1, 6, 24]]\n >>> np.ediff1d(y)\n array([ 1, 2, -3, 5, 18])\n\n """\n conv = _array_converter(ary)\n # Convert to (any) array and ravel:\n ary = conv[0].ravel()\n\n # enforce that the dtype of `ary` is used for the output\n dtype_req = ary.dtype\n\n # fast track default case\n if to_begin is None and to_end is None:\n return ary[1:] - ary[:-1]\n\n if to_begin is None:\n l_begin = 0\n else:\n to_begin = np.asanyarray(to_begin)\n if not np.can_cast(to_begin, dtype_req, casting="same_kind"):\n raise TypeError("dtype of `to_begin` must be compatible "\n "with input `ary` under the `same_kind` rule.")\n\n to_begin = to_begin.ravel()\n l_begin = len(to_begin)\n\n if to_end is None:\n l_end = 0\n else:\n to_end = np.asanyarray(to_end)\n if not np.can_cast(to_end, dtype_req, casting="same_kind"):\n raise TypeError("dtype of `to_end` must be compatible "\n "with input `ary` under the `same_kind` rule.")\n\n to_end = to_end.ravel()\n l_end = len(to_end)\n\n # do the calculation in place and copy to_begin and to_end\n l_diff = max(len(ary) - 1, 0)\n result = np.empty_like(ary, shape=l_diff + l_begin + l_end)\n\n if l_begin > 0:\n result[:l_begin] = to_begin\n if l_end > 0:\n result[l_begin + l_diff:] = to_end\n np.subtract(ary[1:], ary[:-1], result[l_begin:l_begin + l_diff])\n\n return conv.wrap(result)\n\n\ndef _unpack_tuple(x):\n """ Unpacks one-element tuples for use as return values """\n if len(x) == 1:\n return x[0]\n else:\n return x\n\n\ndef 
_unique_dispatcher(ar, return_index=None, return_inverse=None,\n return_counts=None, axis=None, *, equal_nan=None,\n sorted=True):\n return (ar,)\n\n\n@array_function_dispatch(_unique_dispatcher)\ndef unique(ar, return_index=False, return_inverse=False,\n return_counts=False, axis=None, *, equal_nan=True,\n sorted=True):\n """\n Find the unique elements of an array.\n\n Returns the sorted unique elements of an array. There are three optional\n outputs in addition to the unique elements:\n\n * the indices of the input array that give the unique values\n * the indices of the unique array that reconstruct the input array\n * the number of times each unique value comes up in the input array\n\n Parameters\n ----------\n ar : array_like\n Input array. Unless `axis` is specified, this will be flattened if it\n is not already 1-D.\n return_index : bool, optional\n If True, also return the indices of `ar` (along the specified axis,\n if provided, or in the flattened array) that result in the unique array.\n return_inverse : bool, optional\n If True, also return the indices of the unique array (for the specified\n axis, if provided) that can be used to reconstruct `ar`.\n return_counts : bool, optional\n If True, also return the number of times each unique item appears\n in `ar`.\n axis : int or None, optional\n The axis to operate on. If None, `ar` will be flattened. If an integer,\n the subarrays indexed by the given axis will be flattened and treated\n as the elements of a 1-D array with the dimension of the given axis,\n see the notes for more details. Object arrays or structured arrays\n that contain objects are not supported if the `axis` kwarg is used. The\n default is None.\n\n equal_nan : bool, optional\n If True, collapses multiple NaN values in the return array into one.\n\n .. versionadded:: 1.24\n\n sorted : bool, optional\n If True, the unique elements are sorted. 
Elements may be sorted in\n practice even if ``sorted=False``, but this could change without\n notice.\n\n .. versionadded:: 2.3\n\n Returns\n -------\n unique : ndarray\n The sorted unique values.\n unique_indices : ndarray, optional\n The indices of the first occurrences of the unique values in the\n original array. Only provided if `return_index` is True.\n unique_inverse : ndarray, optional\n The indices to reconstruct the original array from the\n unique array. Only provided if `return_inverse` is True.\n unique_counts : ndarray, optional\n The number of times each of the unique values comes up in the\n original array. Only provided if `return_counts` is True.\n\n See Also\n --------\n repeat : Repeat elements of an array.\n sort : Return a sorted copy of an array.\n\n Notes\n -----\n When an axis is specified the subarrays indexed by the axis are sorted.\n This is done by making the specified axis the first dimension of the array\n (move the axis to the first dimension to keep the order of the other axes)\n and then flattening the subarrays in C order. The flattened subarrays are\n then viewed as a structured type with each element given a label, with the\n effect that we end up with a 1-D array of structured types that can be\n treated in the same way as any other 1-D array. The result is that the\n flattened subarrays are sorted in lexicographic order starting with the\n first element.\n\n .. versionchanged:: 1.21\n Like np.sort, NaN will sort to the end of the values.\n For complex arrays all NaN values are considered equivalent\n (no matter whether the NaN is in the real or imaginary part).\n As the representant for the returned array the smallest one in the\n lexicographical order is chosen - see np.sort for how the lexicographical\n order is defined for complex arrays.\n\n .. 
versionchanged:: 2.0\n For multi-dimensional inputs, ``unique_inverse`` is reshaped\n such that the input can be reconstructed using\n ``np.take(unique, unique_inverse, axis=axis)``. The result is\n now not 1-dimensional when ``axis=None``.\n\n Note that in NumPy 2.0.0 a higher dimensional array was returned also\n when ``axis`` was not ``None``. This was reverted, but\n ``inverse.reshape(-1)`` can be used to ensure compatibility with both\n versions.\n\n Examples\n --------\n >>> import numpy as np\n >>> np.unique([1, 1, 2, 2, 3, 3])\n array([1, 2, 3])\n >>> a = np.array([[1, 1], [2, 3]])\n >>> np.unique(a)\n array([1, 2, 3])\n\n Return the unique rows of a 2D array\n\n >>> a = np.array([[1, 0, 0], [1, 0, 0], [2, 3, 4]])\n >>> np.unique(a, axis=0)\n array([[1, 0, 0], [2, 3, 4]])\n\n Return the indices of the original array that give the unique values:\n\n >>> a = np.array(['a', 'b', 'b', 'c', 'a'])\n >>> u, indices = np.unique(a, return_index=True)\n >>> u\n array(['a', 'b', 'c'], dtype='<U1')\n >>> indices\n array([0, 1, 3])\n >>> a[indices]\n array(['a', 'b', 'c'], dtype='<U1')\n\n Reconstruct the input array from the unique values and inverse:\n\n >>> a = np.array([1, 2, 6, 4, 2, 3, 2])\n >>> u, indices = np.unique(a, return_inverse=True)\n >>> u\n array([1, 2, 3, 4, 6])\n >>> indices\n array([0, 1, 4, 3, 1, 2, 1])\n >>> u[indices]\n array([1, 2, 6, 4, 2, 3, 2])\n\n Reconstruct the input values from the unique values and counts:\n\n >>> a = np.array([1, 2, 6, 4, 2, 3, 2])\n >>> values, counts = np.unique(a, return_counts=True)\n >>> values\n array([1, 2, 3, 4, 6])\n >>> counts\n array([1, 3, 1, 1, 1])\n >>> np.repeat(values, counts)\n array([1, 2, 2, 2, 3, 4, 6]) # original order not preserved\n\n """\n ar = np.asanyarray(ar)\n if axis is None:\n ret = _unique1d(ar, return_index, return_inverse, return_counts,\n equal_nan=equal_nan, inverse_shape=ar.shape, axis=None,\n sorted=sorted)\n return _unpack_tuple(ret)\n\n # axis was specified and not None\n try:\n ar 
= np.moveaxis(ar, axis, 0)\n except np.exceptions.AxisError:\n # this removes the "axis1" or "axis2" prefix from the error message\n raise np.exceptions.AxisError(axis, ar.ndim) from None\n inverse_shape = [1] * ar.ndim\n inverse_shape[axis] = ar.shape[0]\n\n # Must reshape to a contiguous 2D array for this to work...\n orig_shape, orig_dtype = ar.shape, ar.dtype\n ar = ar.reshape(orig_shape[0], np.prod(orig_shape[1:], dtype=np.intp))\n ar = np.ascontiguousarray(ar)\n dtype = [(f'f{i}', ar.dtype) for i in range(ar.shape[1])]\n\n # At this point, `ar` has shape `(n, m)`, and `dtype` is a structured\n # data type with `m` fields where each field has the data type of `ar`.\n # In the following, we create the array `consolidated`, which has\n # shape `(n,)` with data type `dtype`.\n try:\n if ar.shape[1] > 0:\n consolidated = ar.view(dtype)\n else:\n # If ar.shape[1] == 0, then dtype will be `np.dtype([])`, which is\n # a data type with itemsize 0, and the call `ar.view(dtype)` will\n # fail. Instead, we'll use `np.empty` to explicitly create the\n # array with shape `(len(ar),)`. 
Because `dtype` in this case has\n # itemsize 0, the total size of the result is still 0 bytes.\n consolidated = np.empty(len(ar), dtype=dtype)\n except TypeError as e:\n # There's no good way to do this for object arrays, etc...\n msg = 'The axis argument to unique is not supported for dtype {dt}'\n raise TypeError(msg.format(dt=ar.dtype)) from e\n\n def reshape_uniq(uniq):\n n = len(uniq)\n uniq = uniq.view(orig_dtype)\n uniq = uniq.reshape(n, *orig_shape[1:])\n uniq = np.moveaxis(uniq, 0, axis)\n return uniq\n\n output = _unique1d(consolidated, return_index,\n return_inverse, return_counts,\n equal_nan=equal_nan, inverse_shape=inverse_shape,\n axis=axis, sorted=sorted)\n output = (reshape_uniq(output[0]),) + output[1:]\n return _unpack_tuple(output)\n\n\ndef _unique1d(ar, return_index=False, return_inverse=False,\n return_counts=False, *, equal_nan=True, inverse_shape=None,\n axis=None, sorted=True):\n """\n Find the unique elements of an array, ignoring shape.\n\n Uses a hash table to find the unique elements if possible.\n """\n ar = np.asanyarray(ar).flatten()\n if len(ar.shape) != 1:\n # np.matrix, and maybe some other array subclasses, insist on keeping\n # two dimensions for all operations. 
Coerce to an ndarray in such cases.\n ar = np.asarray(ar).flatten()\n\n optional_indices = return_index or return_inverse\n\n # masked arrays are not supported yet.\n if not optional_indices and not return_counts and not np.ma.is_masked(ar):\n # First we convert the array to a numpy array, later we wrap it back\n # in case it was a subclass of numpy.ndarray.\n conv = _array_converter(ar)\n ar_, = conv\n\n if (hash_unique := _unique_hash(ar_)) is not NotImplemented:\n if sorted:\n hash_unique.sort()\n # We wrap the result back in case it was a subclass of numpy.ndarray.\n return (conv.wrap(hash_unique),)\n\n # If we don't use the hash map, we use the slower sorting method.\n if optional_indices:\n perm = ar.argsort(kind='mergesort' if return_index else 'quicksort')\n aux = ar[perm]\n else:\n ar.sort()\n aux = ar\n mask = np.empty(aux.shape, dtype=np.bool)\n mask[:1] = True\n if (equal_nan and aux.shape[0] > 0 and aux.dtype.kind in "cfmM" and\n np.isnan(aux[-1])):\n if aux.dtype.kind == "c": # for complex all NaNs are considered equivalent\n aux_firstnan = np.searchsorted(np.isnan(aux), True, side='left')\n else:\n aux_firstnan = np.searchsorted(aux, aux[-1], side='left')\n if aux_firstnan > 0:\n mask[1:aux_firstnan] = (\n aux[1:aux_firstnan] != aux[:aux_firstnan - 1])\n mask[aux_firstnan] = True\n mask[aux_firstnan + 1:] = False\n else:\n mask[1:] = aux[1:] != aux[:-1]\n\n ret = (aux[mask],)\n if return_index:\n ret += (perm[mask],)\n if return_inverse:\n imask = np.cumsum(mask) - 1\n inv_idx = np.empty(mask.shape, dtype=np.intp)\n inv_idx[perm] = imask\n ret += (inv_idx.reshape(inverse_shape) if axis is None else inv_idx,)\n if return_counts:\n idx = np.concatenate(np.nonzero(mask) + ([mask.size],))\n ret += (np.diff(idx),)\n return ret\n\n\n# Array API set functions\n\nclass UniqueAllResult(NamedTuple):\n values: np.ndarray\n indices: np.ndarray\n inverse_indices: np.ndarray\n counts: np.ndarray\n\n\nclass UniqueCountsResult(NamedTuple):\n values: np.ndarray\n 
counts: np.ndarray\n\n\nclass UniqueInverseResult(NamedTuple):\n values: np.ndarray\n inverse_indices: np.ndarray\n\n\ndef _unique_all_dispatcher(x, /):\n return (x,)\n\n\n@array_function_dispatch(_unique_all_dispatcher)\ndef unique_all(x):\n """\n Find the unique elements of an array, and counts, inverse, and indices.\n\n This function is an Array API compatible alternative to::\n\n np.unique(x, return_index=True, return_inverse=True,\n return_counts=True, equal_nan=False, sorted=False)\n\n but returns a namedtuple for easier access to each output.\n\n .. note::\n This function currently always returns a sorted result, however,\n this could change in any NumPy minor release.\n\n Parameters\n ----------\n x : array_like\n Input array. It will be flattened if it is not already 1-D.\n\n Returns\n -------\n out : namedtuple\n The result containing:\n\n * values - The unique elements of an input array.\n * indices - The first occurring indices for each unique element.\n * inverse_indices - The indices from the set of unique elements\n that reconstruct `x`.\n * counts - The corresponding counts for each unique element.\n\n See Also\n --------\n unique : Find the unique elements of an array.\n\n Examples\n --------\n >>> import numpy as np\n >>> x = [1, 1, 2]\n >>> uniq = np.unique_all(x)\n >>> uniq.values\n array([1, 2])\n >>> uniq.indices\n array([0, 2])\n >>> uniq.inverse_indices\n array([0, 0, 1])\n >>> uniq.counts\n array([2, 1])\n """\n result = unique(\n x,\n return_index=True,\n return_inverse=True,\n return_counts=True,\n equal_nan=False,\n )\n return UniqueAllResult(*result)\n\n\ndef _unique_counts_dispatcher(x, /):\n return (x,)\n\n\n@array_function_dispatch(_unique_counts_dispatcher)\ndef unique_counts(x):\n """\n Find the unique elements and counts of an input array `x`.\n\n This function is an Array API compatible alternative to::\n\n np.unique(x, return_counts=True, equal_nan=False, sorted=False)\n\n but returns a namedtuple for easier access to each 
output.\n\n .. note::\n This function currently always returns a sorted result, however,\n this could change in any NumPy minor release.\n\n Parameters\n ----------\n x : array_like\n Input array. It will be flattened if it is not already 1-D.\n\n Returns\n -------\n out : namedtuple\n The result containing:\n\n * values - The unique elements of an input array.\n * counts - The corresponding counts for each unique element.\n\n See Also\n --------\n unique : Find the unique elements of an array.\n\n Examples\n --------\n >>> import numpy as np\n >>> x = [1, 1, 2]\n >>> uniq = np.unique_counts(x)\n >>> uniq.values\n array([1, 2])\n >>> uniq.counts\n array([2, 1])\n """\n result = unique(\n x,\n return_index=False,\n return_inverse=False,\n return_counts=True,\n equal_nan=False,\n )\n return UniqueCountsResult(*result)\n\n\ndef _unique_inverse_dispatcher(x, /):\n return (x,)\n\n\n@array_function_dispatch(_unique_inverse_dispatcher)\ndef unique_inverse(x):\n """\n Find the unique elements of `x` and indices to reconstruct `x`.\n\n This function is an Array API compatible alternative to::\n\n np.unique(x, return_inverse=True, equal_nan=False, sorted=False)\n\n but returns a namedtuple for easier access to each output.\n\n .. note::\n This function currently always returns a sorted result, however,\n this could change in any NumPy minor release.\n\n Parameters\n ----------\n x : array_like\n Input array. 
It will be flattened if it is not already 1-D.\n\n Returns\n -------\n out : namedtuple\n The result containing:\n\n * values - The unique elements of an input array.\n * inverse_indices - The indices from the set of unique elements\n that reconstruct `x`.\n\n See Also\n --------\n unique : Find the unique elements of an array.\n\n Examples\n --------\n >>> import numpy as np\n >>> x = [1, 1, 2]\n >>> uniq = np.unique_inverse(x)\n >>> uniq.values\n array([1, 2])\n >>> uniq.inverse_indices\n array([0, 0, 1])\n """\n result = unique(\n x,\n return_index=False,\n return_inverse=True,\n return_counts=False,\n equal_nan=False,\n )\n return UniqueInverseResult(*result)\n\n\ndef _unique_values_dispatcher(x, /):\n return (x,)\n\n\n@array_function_dispatch(_unique_values_dispatcher)\ndef unique_values(x):\n """\n Returns the unique elements of an input array `x`.\n\n This function is an Array API compatible alternative to::\n\n np.unique(x, equal_nan=False, sorted=False)\n\n .. versionchanged:: 2.3\n The algorithm was changed to a faster one that does not rely on\n sorting, and hence the results are no longer implicitly sorted.\n\n Parameters\n ----------\n x : array_like\n Input array. 
It will be flattened if it is not already 1-D.\n\n Returns\n -------\n out : ndarray\n The unique elements of an input array.\n\n See Also\n --------\n unique : Find the unique elements of an array.\n\n Examples\n --------\n >>> import numpy as np\n >>> np.unique_values([1, 1, 2])\n array([1, 2]) # may vary\n\n """\n return unique(\n x,\n return_index=False,\n return_inverse=False,\n return_counts=False,\n equal_nan=False,\n sorted=False,\n )\n\n\ndef _intersect1d_dispatcher(\n ar1, ar2, assume_unique=None, return_indices=None):\n return (ar1, ar2)\n\n\n@array_function_dispatch(_intersect1d_dispatcher)\ndef intersect1d(ar1, ar2, assume_unique=False, return_indices=False):\n """\n Find the intersection of two arrays.\n\n Return the sorted, unique values that are in both of the input arrays.\n\n Parameters\n ----------\n ar1, ar2 : array_like\n Input arrays. Will be flattened if not already 1D.\n assume_unique : bool\n If True, the input arrays are both assumed to be unique, which\n can speed up the calculation. If True but ``ar1`` or ``ar2`` are not\n unique, incorrect results and out-of-bounds indices could result.\n Default is False.\n return_indices : bool\n If True, the indices which correspond to the intersection of the two\n arrays are returned. The first instance of a value is used if there are\n multiple. 
Default is False.\n\n Returns\n -------\n intersect1d : ndarray\n Sorted 1D array of common and unique elements.\n comm1 : ndarray\n The indices of the first occurrences of the common values in `ar1`.\n Only provided if `return_indices` is True.\n comm2 : ndarray\n The indices of the first occurrences of the common values in `ar2`.\n Only provided if `return_indices` is True.\n\n Examples\n --------\n >>> import numpy as np\n >>> np.intersect1d([1, 3, 4, 3], [3, 1, 2, 1])\n array([1, 3])\n\n To intersect more than two arrays, use functools.reduce:\n\n >>> from functools import reduce\n >>> reduce(np.intersect1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2]))\n array([3])\n\n To return the indices of the values common to the input arrays\n along with the intersected values:\n\n >>> x = np.array([1, 1, 2, 3, 4])\n >>> y = np.array([2, 1, 4, 6])\n >>> xy, x_ind, y_ind = np.intersect1d(x, y, return_indices=True)\n >>> x_ind, y_ind\n (array([0, 2, 4]), array([1, 0, 2]))\n >>> xy, x[x_ind], y[y_ind]\n (array([1, 2, 4]), array([1, 2, 4]), array([1, 2, 4]))\n\n """\n ar1 = np.asanyarray(ar1)\n ar2 = np.asanyarray(ar2)\n\n if not assume_unique:\n if return_indices:\n ar1, ind1 = unique(ar1, return_index=True)\n ar2, ind2 = unique(ar2, return_index=True)\n else:\n ar1 = unique(ar1)\n ar2 = unique(ar2)\n else:\n ar1 = ar1.ravel()\n ar2 = ar2.ravel()\n\n aux = np.concatenate((ar1, ar2))\n if return_indices:\n aux_sort_indices = np.argsort(aux, kind='mergesort')\n aux = aux[aux_sort_indices]\n else:\n aux.sort()\n\n mask = aux[1:] == aux[:-1]\n int1d = aux[:-1][mask]\n\n if return_indices:\n ar1_indices = aux_sort_indices[:-1][mask]\n ar2_indices = aux_sort_indices[1:][mask] - ar1.size\n if not assume_unique:\n ar1_indices = ind1[ar1_indices]\n ar2_indices = ind2[ar2_indices]\n\n return int1d, ar1_indices, ar2_indices\n else:\n return int1d\n\n\ndef _setxor1d_dispatcher(ar1, ar2, assume_unique=None):\n return (ar1, ar2)\n\n\n@array_function_dispatch(_setxor1d_dispatcher)\ndef 
setxor1d(ar1, ar2, assume_unique=False):\n """\n Find the set exclusive-or of two arrays.\n\n Return the sorted, unique values that are in only one (not both) of the\n input arrays.\n\n Parameters\n ----------\n ar1, ar2 : array_like\n Input arrays.\n assume_unique : bool\n If True, the input arrays are both assumed to be unique, which\n can speed up the calculation. Default is False.\n\n Returns\n -------\n setxor1d : ndarray\n Sorted 1D array of unique values that are in only one of the input\n arrays.\n\n Examples\n --------\n >>> import numpy as np\n >>> a = np.array([1, 2, 3, 2, 4])\n >>> b = np.array([2, 3, 5, 7, 5])\n >>> np.setxor1d(a,b)\n array([1, 4, 5, 7])\n\n """\n if not assume_unique:\n ar1 = unique(ar1)\n ar2 = unique(ar2)\n\n aux = np.concatenate((ar1, ar2), axis=None)\n if aux.size == 0:\n return aux\n\n aux.sort()\n flag = np.concatenate(([True], aux[1:] != aux[:-1], [True]))\n return aux[flag[1:] & flag[:-1]]\n\n\ndef _in1d_dispatcher(ar1, ar2, assume_unique=None, invert=None, *,\n kind=None):\n return (ar1, ar2)\n\n\n@array_function_dispatch(_in1d_dispatcher)\ndef in1d(ar1, ar2, assume_unique=False, invert=False, *, kind=None):\n """\n Test whether each element of a 1-D array is also present in a second array.\n\n .. deprecated:: 2.0\n Use :func:`isin` instead of `in1d` for new code.\n\n Returns a boolean array the same length as `ar1` that is True\n where an element of `ar1` is in `ar2` and False otherwise.\n\n Parameters\n ----------\n ar1 : (M,) array_like\n Input array.\n ar2 : array_like\n The values against which to test each value of `ar1`.\n assume_unique : bool, optional\n If True, the input arrays are both assumed to be unique, which\n can speed up the calculation. Default is False.\n invert : bool, optional\n If True, the values in the returned array are inverted (that is,\n False where an element of `ar1` is in `ar2` and True otherwise).\n Default is False. 
``np.in1d(a, b, invert=True)`` is equivalent\n to (but is faster than) ``np.invert(in1d(a, b))``.\n kind : {None, 'sort', 'table'}, optional\n The algorithm to use. This will not affect the final result,\n but will affect the speed and memory use. The default, None,\n will select automatically based on memory considerations.\n\n * If 'sort', will use a mergesort-based approach. This will have\n a memory usage of roughly 6 times the sum of the sizes of\n `ar1` and `ar2`, not accounting for size of dtypes.\n * If 'table', will use a lookup table approach similar\n to a counting sort. This is only available for boolean and\n integer arrays. This will have a memory usage of the\n size of `ar1` plus the max-min value of `ar2`. `assume_unique`\n has no effect when the 'table' option is used.\n * If None, will automatically choose 'table' if\n the required memory allocation is less than or equal to\n 6 times the sum of the sizes of `ar1` and `ar2`,\n otherwise will use 'sort'. This is done to not use\n a large amount of memory by default, even though\n 'table' may be faster in most cases. If 'table' is chosen,\n `assume_unique` will have no effect.\n\n Returns\n -------\n in1d : (M,) ndarray, bool\n The values `ar1[in1d]` are in `ar2`.\n\n See Also\n --------\n isin : Version of this function that preserves the\n shape of ar1.\n\n Notes\n -----\n `in1d` can be considered as an element-wise function version of the\n python keyword `in`, for 1-D sequences. ``in1d(a, b)`` is roughly\n equivalent to ``np.array([item in b for item in a])``.\n However, this idea fails if `ar2` is a set, or similar (non-sequence)\n container: As ``ar2`` is converted to an array, in those cases\n ``asarray(ar2)`` is an object array rather than the expected array of\n contained values.\n\n Using ``kind='table'`` tends to be faster than `kind='sort'` if the\n following relationship is true:\n ``log10(len(ar2)) > (log10(max(ar2)-min(ar2)) - 2.27) / 0.927``,\n but may use greater memory. 
The default value for `kind` will\n be automatically selected based only on memory usage, so one may\n manually set ``kind='table'`` if memory constraints can be relaxed.\n\n Examples\n --------\n >>> import numpy as np\n >>> test = np.array([0, 1, 2, 5, 0])\n >>> states = [0, 2]\n >>> mask = np.in1d(test, states)\n >>> mask\n array([ True, False, True, False, True])\n >>> test[mask]\n array([0, 2, 0])\n >>> mask = np.in1d(test, states, invert=True)\n >>> mask\n array([False, True, False, True, False])\n >>> test[mask]\n array([1, 5])\n """\n\n # Deprecated in NumPy 2.0, 2023-08-18\n warnings.warn(\n "`in1d` is deprecated. Use `np.isin` instead.",\n DeprecationWarning,\n stacklevel=2\n )\n\n return _in1d(ar1, ar2, assume_unique, invert, kind=kind)\n\n\ndef _in1d(ar1, ar2, assume_unique=False, invert=False, *, kind=None):\n # Ravel both arrays, behavior for the first array could be different\n ar1 = np.asarray(ar1).ravel()\n ar2 = np.asarray(ar2).ravel()\n\n # Ensure that iteration through object arrays yields size-1 arrays\n if ar2.dtype == object:\n ar2 = ar2.reshape(-1, 1)\n\n if kind not in {None, 'sort', 'table'}:\n raise ValueError(\n f"Invalid kind: '{kind}'. Please use None, 'sort' or 'table'.")\n\n # Can use the table method if all arrays are integers or boolean:\n is_int_arrays = all(ar.dtype.kind in ("u", "i", "b") for ar in (ar1, ar2))\n use_table_method = is_int_arrays and kind in {None, 'table'}\n\n if use_table_method:\n if ar2.size == 0:\n if invert:\n return np.ones_like(ar1, dtype=bool)\n else:\n return np.zeros_like(ar1, dtype=bool)\n\n # Convert booleans to uint8 so we can use the fast integer algorithm\n if ar1.dtype == bool:\n ar1 = ar1.astype(np.uint8)\n if ar2.dtype == bool:\n ar2 = ar2.astype(np.uint8)\n\n ar2_min = int(np.min(ar2))\n ar2_max = int(np.max(ar2))\n\n ar2_range = ar2_max - ar2_min\n\n # Constraints on whether we can actually use the table method:\n # 1. 
Assert memory usage is not too large\n below_memory_constraint = ar2_range <= 6 * (ar1.size + ar2.size)\n # 2. Check overflows for (ar2 - ar2_min); dtype=ar2.dtype\n range_safe_from_overflow = ar2_range <= np.iinfo(ar2.dtype).max\n\n # Optimal performance is for approximately\n # log10(size) > (log10(range) - 2.27) / 0.927.\n # However, here we set the requirement that by default\n # the intermediate array can only be 6x\n # the combined memory allocation of the original\n # arrays. See discussion on\n # https://github.com/numpy/numpy/pull/12065.\n\n if (\n range_safe_from_overflow and\n (below_memory_constraint or kind == 'table')\n ):\n\n if invert:\n outgoing_array = np.ones_like(ar1, dtype=bool)\n else:\n outgoing_array = np.zeros_like(ar1, dtype=bool)\n\n # Make elements 1 where the integer exists in ar2\n if invert:\n isin_helper_ar = np.ones(ar2_range + 1, dtype=bool)\n isin_helper_ar[ar2 - ar2_min] = 0\n else:\n isin_helper_ar = np.zeros(ar2_range + 1, dtype=bool)\n isin_helper_ar[ar2 - ar2_min] = 1\n\n # Mask out elements we know won't work\n basic_mask = (ar1 <= ar2_max) & (ar1 >= ar2_min)\n in_range_ar1 = ar1[basic_mask]\n if in_range_ar1.size == 0:\n # Nothing more to do, since all values are out of range.\n return outgoing_array\n\n # Unfortunately, ar2_min can be out of range for `intp` even\n # if the calculation result must fit in range (and be positive).\n # In that case, use ar2.dtype which must work for all unmasked\n # values.\n try:\n ar2_min = np.array(ar2_min, dtype=np.intp)\n dtype = np.intp\n except OverflowError:\n dtype = ar2.dtype\n\n out = np.empty_like(in_range_ar1, dtype=np.intp)\n outgoing_array[basic_mask] = isin_helper_ar[\n np.subtract(in_range_ar1, ar2_min, dtype=dtype,\n out=out, casting="unsafe")]\n\n return outgoing_array\n elif kind == 'table': # not range_safe_from_overflow\n raise RuntimeError(\n "You have specified kind='table', "\n "but the range of values in `ar2` or `ar1` exceed the "\n "maximum integer of the datatype. 
"\n "Please set `kind` to None or 'sort'."\n )\n elif kind == 'table':\n raise ValueError(\n "The 'table' method is only "\n "supported for boolean or integer arrays. "\n "Please select 'sort' or None for kind."\n )\n\n # Check if one of the arrays may contain arbitrary objects\n contains_object = ar1.dtype.hasobject or ar2.dtype.hasobject\n\n # This code is run when\n # a) the first condition is true, making the code significantly faster\n # b) the second condition is true (i.e. `ar1` or `ar2` may contain\n # arbitrary objects), since then sorting is not guaranteed to work\n if len(ar2) < 10 * len(ar1) ** 0.145 or contains_object:\n if invert:\n mask = np.ones(len(ar1), dtype=bool)\n for a in ar2:\n mask &= (ar1 != a)\n else:\n mask = np.zeros(len(ar1), dtype=bool)\n for a in ar2:\n mask |= (ar1 == a)\n return mask\n\n # Otherwise use sorting\n if not assume_unique:\n ar1, rev_idx = np.unique(ar1, return_inverse=True)\n ar2 = np.unique(ar2)\n\n ar = np.concatenate((ar1, ar2))\n # We need this to be a stable sort, so always use 'mergesort'\n # here. 
The values from the first array should always come before\n # the values from the second array.\n order = ar.argsort(kind='mergesort')\n sar = ar[order]\n if invert:\n bool_ar = (sar[1:] != sar[:-1])\n else:\n bool_ar = (sar[1:] == sar[:-1])\n flag = np.concatenate((bool_ar, [invert]))\n ret = np.empty(ar.shape, dtype=bool)\n ret[order] = flag\n\n if assume_unique:\n return ret[:len(ar1)]\n else:\n return ret[rev_idx]\n\n\ndef _isin_dispatcher(element, test_elements, assume_unique=None, invert=None,\n *, kind=None):\n return (element, test_elements)\n\n\n@array_function_dispatch(_isin_dispatcher)\ndef isin(element, test_elements, assume_unique=False, invert=False, *,\n kind=None):\n """\n Calculates ``element in test_elements``, broadcasting over `element` only.\n Returns a boolean array of the same shape as `element` that is True\n where an element of `element` is in `test_elements` and False otherwise.\n\n Parameters\n ----------\n element : array_like\n Input array.\n test_elements : array_like\n The values against which to test each value of `element`.\n This argument is flattened if it is an array or array_like.\n See notes for behavior with non-array-like parameters.\n assume_unique : bool, optional\n If True, the input arrays are both assumed to be unique, which\n can speed up the calculation. Default is False.\n invert : bool, optional\n If True, the values in the returned array are inverted, as if\n calculating `element not in test_elements`. Default is False.\n ``np.isin(a, b, invert=True)`` is equivalent to (but faster\n than) ``np.invert(np.isin(a, b))``.\n kind : {None, 'sort', 'table'}, optional\n The algorithm to use. This will not affect the final result,\n but will affect the speed and memory use. The default, None,\n will select automatically based on memory considerations.\n\n * If 'sort', will use a mergesort-based approach. 
This will have\n a memory usage of roughly 6 times the sum of the sizes of\n `element` and `test_elements`, not accounting for size of dtypes.\n * If 'table', will use a lookup table approach similar\n to a counting sort. This is only available for boolean and\n integer arrays. This will have a memory usage of the\n size of `element` plus the max-min value of `test_elements`.\n `assume_unique` has no effect when the 'table' option is used.\n * If None, will automatically choose 'table' if\n the required memory allocation is less than or equal to\n 6 times the sum of the sizes of `element` and `test_elements`,\n otherwise will use 'sort'. This is done to not use\n a large amount of memory by default, even though\n 'table' may be faster in most cases. If 'table' is chosen,\n `assume_unique` will have no effect.\n\n\n Returns\n -------\n isin : ndarray, bool\n Has the same shape as `element`. The values `element[isin]`\n are in `test_elements`.\n\n Notes\n -----\n `isin` is an element-wise function version of the python keyword `in`.\n ``isin(a, b)`` is roughly equivalent to\n ``np.array([item in b for item in a])`` if `a` and `b` are 1-D sequences.\n\n `element` and `test_elements` are converted to arrays if they are not\n already. If `test_elements` is a set (or other non-sequence collection)\n it will be converted to an object array with one element, rather than an\n array of the values contained in `test_elements`. This is a consequence\n of the `array` constructor's way of handling non-sequence collections.\n Converting the set to a list usually gives the desired behavior.\n\n Using ``kind='table'`` tends to be faster than `kind='sort'` if the\n following relationship is true:\n ``log10(len(test_elements)) >\n (log10(max(test_elements)-min(test_elements)) - 2.27) / 0.927``,\n but may use greater memory. 
The default value for `kind` will\n be automatically selected based only on memory usage, so one may\n manually set ``kind='table'`` if memory constraints can be relaxed.\n\n Examples\n --------\n >>> import numpy as np\n >>> element = 2*np.arange(4).reshape((2, 2))\n >>> element\n array([[0, 2],\n [4, 6]])\n >>> test_elements = [1, 2, 4, 8]\n >>> mask = np.isin(element, test_elements)\n >>> mask\n array([[False, True],\n [ True, False]])\n >>> element[mask]\n array([2, 4])\n\n The indices of the matched values can be obtained with `nonzero`:\n\n >>> np.nonzero(mask)\n (array([0, 1]), array([1, 0]))\n\n The test can also be inverted:\n\n >>> mask = np.isin(element, test_elements, invert=True)\n >>> mask\n array([[ True, False],\n [False, True]])\n >>> element[mask]\n array([0, 6])\n\n Because of how `array` handles sets, the following does not\n work as expected:\n\n >>> test_set = {1, 2, 4, 8}\n >>> np.isin(element, test_set)\n array([[False, False],\n [False, False]])\n\n Casting the set to a list gives the expected result:\n\n >>> np.isin(element, list(test_set))\n array([[False, True],\n [ True, False]])\n """\n element = np.asarray(element)\n return _in1d(element, test_elements, assume_unique=assume_unique,\n invert=invert, kind=kind).reshape(element.shape)\n\n\ndef _union1d_dispatcher(ar1, ar2):\n return (ar1, ar2)\n\n\n@array_function_dispatch(_union1d_dispatcher)\ndef union1d(ar1, ar2):\n """\n Find the union of two arrays.\n\n Return the unique, sorted array of values that are in either of the two\n input arrays.\n\n Parameters\n ----------\n ar1, ar2 : array_like\n Input arrays. 
They are flattened if they are not already 1D.\n\n Returns\n -------\n union1d : ndarray\n Unique, sorted union of the input arrays.\n\n Examples\n --------\n >>> import numpy as np\n >>> np.union1d([-1, 0, 1], [-2, 0, 2])\n array([-2, -1, 0, 1, 2])\n\n To find the union of more than two arrays, use functools.reduce:\n\n >>> from functools import reduce\n >>> reduce(np.union1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2]))\n array([1, 2, 3, 4, 6])\n """\n return unique(np.concatenate((ar1, ar2), axis=None))\n\n\ndef _setdiff1d_dispatcher(ar1, ar2, assume_unique=None):\n return (ar1, ar2)\n\n\n@array_function_dispatch(_setdiff1d_dispatcher)\ndef setdiff1d(ar1, ar2, assume_unique=False):\n """\n Find the set difference of two arrays.\n\n Return the unique values in `ar1` that are not in `ar2`.\n\n Parameters\n ----------\n ar1 : array_like\n Input array.\n ar2 : array_like\n Input comparison array.\n assume_unique : bool\n If True, the input arrays are both assumed to be unique, which\n can speed up the calculation. Default is False.\n\n Returns\n -------\n setdiff1d : ndarray\n 1D array of values in `ar1` that are not in `ar2`. The result\n is sorted when `assume_unique=False`, but otherwise only sorted\n if the input is sorted.\n\n Examples\n --------\n >>> import numpy as np\n >>> a = np.array([1, 2, 3, 2, 4, 1])\n >>> b = np.array([3, 4, 5, 6])\n >>> np.setdiff1d(a, b)\n array([1, 2])\n\n """\n if assume_unique:\n ar1 = np.asarray(ar1).ravel()\n else:\n ar1 = unique(ar1)\n ar2 = unique(ar2)\n return ar1[_in1d(ar1, ar2, assume_unique=True, invert=True)]\n | .venv\Lib\site-packages\numpy\lib\_arraysetops_impl.py | _arraysetops_impl.py | Python | 42,535 | 0.95 | 0.13254 | 0.072336 | awesome-app | 382 | 2023-07-30T08:57:04.643178 | GPL-3.0 | false | 26c275b692484cf99ac61aef295d0a1c |
from typing import Any, Generic, NamedTuple, SupportsIndex, TypeAlias, overload\nfrom typing import Literal as L\n\nfrom typing_extensions import TypeVar, deprecated\n\nimport numpy as np\nfrom numpy._typing import (\n ArrayLike,\n NDArray,\n _ArrayLike,\n _ArrayLikeBool_co,\n _ArrayLikeNumber_co,\n)\n\n__all__ = [\n "ediff1d",\n "in1d",\n "intersect1d",\n "isin",\n "setdiff1d",\n "setxor1d",\n "union1d",\n "unique",\n "unique_all",\n "unique_counts",\n "unique_inverse",\n "unique_values",\n]\n\n_ScalarT = TypeVar("_ScalarT", bound=np.generic)\n_NumericT = TypeVar("_NumericT", bound=np.number | np.timedelta64 | np.object_)\n\n# Explicitly set all allowed values to prevent accidental castings to\n# abstract dtypes (their common super-type).\n# Only relevant if two or more arguments are parametrized, (e.g. `setdiff1d`)\n# which could result in, for example, `int64` and `float64`producing a\n# `number[_64Bit]` array\n_EitherSCT = TypeVar(\n "_EitherSCT",\n np.bool,\n np.int8, np.int16, np.int32, np.int64, np.intp,\n np.uint8, np.uint16, np.uint32, np.uint64, np.uintp,\n np.float16, np.float32, np.float64, np.longdouble,\n np.complex64, np.complex128, np.clongdouble,\n np.timedelta64, np.datetime64,\n np.bytes_, np.str_, np.void, np.object_,\n np.integer, np.floating, np.complexfloating, np.character,\n) # fmt: skip\n\n_AnyArray: TypeAlias = NDArray[Any]\n_IntArray: TypeAlias = NDArray[np.intp]\n\n###\n\nclass UniqueAllResult(NamedTuple, Generic[_ScalarT]):\n values: NDArray[_ScalarT]\n indices: _IntArray\n inverse_indices: _IntArray\n counts: _IntArray\n\nclass UniqueCountsResult(NamedTuple, Generic[_ScalarT]):\n values: NDArray[_ScalarT]\n counts: _IntArray\n\nclass UniqueInverseResult(NamedTuple, Generic[_ScalarT]):\n values: NDArray[_ScalarT]\n inverse_indices: _IntArray\n\n#\n@overload\ndef ediff1d(\n ary: _ArrayLikeBool_co,\n to_end: ArrayLike | None = None,\n to_begin: ArrayLike | None = None,\n) -> NDArray[np.int8]: ...\n@overload\ndef ediff1d(\n ary: 
_ArrayLike[_NumericT],\n to_end: ArrayLike | None = None,\n to_begin: ArrayLike | None = None,\n) -> NDArray[_NumericT]: ...\n@overload\ndef ediff1d(\n ary: _ArrayLike[np.datetime64[Any]],\n to_end: ArrayLike | None = None,\n to_begin: ArrayLike | None = None,\n) -> NDArray[np.timedelta64]: ...\n@overload\ndef ediff1d(\n ary: _ArrayLikeNumber_co,\n to_end: ArrayLike | None = None,\n to_begin: ArrayLike | None = None,\n) -> _AnyArray: ...\n\n#\n@overload # known scalar-type, FFF\ndef unique(\n ar: _ArrayLike[_ScalarT],\n return_index: L[False] = False,\n return_inverse: L[False] = False,\n return_counts: L[False] = False,\n axis: SupportsIndex | None = None,\n *,\n equal_nan: bool = True,\n) -> NDArray[_ScalarT]: ...\n@overload # unknown scalar-type, FFF\ndef unique(\n ar: ArrayLike,\n return_index: L[False] = False,\n return_inverse: L[False] = False,\n return_counts: L[False] = False,\n axis: SupportsIndex | None = None,\n *,\n equal_nan: bool = True,\n) -> _AnyArray: ...\n@overload # known scalar-type, TFF\ndef unique(\n ar: _ArrayLike[_ScalarT],\n return_index: L[True],\n return_inverse: L[False] = False,\n return_counts: L[False] = False,\n axis: SupportsIndex | None = None,\n *,\n equal_nan: bool = True,\n) -> tuple[NDArray[_ScalarT], _IntArray]: ...\n@overload # unknown scalar-type, TFF\ndef unique(\n ar: ArrayLike,\n return_index: L[True],\n return_inverse: L[False] = False,\n return_counts: L[False] = False,\n axis: SupportsIndex | None = None,\n *,\n equal_nan: bool = True,\n) -> tuple[_AnyArray, _IntArray]: ...\n@overload # known scalar-type, FTF (positional)\ndef unique(\n ar: _ArrayLike[_ScalarT],\n return_index: L[False],\n return_inverse: L[True],\n return_counts: L[False] = False,\n axis: SupportsIndex | None = None,\n *,\n equal_nan: bool = True,\n) -> tuple[NDArray[_ScalarT], _IntArray]: ...\n@overload # known scalar-type, FTF (keyword)\ndef unique(\n ar: _ArrayLike[_ScalarT],\n return_index: L[False] = False,\n *,\n return_inverse: L[True],\n 
return_counts: L[False] = False,\n axis: SupportsIndex | None = None,\n equal_nan: bool = True,\n) -> tuple[NDArray[_ScalarT], _IntArray]: ...\n@overload # unknown scalar-type, FTF (positional)\ndef unique(\n ar: ArrayLike,\n return_index: L[False],\n return_inverse: L[True],\n return_counts: L[False] = False,\n axis: SupportsIndex | None = None,\n *,\n equal_nan: bool = True,\n) -> tuple[_AnyArray, _IntArray]: ...\n@overload # unknown scalar-type, FTF (keyword)\ndef unique(\n ar: ArrayLike,\n return_index: L[False] = False,\n *,\n return_inverse: L[True],\n return_counts: L[False] = False,\n axis: SupportsIndex | None = None,\n equal_nan: bool = True,\n) -> tuple[_AnyArray, _IntArray]: ...\n@overload # known scalar-type, FFT (positional)\ndef unique(\n ar: _ArrayLike[_ScalarT],\n return_index: L[False],\n return_inverse: L[False],\n return_counts: L[True],\n axis: SupportsIndex | None = None,\n *,\n equal_nan: bool = True,\n) -> tuple[NDArray[_ScalarT], _IntArray]: ...\n@overload # known scalar-type, FFT (keyword)\ndef unique(\n ar: _ArrayLike[_ScalarT],\n return_index: L[False] = False,\n return_inverse: L[False] = False,\n *,\n return_counts: L[True],\n axis: SupportsIndex | None = None,\n equal_nan: bool = True,\n) -> tuple[NDArray[_ScalarT], _IntArray]: ...\n@overload # unknown scalar-type, FFT (positional)\ndef unique(\n ar: ArrayLike,\n return_index: L[False],\n return_inverse: L[False],\n return_counts: L[True],\n axis: SupportsIndex | None = None,\n *,\n equal_nan: bool = True,\n) -> tuple[_AnyArray, _IntArray]: ...\n@overload # unknown scalar-type, FFT (keyword)\ndef unique(\n ar: ArrayLike,\n return_index: L[False] = False,\n return_inverse: L[False] = False,\n *,\n return_counts: L[True],\n axis: SupportsIndex | None = None,\n equal_nan: bool = True,\n) -> tuple[_AnyArray, _IntArray]: ...\n@overload # known scalar-type, TTF\ndef unique(\n ar: _ArrayLike[_ScalarT],\n return_index: L[True],\n return_inverse: L[True],\n return_counts: L[False] = False,\n 
axis: SupportsIndex | None = None,\n *,\n equal_nan: bool = True,\n) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ...\n@overload # unknown scalar-type, TTF\ndef unique(\n ar: ArrayLike,\n return_index: L[True],\n return_inverse: L[True],\n return_counts: L[False] = False,\n axis: SupportsIndex | None = None,\n *,\n equal_nan: bool = True,\n) -> tuple[_AnyArray, _IntArray, _IntArray]: ...\n@overload # known scalar-type, TFT (positional)\ndef unique(\n ar: _ArrayLike[_ScalarT],\n return_index: L[True],\n return_inverse: L[False],\n return_counts: L[True],\n axis: SupportsIndex | None = None,\n *,\n equal_nan: bool = True,\n) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ...\n@overload # known scalar-type, TFT (keyword)\ndef unique(\n ar: _ArrayLike[_ScalarT],\n return_index: L[True],\n return_inverse: L[False] = False,\n *,\n return_counts: L[True],\n axis: SupportsIndex | None = None,\n equal_nan: bool = True,\n) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ...\n@overload # unknown scalar-type, TFT (positional)\ndef unique(\n ar: ArrayLike,\n return_index: L[True],\n return_inverse: L[False],\n return_counts: L[True],\n axis: SupportsIndex | None = None,\n *,\n equal_nan: bool = True,\n) -> tuple[_AnyArray, _IntArray, _IntArray]: ...\n@overload # unknown scalar-type, TFT (keyword)\ndef unique(\n ar: ArrayLike,\n return_index: L[True],\n return_inverse: L[False] = False,\n *,\n return_counts: L[True],\n axis: SupportsIndex | None = None,\n equal_nan: bool = True,\n) -> tuple[_AnyArray, _IntArray, _IntArray]: ...\n@overload # known scalar-type, FTT (positional)\ndef unique(\n ar: _ArrayLike[_ScalarT],\n return_index: L[False],\n return_inverse: L[True],\n return_counts: L[True],\n axis: SupportsIndex | None = None,\n *,\n equal_nan: bool = True,\n) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ...\n@overload # known scalar-type, FTT (keyword)\ndef unique(\n ar: _ArrayLike[_ScalarT],\n return_index: L[False] = False,\n *,\n return_inverse: 
L[True],\n return_counts: L[True],\n axis: SupportsIndex | None = None,\n equal_nan: bool = True,\n) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray]: ...\n@overload # unknown scalar-type, FTT (positional)\ndef unique(\n ar: ArrayLike,\n return_index: L[False],\n return_inverse: L[True],\n return_counts: L[True],\n axis: SupportsIndex | None = None,\n *,\n equal_nan: bool = True,\n) -> tuple[_AnyArray, _IntArray, _IntArray]: ...\n@overload # unknown scalar-type, FTT (keyword)\ndef unique(\n ar: ArrayLike,\n return_index: L[False] = False,\n *,\n return_inverse: L[True],\n return_counts: L[True],\n axis: SupportsIndex | None = None,\n equal_nan: bool = True,\n) -> tuple[_AnyArray, _IntArray, _IntArray]: ...\n@overload # known scalar-type, TTT\ndef unique(\n ar: _ArrayLike[_ScalarT],\n return_index: L[True],\n return_inverse: L[True],\n return_counts: L[True],\n axis: SupportsIndex | None = None,\n *,\n equal_nan: bool = True,\n) -> tuple[NDArray[_ScalarT], _IntArray, _IntArray, _IntArray]: ...\n@overload # unknown scalar-type, TTT\ndef unique(\n ar: ArrayLike,\n return_index: L[True],\n return_inverse: L[True],\n return_counts: L[True],\n axis: SupportsIndex | None = None,\n *,\n equal_nan: bool = True,\n) -> tuple[_AnyArray, _IntArray, _IntArray, _IntArray]: ...\n\n#\n@overload\ndef unique_all(x: _ArrayLike[_ScalarT]) -> UniqueAllResult[_ScalarT]: ...\n@overload\ndef unique_all(x: ArrayLike) -> UniqueAllResult[Any]: ...\n\n#\n@overload\ndef unique_counts(x: _ArrayLike[_ScalarT]) -> UniqueCountsResult[_ScalarT]: ...\n@overload\ndef unique_counts(x: ArrayLike) -> UniqueCountsResult[Any]: ...\n\n#\n@overload\ndef unique_inverse(x: _ArrayLike[_ScalarT]) -> UniqueInverseResult[_ScalarT]: ...\n@overload\ndef unique_inverse(x: ArrayLike) -> UniqueInverseResult[Any]: ...\n\n#\n@overload\ndef unique_values(x: _ArrayLike[_ScalarT]) -> NDArray[_ScalarT]: ...\n@overload\ndef unique_values(x: ArrayLike) -> _AnyArray: ...\n\n#\n@overload # known scalar-type, 
return_indices=False (default)\ndef intersect1d(\n ar1: _ArrayLike[_EitherSCT],\n ar2: _ArrayLike[_EitherSCT],\n assume_unique: bool = False,\n return_indices: L[False] = False,\n) -> NDArray[_EitherSCT]: ...\n@overload # known scalar-type, return_indices=True (positional)\ndef intersect1d(\n ar1: _ArrayLike[_EitherSCT],\n ar2: _ArrayLike[_EitherSCT],\n assume_unique: bool,\n return_indices: L[True],\n) -> tuple[NDArray[_EitherSCT], _IntArray, _IntArray]: ...\n@overload # known scalar-type, return_indices=True (keyword)\ndef intersect1d(\n ar1: _ArrayLike[_EitherSCT],\n ar2: _ArrayLike[_EitherSCT],\n assume_unique: bool = False,\n *,\n return_indices: L[True],\n) -> tuple[NDArray[_EitherSCT], _IntArray, _IntArray]: ...\n@overload # unknown scalar-type, return_indices=False (default)\ndef intersect1d(\n ar1: ArrayLike,\n ar2: ArrayLike,\n assume_unique: bool = False,\n return_indices: L[False] = False,\n) -> _AnyArray: ...\n@overload # unknown scalar-type, return_indices=True (positional)\ndef intersect1d(\n ar1: ArrayLike,\n ar2: ArrayLike,\n assume_unique: bool,\n return_indices: L[True],\n) -> tuple[_AnyArray, _IntArray, _IntArray]: ...\n@overload # unknown scalar-type, return_indices=True (keyword)\ndef intersect1d(\n ar1: ArrayLike,\n ar2: ArrayLike,\n assume_unique: bool = False,\n *,\n return_indices: L[True],\n) -> tuple[_AnyArray, _IntArray, _IntArray]: ...\n\n#\n@overload\ndef setxor1d(ar1: _ArrayLike[_EitherSCT], ar2: _ArrayLike[_EitherSCT], assume_unique: bool = False) -> NDArray[_EitherSCT]: ...\n@overload\ndef setxor1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False) -> _AnyArray: ...\n\n#\n@overload\ndef union1d(ar1: _ArrayLike[_EitherSCT], ar2: _ArrayLike[_EitherSCT]) -> NDArray[_EitherSCT]: ...\n@overload\ndef union1d(ar1: ArrayLike, ar2: ArrayLike) -> _AnyArray: ...\n\n#\n@overload\ndef setdiff1d(ar1: _ArrayLike[_EitherSCT], ar2: _ArrayLike[_EitherSCT], assume_unique: bool = False) -> NDArray[_EitherSCT]: ...\n@overload\ndef 
setdiff1d(ar1: ArrayLike, ar2: ArrayLike, assume_unique: bool = False) -> _AnyArray: ...\n\n#\ndef isin(\n element: ArrayLike,\n test_elements: ArrayLike,\n assume_unique: bool = False,\n invert: bool = False,\n *,\n kind: L["sort", "table"] | None = None,\n) -> NDArray[np.bool]: ...\n\n#\n@deprecated("Use 'isin' instead")\ndef in1d(\n element: ArrayLike,\n test_elements: ArrayLike,\n assume_unique: bool = False,\n invert: bool = False,\n *,\n kind: L["sort", "table"] | None = None,\n) -> NDArray[np.bool]: ...\n | .venv\Lib\site-packages\numpy\lib\_arraysetops_impl.pyi | _arraysetops_impl.pyi | Other | 13,247 | 0.95 | 0.123874 | 0.109005 | node-utils | 521 | 2025-05-08T19:31:59.954852 | MIT | false | 62ea03125def41d131d8bb15bf4847e8 |
"""\nA buffered iterator for big arrays.\n\nThis module solves the problem of iterating over a big file-based array\nwithout having to read it into memory. The `Arrayterator` class wraps\nan array object, and when iterated it will return sub-arrays with at most\na user-specified number of elements.\n\n"""\nfrom functools import reduce\nfrom operator import mul\n\n__all__ = ['Arrayterator']\n\n\nclass Arrayterator:\n """\n Buffered iterator for big arrays.\n\n `Arrayterator` creates a buffered iterator for reading big arrays in small\n contiguous blocks. The class is useful for objects stored in the\n file system. It allows iteration over the object *without* reading\n everything in memory; instead, small blocks are read and iterated over.\n\n `Arrayterator` can be used with any object that supports multidimensional\n slices. This includes NumPy arrays, but also variables from\n Scientific.IO.NetCDF or pynetcdf for example.\n\n Parameters\n ----------\n var : array_like\n The object to iterate over.\n buf_size : int, optional\n The buffer size. If `buf_size` is supplied, the maximum amount of\n data that will be read into memory is `buf_size` elements.\n Default is None, which will read as many element as possible\n into memory.\n\n Attributes\n ----------\n var\n buf_size\n start\n stop\n step\n shape\n flat\n\n See Also\n --------\n numpy.ndenumerate : Multidimensional array iterator.\n numpy.flatiter : Flat array iterator.\n numpy.memmap : Create a memory-map to an array stored\n in a binary file on disk.\n\n Notes\n -----\n The algorithm works by first finding a "running dimension", along which\n the blocks will be extracted. Given an array of dimensions\n ``(d1, d2, ..., dn)``, e.g. if `buf_size` is smaller than ``d1``, the\n first dimension will be used. 
If, on the other hand,\n ``d1 < buf_size < d1*d2`` the second dimension will be used, and so on.\n Blocks are extracted along this dimension, and when the last block is\n returned the process continues from the next dimension, until all\n elements have been read.\n\n Examples\n --------\n >>> import numpy as np\n >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)\n >>> a_itor = np.lib.Arrayterator(a, 2)\n >>> a_itor.shape\n (3, 4, 5, 6)\n\n Now we can iterate over ``a_itor``, and it will return arrays of size\n two. Since `buf_size` was smaller than any dimension, the first\n dimension will be iterated over first:\n\n >>> for subarr in a_itor:\n ... if not subarr.all():\n ... print(subarr, subarr.shape) # doctest: +SKIP\n >>> # [[[[0 1]]]] (1, 1, 1, 2)\n\n """\n\n __module__ = "numpy.lib"\n\n def __init__(self, var, buf_size=None):\n self.var = var\n self.buf_size = buf_size\n\n self.start = [0 for dim in var.shape]\n self.stop = list(var.shape)\n self.step = [1 for dim in var.shape]\n\n def __getattr__(self, attr):\n return getattr(self.var, attr)\n\n def __getitem__(self, index):\n """\n Return a new arrayterator.\n\n """\n # Fix index, handling ellipsis and incomplete slices.\n if not isinstance(index, tuple):\n index = (index,)\n fixed = []\n length, dims = len(index), self.ndim\n for slice_ in index:\n if slice_ is Ellipsis:\n fixed.extend([slice(None)] * (dims - length + 1))\n length = len(fixed)\n elif isinstance(slice_, int):\n fixed.append(slice(slice_, slice_ + 1, 1))\n else:\n fixed.append(slice_)\n index = tuple(fixed)\n if len(index) < dims:\n index += (slice(None),) * (dims - len(index))\n\n # Return a new arrayterator object.\n out = self.__class__(self.var, self.buf_size)\n for i, (start, stop, step, slice_) in enumerate(\n zip(self.start, self.stop, self.step, index)):\n out.start[i] = start + (slice_.start or 0)\n out.step[i] = step * (slice_.step or 1)\n out.stop[i] = start + (slice_.stop or stop - start)\n out.stop[i] = min(stop, 
out.stop[i])\n return out\n\n def __array__(self, dtype=None, copy=None):\n """\n Return corresponding data.\n\n """\n slice_ = tuple(slice(*t) for t in zip(\n self.start, self.stop, self.step))\n return self.var[slice_]\n\n @property\n def flat(self):\n """\n A 1-D flat iterator for Arrayterator objects.\n\n This iterator returns elements of the array to be iterated over in\n `~lib.Arrayterator` one by one.\n It is similar to `flatiter`.\n\n See Also\n --------\n lib.Arrayterator\n flatiter\n\n Examples\n --------\n >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)\n >>> a_itor = np.lib.Arrayterator(a, 2)\n\n >>> for subarr in a_itor.flat:\n ... if not subarr:\n ... print(subarr, type(subarr))\n ...\n 0 <class 'numpy.int64'>\n\n """\n for block in self:\n yield from block.flat\n\n @property\n def shape(self):\n """\n The shape of the array to be iterated over.\n\n For an example, see `Arrayterator`.\n\n """\n return tuple(((stop - start - 1) // step + 1) for start, stop, step in\n zip(self.start, self.stop, self.step))\n\n def __iter__(self):\n # Skip arrays with degenerate dimensions\n if [dim for dim in self.shape if dim <= 0]:\n return\n\n start = self.start[:]\n stop = self.stop[:]\n step = self.step[:]\n ndims = self.var.ndim\n\n while True:\n count = self.buf_size or reduce(mul, self.shape)\n\n # iterate over each dimension, looking for the\n # running dimension (ie, the dimension along which\n # the blocks will be built from)\n rundim = 0\n for i in range(ndims - 1, -1, -1):\n # if count is zero we ran out of elements to read\n # along higher dimensions, so we read only a single position\n if count == 0:\n stop[i] = start[i] + 1\n elif count <= self.shape[i]:\n # limit along this dimension\n stop[i] = start[i] + count * step[i]\n rundim = i\n else:\n # read everything along this dimension\n stop[i] = self.stop[i]\n stop[i] = min(self.stop[i], stop[i])\n count = count // self.shape[i]\n\n # yield a block\n slice_ = tuple(slice(*t) for t in zip(start, 
stop, step))\n yield self.var[slice_]\n\n # Update start position, taking care of overflow to\n # other dimensions\n start[rundim] = stop[rundim] # start where we stopped\n for i in range(ndims - 1, 0, -1):\n if start[i] >= self.stop[i]:\n start[i] = self.start[i]\n start[i - 1] += self.step[i - 1]\n if start[0] >= self.stop[0]:\n return\n | .venv\Lib\site-packages\numpy\lib\_arrayterator_impl.py | _arrayterator_impl.py | Python | 7,442 | 0.95 | 0.196429 | 0.07027 | awesome-app | 776 | 2024-01-06T08:09:08.543371 | Apache-2.0 | false | 6ac7583d5b1203ac31c4a62998552078 |
# pyright: reportIncompatibleMethodOverride=false\n\nfrom collections.abc import Generator\nfrom types import EllipsisType\nfrom typing import Any, Final, TypeAlias, overload\n\nfrom typing_extensions import TypeVar\n\nimport numpy as np\nfrom numpy._typing import _AnyShape, _Shape\n\n__all__ = ["Arrayterator"]\n\n_ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True)\n_DTypeT = TypeVar("_DTypeT", bound=np.dtype)\n_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True)\n_ScalarT = TypeVar("_ScalarT", bound=np.generic)\n\n_AnyIndex: TypeAlias = EllipsisType | int | slice | tuple[EllipsisType | int | slice, ...]\n\n# NOTE: In reality `Arrayterator` does not actually inherit from `ndarray`,\n# but its ``__getattr__` method does wrap around the former and thus has\n# access to all its methods\n\nclass Arrayterator(np.ndarray[_ShapeT_co, _DTypeT_co]):\n var: np.ndarray[_ShapeT_co, _DTypeT_co] # type: ignore[assignment]\n buf_size: Final[int | None]\n start: Final[list[int]]\n stop: Final[list[int]]\n step: Final[list[int]]\n\n @property # type: ignore[misc]\n def shape(self) -> _ShapeT_co: ...\n @property\n def flat(self: Arrayterator[Any, np.dtype[_ScalarT]]) -> Generator[_ScalarT]: ... # type: ignore[override]\n\n #\n def __init__(self, /, var: np.ndarray[_ShapeT_co, _DTypeT_co], buf_size: int | None = None) -> None: ...\n def __getitem__(self, index: _AnyIndex, /) -> Arrayterator[_AnyShape, _DTypeT_co]: ... 
# type: ignore[override]\n def __iter__(self) -> Generator[np.ndarray[_AnyShape, _DTypeT_co]]: ...\n\n #\n @overload # type: ignore[override]\n def __array__(self, /, dtype: None = None, copy: bool | None = None) -> np.ndarray[_ShapeT_co, _DTypeT_co]: ...\n @overload\n def __array__(self, /, dtype: _DTypeT, copy: bool | None = None) -> np.ndarray[_ShapeT_co, _DTypeT]: ...\n | .venv\Lib\site-packages\numpy\lib\_arrayterator_impl.pyi | _arrayterator_impl.pyi | Other | 1,922 | 0.95 | 0.173913 | 0.171429 | vue-tools | 485 | 2023-10-22T02:34:44.234430 | MIT | false | 976e37d812a33b174503a63b14a78d69 |
"""\nMiscellaneous utils.\n"""\nfrom numpy._core import asarray\nfrom numpy._core.numeric import normalize_axis_index, normalize_axis_tuple\nfrom numpy._utils import set_module\n\n__all__ = ["byte_bounds", "normalize_axis_tuple", "normalize_axis_index"]\n\n\n@set_module("numpy.lib.array_utils")\ndef byte_bounds(a):\n """\n Returns pointers to the end-points of an array.\n\n Parameters\n ----------\n a : ndarray\n Input array. It must conform to the Python-side of the array\n interface.\n\n Returns\n -------\n (low, high) : tuple of 2 integers\n The first integer is the first byte of the array, the second\n integer is just past the last byte of the array. If `a` is not\n contiguous it will not use every byte between the (`low`, `high`)\n values.\n\n Examples\n --------\n >>> import numpy as np\n >>> I = np.eye(2, dtype='f'); I.dtype\n dtype('float32')\n >>> low, high = np.lib.array_utils.byte_bounds(I)\n >>> high - low == I.size*I.itemsize\n True\n >>> I = np.eye(2); I.dtype\n dtype('float64')\n >>> low, high = np.lib.array_utils.byte_bounds(I)\n >>> high - low == I.size*I.itemsize\n True\n\n """\n ai = a.__array_interface__\n a_data = ai['data'][0]\n astrides = ai['strides']\n ashape = ai['shape']\n bytes_a = asarray(a).dtype.itemsize\n\n a_low = a_high = a_data\n if astrides is None:\n # contiguous case\n a_high += a.size * bytes_a\n else:\n for shape, stride in zip(ashape, astrides):\n if stride < 0:\n a_low += (shape - 1) * stride\n else:\n a_high += (shape - 1) * stride\n a_high += bytes_a\n return a_low, a_high\n | .venv\Lib\site-packages\numpy\lib\_array_utils_impl.py | _array_utils_impl.py | Python | 1,759 | 0.95 | 0.064516 | 0.018519 | python-kit | 259 | 2024-06-16T00:10:50.181800 | MIT | false | 96bb2c48aa5101bba98c17b0e2343a87 |
from collections.abc import Iterable\nfrom typing import Any\n\nfrom numpy import generic\nfrom numpy.typing import NDArray\n\n__all__ = ["byte_bounds", "normalize_axis_tuple", "normalize_axis_index"]\n\n# NOTE: In practice `byte_bounds` can (potentially) take any object\n# implementing the `__array_interface__` protocol. The caveat is\n# that certain keys, marked as optional in the spec, must be present for\n# `byte_bounds`. This concerns `"strides"` and `"data"`.\ndef byte_bounds(a: generic | NDArray[Any]) -> tuple[int, int]: ...\n\ndef normalize_axis_tuple(\n axis: int | Iterable[int],\n ndim: int = ...,\n argname: str | None = ...,\n allow_duplicate: bool | None = ...,\n) -> tuple[int, int]: ...\n\ndef normalize_axis_index(\n axis: int = ...,\n ndim: int = ...,\n msg_prefix: str | None = ...,\n) -> int: ...\n | .venv\Lib\site-packages\numpy\lib\_array_utils_impl.pyi | _array_utils_impl.pyi | Other | 846 | 0.95 | 0.153846 | 0.190476 | awesome-app | 32 | 2024-04-20T08:35:31.906405 | MIT | false | 9318f6ff079be444bf208cd36ee7e215 |
"""A file interface for handling local and remote data files.\n\nThe goal of datasource is to abstract some of the file system operations\nwhen dealing with data files so the researcher doesn't have to know all the\nlow-level details. Through datasource, a researcher can obtain and use a\nfile with one function call, regardless of location of the file.\n\nDataSource is meant to augment standard python libraries, not replace them.\nIt should work seamlessly with standard file IO operations and the os\nmodule.\n\nDataSource files can originate locally or remotely:\n\n- local files : '/home/guido/src/local/data.txt'\n- URLs (http, ftp, ...) : 'http://www.scipy.org/not/real/data.txt'\n\nDataSource files can also be compressed or uncompressed. Currently only\ngzip, bz2 and xz are supported.\n\nExample::\n\n >>> # Create a DataSource, use os.curdir (default) for local storage.\n >>> from numpy import DataSource\n >>> ds = DataSource()\n >>>\n >>> # Open a remote file.\n >>> # DataSource downloads the file, stores it locally in:\n >>> # './www.google.com/index.html'\n >>> # opens the file and returns a file object.\n >>> fp = ds.open('http://www.google.com/') # doctest: +SKIP\n >>>\n >>> # Use the file as you normally would\n >>> fp.read() # doctest: +SKIP\n >>> fp.close() # doctest: +SKIP\n\n"""\nimport os\n\nfrom numpy._utils import set_module\n\n_open = open\n\n\ndef _check_mode(mode, encoding, newline):\n """Check mode and that encoding and newline are compatible.\n\n Parameters\n ----------\n mode : str\n File open mode.\n encoding : str\n File encoding.\n newline : str\n Newline for text files.\n\n """\n if "t" in mode:\n if "b" in mode:\n raise ValueError(f"Invalid mode: {mode!r}")\n else:\n if encoding is not None:\n raise ValueError("Argument 'encoding' not supported in binary mode")\n if newline is not None:\n raise ValueError("Argument 'newline' not supported in binary mode")\n\n\n# Using a class instead of a module-level dictionary\n# to reduce the initial 
'import numpy' overhead by\n# deferring the import of lzma, bz2 and gzip until needed\n\n# TODO: .zip support, .tar support?\nclass _FileOpeners:\n """\n Container for different methods to open (un-)compressed files.\n\n `_FileOpeners` contains a dictionary that holds one method for each\n supported file format. Attribute lookup is implemented in such a way\n that an instance of `_FileOpeners` itself can be indexed with the keys\n of that dictionary. Currently uncompressed files as well as files\n compressed with ``gzip``, ``bz2`` or ``xz`` compression are supported.\n\n Notes\n -----\n `_file_openers`, an instance of `_FileOpeners`, is made available for\n use in the `_datasource` module.\n\n Examples\n --------\n >>> import gzip\n >>> np.lib._datasource._file_openers.keys()\n [None, '.bz2', '.gz', '.xz', '.lzma']\n >>> np.lib._datasource._file_openers['.gz'] is gzip.open\n True\n\n """\n\n def __init__(self):\n self._loaded = False\n self._file_openers = {None: open}\n\n def _load(self):\n if self._loaded:\n return\n\n try:\n import bz2\n self._file_openers[".bz2"] = bz2.open\n except ImportError:\n pass\n\n try:\n import gzip\n self._file_openers[".gz"] = gzip.open\n except ImportError:\n pass\n\n try:\n import lzma\n self._file_openers[".xz"] = lzma.open\n self._file_openers[".lzma"] = lzma.open\n except (ImportError, AttributeError):\n # There are incompatible backports of lzma that do not have the\n # lzma.open attribute, so catch that as well as ImportError.\n pass\n\n self._loaded = True\n\n def keys(self):\n """\n Return the keys of currently supported file openers.\n\n Parameters\n ----------\n None\n\n Returns\n -------\n keys : list\n The keys are None for uncompressed files and the file extension\n strings (i.e. 
``'.gz'``, ``'.xz'``) for supported compression\n methods.\n\n """\n self._load()\n return list(self._file_openers.keys())\n\n def __getitem__(self, key):\n self._load()\n return self._file_openers[key]\n\n\n_file_openers = _FileOpeners()\n\ndef open(path, mode='r', destpath=os.curdir, encoding=None, newline=None):\n """\n Open `path` with `mode` and return the file object.\n\n If ``path`` is an URL, it will be downloaded, stored in the\n `DataSource` `destpath` directory and opened from there.\n\n Parameters\n ----------\n path : str or pathlib.Path\n Local file path or URL to open.\n mode : str, optional\n Mode to open `path`. Mode 'r' for reading, 'w' for writing, 'a' to\n append. Available modes depend on the type of object specified by\n path. Default is 'r'.\n destpath : str, optional\n Path to the directory where the source file gets downloaded to for\n use. If `destpath` is None, a temporary directory will be created.\n The default path is the current directory.\n encoding : {None, str}, optional\n Open text file with given encoding. The default encoding will be\n what `open` uses.\n newline : {None, str}, optional\n Newline to use when reading text file.\n\n Returns\n -------\n out : file object\n The opened file.\n\n Notes\n -----\n This is a convenience function that instantiates a `DataSource` and\n returns the file object from ``DataSource.open(path)``.\n\n """\n\n ds = DataSource(destpath)\n return ds.open(path, mode, encoding=encoding, newline=newline)\n\n\n@set_module('numpy.lib.npyio')\nclass DataSource:\n """\n DataSource(destpath='.')\n\n A generic data source file (file, http, ftp, ...).\n\n DataSources can be local files or remote files/URLs. The files may\n also be compressed or uncompressed. 
DataSource hides some of the\n low-level details of downloading the file, allowing you to simply pass\n in a valid file path (or URL) and obtain a file object.\n\n Parameters\n ----------\n destpath : str or None, optional\n Path to the directory where the source file gets downloaded to for\n use. If `destpath` is None, a temporary directory will be created.\n The default path is the current directory.\n\n Notes\n -----\n URLs require a scheme string (``http://``) to be used, without it they\n will fail::\n\n >>> repos = np.lib.npyio.DataSource()\n >>> repos.exists('www.google.com/index.html')\n False\n >>> repos.exists('http://www.google.com/index.html')\n True\n\n Temporary directories are deleted when the DataSource is deleted.\n\n Examples\n --------\n ::\n\n >>> ds = np.lib.npyio.DataSource('/home/guido')\n >>> urlname = 'http://www.google.com/'\n >>> gfile = ds.open('http://www.google.com/')\n >>> ds.abspath(urlname)\n '/home/guido/www.google.com/index.html'\n\n >>> ds = np.lib.npyio.DataSource(None) # use with temporary file\n >>> ds.open('/home/guido/foobar.txt')\n <open file '/home/guido.foobar.txt', mode 'r' at 0x91d4430>\n >>> ds.abspath('/home/guido/foobar.txt')\n '/tmp/.../home/guido/foobar.txt'\n\n """\n\n def __init__(self, destpath=os.curdir):\n """Create a DataSource with a local path at destpath."""\n if destpath:\n self._destpath = os.path.abspath(destpath)\n self._istmpdest = False\n else:\n import tempfile # deferring import to improve startup time\n self._destpath = tempfile.mkdtemp()\n self._istmpdest = True\n\n def __del__(self):\n # Remove temp directories\n if hasattr(self, '_istmpdest') and self._istmpdest:\n import shutil\n\n shutil.rmtree(self._destpath)\n\n def _iszip(self, filename):\n """Test if the filename is a zip file by looking at the file extension.\n\n """\n fname, ext = os.path.splitext(filename)\n return ext in _file_openers.keys()\n\n def _iswritemode(self, mode):\n """Test if the given mode will open a file for 
writing."""\n\n # Currently only used to test the bz2 files.\n _writemodes = ("w", "+")\n return any(c in _writemodes for c in mode)\n\n def _splitzipext(self, filename):\n """Split zip extension from filename and return filename.\n\n Returns\n -------\n base, zip_ext : {tuple}\n\n """\n\n if self._iszip(filename):\n return os.path.splitext(filename)\n else:\n return filename, None\n\n def _possible_names(self, filename):\n """Return a tuple containing compressed filename variations."""\n names = [filename]\n if not self._iszip(filename):\n for zipext in _file_openers.keys():\n if zipext:\n names.append(filename + zipext)\n return names\n\n def _isurl(self, path):\n """Test if path is a net location. Tests the scheme and netloc."""\n\n # We do this here to reduce the 'import numpy' initial import time.\n from urllib.parse import urlparse\n\n # BUG : URLs require a scheme string ('http://') to be used.\n # www.google.com will fail.\n # Should we prepend the scheme for those that don't have it and\n # test that also? 
Similar to the way we append .gz and test for\n # for compressed versions of files.\n\n scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path)\n return bool(scheme and netloc)\n\n def _cache(self, path):\n """Cache the file specified by path.\n\n Creates a copy of the file in the datasource cache.\n\n """\n # We import these here because importing them is slow and\n # a significant fraction of numpy's total import time.\n import shutil\n from urllib.request import urlopen\n\n upath = self.abspath(path)\n\n # ensure directory exists\n if not os.path.exists(os.path.dirname(upath)):\n os.makedirs(os.path.dirname(upath))\n\n # TODO: Doesn't handle compressed files!\n if self._isurl(path):\n with urlopen(path) as openedurl:\n with _open(upath, 'wb') as f:\n shutil.copyfileobj(openedurl, f)\n else:\n shutil.copyfile(path, upath)\n return upath\n\n def _findfile(self, path):\n """Searches for ``path`` and returns full path if found.\n\n If path is an URL, _findfile will cache a local copy and return the\n path to the cached file. 
If path is a local file, _findfile will\n return a path to that local file.\n\n The search will include possible compressed versions of the file\n and return the first occurrence found.\n\n """\n\n # Build list of possible local file paths\n if not self._isurl(path):\n # Valid local paths\n filelist = self._possible_names(path)\n # Paths in self._destpath\n filelist += self._possible_names(self.abspath(path))\n else:\n # Cached URLs in self._destpath\n filelist = self._possible_names(self.abspath(path))\n # Remote URLs\n filelist = filelist + self._possible_names(path)\n\n for name in filelist:\n if self.exists(name):\n if self._isurl(name):\n name = self._cache(name)\n return name\n return None\n\n def abspath(self, path):\n """\n Return absolute path of file in the DataSource directory.\n\n If `path` is an URL, then `abspath` will return either the location\n the file exists locally or the location it would exist when opened\n using the `open` method.\n\n Parameters\n ----------\n path : str or pathlib.Path\n Can be a local file or a remote URL.\n\n Returns\n -------\n out : str\n Complete path, including the `DataSource` destination directory.\n\n Notes\n -----\n The functionality is based on `os.path.abspath`.\n\n """\n # We do this here to reduce the 'import numpy' initial import time.\n from urllib.parse import urlparse\n\n # TODO: This should be more robust. Handles case where path includes\n # the destpath, but not other sub-paths. 
Failing case:\n # path = /home/guido/datafile.txt\n # destpath = /home/alex/\n # upath = self.abspath(path)\n # upath == '/home/alex/home/guido/datafile.txt'\n\n # handle case where path includes self._destpath\n splitpath = path.split(self._destpath, 2)\n if len(splitpath) > 1:\n path = splitpath[1]\n scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path)\n netloc = self._sanitize_relative_path(netloc)\n upath = self._sanitize_relative_path(upath)\n return os.path.join(self._destpath, netloc, upath)\n\n def _sanitize_relative_path(self, path):\n """Return a sanitised relative path for which\n os.path.abspath(os.path.join(base, path)).startswith(base)\n """\n last = None\n path = os.path.normpath(path)\n while path != last:\n last = path\n # Note: os.path.join treats '/' as os.sep on Windows\n path = path.lstrip(os.sep).lstrip('/')\n path = path.lstrip(os.pardir).removeprefix('..')\n drive, path = os.path.splitdrive(path) # for Windows\n return path\n\n def exists(self, path):\n """\n Test if path exists.\n\n Test if `path` exists as (and in this order):\n\n - a local file.\n - a remote URL that has been downloaded and stored locally in the\n `DataSource` directory.\n - a remote URL that has not been downloaded, but is valid and\n accessible.\n\n Parameters\n ----------\n path : str or pathlib.Path\n Can be a local file or a remote URL.\n\n Returns\n -------\n out : bool\n True if `path` exists.\n\n Notes\n -----\n When `path` is an URL, `exists` will return True if it's either\n stored locally in the `DataSource` directory, or is a valid remote\n URL. 
`DataSource` does not discriminate between the two, the file\n is accessible if it exists in either location.\n\n """\n\n # First test for local path\n if os.path.exists(path):\n return True\n\n # We import this here because importing urllib is slow and\n # a significant fraction of numpy's total import time.\n from urllib.error import URLError\n from urllib.request import urlopen\n\n # Test cached url\n upath = self.abspath(path)\n if os.path.exists(upath):\n return True\n\n # Test remote url\n if self._isurl(path):\n try:\n netfile = urlopen(path)\n netfile.close()\n del netfile\n return True\n except URLError:\n return False\n return False\n\n def open(self, path, mode='r', encoding=None, newline=None):\n """\n Open and return file-like object.\n\n If `path` is an URL, it will be downloaded, stored in the\n `DataSource` directory and opened from there.\n\n Parameters\n ----------\n path : str or pathlib.Path\n Local file path or URL to open.\n mode : {'r', 'w', 'a'}, optional\n Mode to open `path`. Mode 'r' for reading, 'w' for writing,\n 'a' to append. Available modes depend on the type of object\n specified by `path`. Default is 'r'.\n encoding : {None, str}, optional\n Open text file with given encoding. The default encoding will be\n what `open` uses.\n newline : {None, str}, optional\n Newline to use when reading text file.\n\n Returns\n -------\n out : file object\n File object.\n\n """\n\n # TODO: There is no support for opening a file for writing which\n # doesn't exist yet (creating a file). 
Should there be?\n\n # TODO: Add a ``subdir`` parameter for specifying the subdirectory\n # used to store URLs in self._destpath.\n\n if self._isurl(path) and self._iswritemode(mode):\n raise ValueError("URLs are not writeable")\n\n # NOTE: _findfile will fail on a new file opened for writing.\n found = self._findfile(path)\n if found:\n _fname, ext = self._splitzipext(found)\n if ext == 'bz2':\n mode.replace("+", "")\n return _file_openers[ext](found, mode=mode,\n encoding=encoding, newline=newline)\n else:\n raise FileNotFoundError(f"{path} not found.")\n\n\nclass Repository (DataSource):\n """\n Repository(baseurl, destpath='.')\n\n A data repository where multiple DataSource's share a base\n URL/directory.\n\n `Repository` extends `DataSource` by prepending a base URL (or\n directory) to all the files it handles. Use `Repository` when you will\n be working with multiple files from one base URL. Initialize\n `Repository` with the base URL, then refer to each file by its filename\n only.\n\n Parameters\n ----------\n baseurl : str\n Path to the local directory or remote location that contains the\n data files.\n destpath : str or None, optional\n Path to the directory where the source file gets downloaded to for\n use. If `destpath` is None, a temporary directory will be created.\n The default path is the current directory.\n\n Examples\n --------\n To analyze all files in the repository, do something like this\n (note: this is not self-contained code)::\n\n >>> repos = np.lib._datasource.Repository('/home/user/data/dir/')\n >>> for filename in filelist:\n ... fp = repos.open(filename)\n ... fp.analyze()\n ... 
fp.close()\n\n Similarly you could use a URL for a repository::\n\n >>> repos = np.lib._datasource.Repository('http://www.xyz.edu/data')\n\n """\n\n def __init__(self, baseurl, destpath=os.curdir):\n """Create a Repository with a shared url or directory of baseurl."""\n DataSource.__init__(self, destpath=destpath)\n self._baseurl = baseurl\n\n def __del__(self):\n DataSource.__del__(self)\n\n def _fullpath(self, path):\n """Return complete path for path. Prepends baseurl if necessary."""\n splitpath = path.split(self._baseurl, 2)\n if len(splitpath) == 1:\n result = os.path.join(self._baseurl, path)\n else:\n result = path # path contains baseurl already\n return result\n\n def _findfile(self, path):\n """Extend DataSource method to prepend baseurl to ``path``."""\n return DataSource._findfile(self, self._fullpath(path))\n\n def abspath(self, path):\n """\n Return absolute path of file in the Repository directory.\n\n If `path` is an URL, then `abspath` will return either the location\n the file exists locally or the location it would exist when opened\n using the `open` method.\n\n Parameters\n ----------\n path : str or pathlib.Path\n Can be a local file or a remote URL. This may, but does not\n have to, include the `baseurl` with which the `Repository` was\n initialized.\n\n Returns\n -------\n out : str\n Complete path, including the `DataSource` destination directory.\n\n """\n return DataSource.abspath(self, self._fullpath(path))\n\n def exists(self, path):\n """\n Test if path exists prepending Repository base URL to path.\n\n Test if `path` exists as (and in this order):\n\n - a local file.\n - a remote URL that has been downloaded and stored locally in the\n `DataSource` directory.\n - a remote URL that has not been downloaded, but is valid and\n accessible.\n\n Parameters\n ----------\n path : str or pathlib.Path\n Can be a local file or a remote URL. 
This may, but does not\n have to, include the `baseurl` with which the `Repository` was\n initialized.\n\n Returns\n -------\n out : bool\n True if `path` exists.\n\n Notes\n -----\n When `path` is an URL, `exists` will return True if it's either\n stored locally in the `DataSource` directory, or is a valid remote\n URL. `DataSource` does not discriminate between the two, the file\n is accessible if it exists in either location.\n\n """\n return DataSource.exists(self, self._fullpath(path))\n\n def open(self, path, mode='r', encoding=None, newline=None):\n """\n Open and return file-like object prepending Repository base URL.\n\n If `path` is an URL, it will be downloaded, stored in the\n DataSource directory and opened from there.\n\n Parameters\n ----------\n path : str or pathlib.Path\n Local file path or URL to open. This may, but does not have to,\n include the `baseurl` with which the `Repository` was\n initialized.\n mode : {'r', 'w', 'a'}, optional\n Mode to open `path`. Mode 'r' for reading, 'w' for writing,\n 'a' to append. Available modes depend on the type of object\n specified by `path`. Default is 'r'.\n encoding : {None, str}, optional\n Open text file with given encoding. 
The default encoding will be\n what `open` uses.\n newline : {None, str}, optional\n Newline to use when reading text file.\n\n Returns\n -------\n out : file object\n File object.\n\n """\n return DataSource.open(self, self._fullpath(path), mode,\n encoding=encoding, newline=newline)\n\n def listdir(self):\n """\n List files in the source Repository.\n\n Returns\n -------\n files : list of str or pathlib.Path\n List of file names (not containing a directory part).\n\n Notes\n -----\n Does not currently work for remote repositories.\n\n """\n if self._isurl(self._baseurl):\n raise NotImplementedError(\n "Directory listing of URLs, not supported yet.")\n else:\n return os.listdir(self._baseurl)\n | .venv\Lib\site-packages\numpy\lib\_datasource.py | _datasource.py | Python | 23,431 | 0.95 | 0.162857 | 0.075676 | vue-tools | 953 | 2024-01-24T03:34:29.753069 | BSD-3-Clause | false | 90d66371a10c7a654cf3b6406adea5d5 |
from pathlib import Path\nfrom typing import IO, Any, TypeAlias\n\nfrom _typeshed import OpenBinaryMode, OpenTextMode\n\n_Mode: TypeAlias = OpenBinaryMode | OpenTextMode\n\n###\n\n# exported in numpy.lib.nppyio\nclass DataSource:\n def __init__(self, /, destpath: Path | str | None = ...) -> None: ...\n def __del__(self, /) -> None: ...\n def abspath(self, /, path: str) -> str: ...\n def exists(self, /, path: str) -> bool: ...\n\n # Whether the file-object is opened in string or bytes mode (by default)\n # depends on the file-extension of `path`\n def open(self, /, path: str, mode: _Mode = "r", encoding: str | None = None, newline: str | None = None) -> IO[Any]: ...\n\nclass Repository(DataSource):\n def __init__(self, /, baseurl: str, destpath: str | None = ...) -> None: ...\n def listdir(self, /) -> list[str]: ...\n\ndef open(\n path: str,\n mode: _Mode = "r",\n destpath: str | None = ...,\n encoding: str | None = None,\n newline: str | None = None,\n) -> IO[Any]: ...\n | .venv\Lib\site-packages\numpy\lib\_datasource.pyi | _datasource.pyi | Other | 1,027 | 0.95 | 0.322581 | 0.166667 | awesome-app | 9 | 2024-11-06T01:26:36.491929 | MIT | false | a9a7c33b706898a0789dd6d310d52ddc |
"""\nBinary serialization\n\nNPY format\n==========\n\nA simple format for saving numpy arrays to disk with the full\ninformation about them.\n\nThe ``.npy`` format is the standard binary file format in NumPy for\npersisting a *single* arbitrary NumPy array on disk. The format stores all\nof the shape and dtype information necessary to reconstruct the array\ncorrectly even on another machine with a different architecture.\nThe format is designed to be as simple as possible while achieving\nits limited goals.\n\nThe ``.npz`` format is the standard format for persisting *multiple* NumPy\narrays on disk. A ``.npz`` file is a zip file containing multiple ``.npy``\nfiles, one for each array.\n\nCapabilities\n------------\n\n- Can represent all NumPy arrays including nested record arrays and\n object arrays.\n\n- Represents the data in its native binary form.\n\n- Supports Fortran-contiguous arrays directly.\n\n- Stores all of the necessary information to reconstruct the array\n including shape and dtype on a machine of a different\n architecture. Both little-endian and big-endian arrays are\n supported, and a file with little-endian numbers will yield\n a little-endian array on any machine reading the file. The\n types are described in terms of their actual sizes. For example,\n if a machine with a 64-bit C "long int" writes out an array with\n "long ints", a reading machine with 32-bit C "long ints" will yield\n an array with 64-bit integers.\n\n- Is straightforward to reverse engineer. Datasets often live longer than\n the programs that created them. A competent developer should be\n able to create a solution in their preferred programming language to\n read most ``.npy`` files that they have been given without much\n documentation.\n\n- Allows memory-mapping of the data. See `open_memmap`.\n\n- Can be read from a filelike stream object instead of an actual file.\n\n- Stores object arrays, i.e. arrays containing elements that are arbitrary\n Python objects. 
Files with object arrays are not to be mmapable, but\n can be read and written to disk.\n\nLimitations\n-----------\n\n- Arbitrary subclasses of numpy.ndarray are not completely preserved.\n Subclasses will be accepted for writing, but only the array data will\n be written out. A regular numpy.ndarray object will be created\n upon reading the file.\n\n.. warning::\n\n Due to limitations in the interpretation of structured dtypes, dtypes\n with fields with empty names will have the names replaced by 'f0', 'f1',\n etc. Such arrays will not round-trip through the format entirely\n accurately. The data is intact; only the field names will differ. We are\n working on a fix for this. This fix will not require a change in the\n file format. The arrays with such structures can still be saved and\n restored, and the correct dtype may be restored by using the\n ``loadedarray.view(correct_dtype)`` method.\n\nFile extensions\n---------------\n\nWe recommend using the ``.npy`` and ``.npz`` extensions for files saved\nin this format. This is by no means a requirement; applications may wish\nto use these file formats but use an extension specific to the\napplication. In the absence of an obvious alternative, however,\nwe suggest using ``.npy`` and ``.npz``.\n\nVersion numbering\n-----------------\n\nThe version numbering of these formats is independent of NumPy version\nnumbering. If the format is upgraded, the code in `numpy.io` will still\nbe able to read and write Version 1.0 files.\n\nFormat Version 1.0\n------------------\n\nThe first 6 bytes are a magic string: exactly ``\\x93NUMPY``.\n\nThe next 1 byte is an unsigned byte: the major version number of the file\nformat, e.g. ``\\x01``.\n\nThe next 1 byte is an unsigned byte: the minor version number of the file\nformat, e.g. ``\\x00``. 
Note: the version of the file format is not tied\nto the version of the numpy package.\n\nThe next 2 bytes form a little-endian unsigned short int: the length of\nthe header data HEADER_LEN.\n\nThe next HEADER_LEN bytes form the header data describing the array's\nformat. It is an ASCII string which contains a Python literal expression\nof a dictionary. It is terminated by a newline (``\\n``) and padded with\nspaces (``\\x20``) to make the total of\n``len(magic string) + 2 + len(length) + HEADER_LEN`` be evenly divisible\nby 64 for alignment purposes.\n\nThe dictionary contains three keys:\n\n "descr" : dtype.descr\n An object that can be passed as an argument to the `numpy.dtype`\n constructor to create the array's dtype.\n "fortran_order" : bool\n Whether the array data is Fortran-contiguous or not. Since\n Fortran-contiguous arrays are a common form of non-C-contiguity,\n we allow them to be written directly to disk for efficiency.\n "shape" : tuple of int\n The shape of the array.\n\nFor repeatability and readability, the dictionary keys are sorted in\nalphabetic order. This is for convenience only. A writer SHOULD implement\nthis if possible. A reader MUST NOT depend on this.\n\nFollowing the header comes the array data. If the dtype contains Python\nobjects (i.e. ``dtype.hasobject is True``), then the data is a Python\npickle of the array. Otherwise the data is the contiguous (either C-\nor Fortran-, depending on ``fortran_order``) bytes of the array.\nConsumers can figure out the number of bytes by multiplying the number\nof elements given by the shape (noting that ``shape=()`` means there is\n1 element) by ``dtype.itemsize``.\n\nFormat Version 2.0\n------------------\n\nThe version 1.0 format only allowed the array header to have a total size of\n65535 bytes. This can be exceeded by structured arrays with a large number of\ncolumns. 
The version 2.0 format extends the header size to 4 GiB.\n`numpy.save` will automatically save in 2.0 format if the data requires it,\nelse it will always use the more compatible 1.0 format.\n\nThe description of the fourth element of the header therefore has become:\n"The next 4 bytes form a little-endian unsigned int: the length of the header\ndata HEADER_LEN."\n\nFormat Version 3.0\n------------------\n\nThis version replaces the ASCII string (which in practice was latin1) with\na utf8-encoded string, so supports structured types with any unicode field\nnames.\n\nNotes\n-----\nThe ``.npy`` format, including motivation for creating it and a comparison of\nalternatives, is described in the\n:doc:`"npy-format" NEP <neps:nep-0001-npy-format>`, however details have\nevolved with time and this document is more current.\n\n"""\nimport io\nimport os\nimport pickle\nimport warnings\n\nimport numpy\nfrom numpy._utils import set_module\nfrom numpy.lib._utils_impl import drop_metadata\n\n__all__ = []\n\ndrop_metadata.__module__ = "numpy.lib.format"\n\nEXPECTED_KEYS = {'descr', 'fortran_order', 'shape'}\nMAGIC_PREFIX = b'\x93NUMPY'\nMAGIC_LEN = len(MAGIC_PREFIX) + 2\nARRAY_ALIGN = 64 # plausible values are powers of 2 between 16 and 4096\nBUFFER_SIZE = 2**18 # size of buffer for reading npz files in bytes\n# allow growth within the address space of a 64 bit machine along one axis\nGROWTH_AXIS_MAX_DIGITS = 21 # = len(str(8*2**64-1)) hypothetical int1 dtype\n\n# difference between version 1.0 and 2.0 is a 4 byte (I) header length\n# instead of 2 bytes (H) allowing storage of large structured arrays\n_header_size_info = {\n (1, 0): ('<H', 'latin1'),\n (2, 0): ('<I', 'latin1'),\n (3, 0): ('<I', 'utf8'),\n}\n\n# Python's literal_eval is not actually safe for large inputs, since parsing\n# may become slow or even cause interpreter crashes.\n# This is an arbitrary, low limit which should make it safe in practice.\n_MAX_HEADER_SIZE = 10000\n\n\ndef _check_version(version):\n if 
version not in [(1, 0), (2, 0), (3, 0), None]:\n msg = "we only support format version (1,0), (2,0), and (3,0), not %s"\n raise ValueError(msg % (version,))\n\n\n@set_module("numpy.lib.format")\ndef magic(major, minor):\n """ Return the magic string for the given file format version.\n\n Parameters\n ----------\n major : int in [0, 255]\n minor : int in [0, 255]\n\n Returns\n -------\n magic : str\n\n Raises\n ------\n ValueError if the version cannot be formatted.\n """\n if major < 0 or major > 255:\n raise ValueError("major version must be 0 <= major < 256")\n if minor < 0 or minor > 255:\n raise ValueError("minor version must be 0 <= minor < 256")\n return MAGIC_PREFIX + bytes([major, minor])\n\n\n@set_module("numpy.lib.format")\ndef read_magic(fp):\n """ Read the magic string to get the version of the file format.\n\n Parameters\n ----------\n fp : filelike object\n\n Returns\n -------\n major : int\n minor : int\n """\n magic_str = _read_bytes(fp, MAGIC_LEN, "magic string")\n if magic_str[:-2] != MAGIC_PREFIX:\n msg = "the magic string is not correct; expected %r, got %r"\n raise ValueError(msg % (MAGIC_PREFIX, magic_str[:-2]))\n major, minor = magic_str[-2:]\n return major, minor\n\n\n@set_module("numpy.lib.format")\ndef dtype_to_descr(dtype):\n """\n Get a serializable descriptor from the dtype.\n\n The .descr attribute of a dtype object cannot be round-tripped through\n the dtype() constructor. Simple types, like dtype('float32'), have\n a descr which looks like a record array with one field with '' as\n a name. The dtype() constructor interprets this as a request to give\n a default name. Instead, we construct descriptor that can be passed to\n dtype().\n\n Parameters\n ----------\n dtype : dtype\n The dtype of the array that will be written to disk.\n\n Returns\n -------\n descr : object\n An object that can be passed to `numpy.dtype()` in order to\n replicate the input dtype.\n\n """\n # NOTE: that drop_metadata may not return the right dtype e.g. 
for user\n # dtypes. In that case our code below would fail the same, though.\n new_dtype = drop_metadata(dtype)\n if new_dtype is not dtype:\n warnings.warn("metadata on a dtype is not saved to an npy/npz. "\n "Use another format (such as pickle) to store it.",\n UserWarning, stacklevel=2)\n dtype = new_dtype\n\n if dtype.names is not None:\n # This is a record array. The .descr is fine. XXX: parts of the\n # record array with an empty name, like padding bytes, still get\n # fiddled with. This needs to be fixed in the C implementation of\n # dtype().\n return dtype.descr\n elif not type(dtype)._legacy:\n # this must be a user-defined dtype since numpy does not yet expose any\n # non-legacy dtypes in the public API\n #\n # non-legacy dtypes don't yet have __array_interface__\n # support. Instead, as a hack, we use pickle to save the array, and lie\n # that the dtype is object. When the array is loaded, the descriptor is\n # unpickled with the array and the object dtype in the header is\n # discarded.\n #\n # a future NEP should define a way to serialize user-defined\n # descriptors and ideally work out the possible security implications\n warnings.warn("Custom dtypes are saved as python objects using the "\n "pickle protocol. Loading this file requires "\n "allow_pickle=True to be set.",\n UserWarning, stacklevel=2)\n return "|O"\n else:\n return dtype.str\n\n\n@set_module("numpy.lib.format")\ndef descr_to_dtype(descr):\n """\n Returns a dtype based off the given description.\n\n This is essentially the reverse of `~lib.format.dtype_to_descr`. It will\n remove the valueless padding fields created by, i.e. simple fields like\n dtype('float32'), and then convert the description to its corresponding\n dtype.\n\n Parameters\n ----------\n descr : object\n The object retrieved by dtype.descr. 
Can be passed to\n `numpy.dtype` in order to replicate the input dtype.\n\n Returns\n -------\n dtype : dtype\n The dtype constructed by the description.\n\n """\n if isinstance(descr, str):\n # No padding removal needed\n return numpy.dtype(descr)\n elif isinstance(descr, tuple):\n # subtype, will always have a shape descr[1]\n dt = descr_to_dtype(descr[0])\n return numpy.dtype((dt, descr[1]))\n\n titles = []\n names = []\n formats = []\n offsets = []\n offset = 0\n for field in descr:\n if len(field) == 2:\n name, descr_str = field\n dt = descr_to_dtype(descr_str)\n else:\n name, descr_str, shape = field\n dt = numpy.dtype((descr_to_dtype(descr_str), shape))\n\n # Ignore padding bytes, which will be void bytes with '' as name\n # Once support for blank names is removed, only "if name == ''" needed)\n is_pad = (name == '' and dt.type is numpy.void and dt.names is None)\n if not is_pad:\n title, name = name if isinstance(name, tuple) else (None, name)\n titles.append(title)\n names.append(name)\n formats.append(dt)\n offsets.append(offset)\n offset += dt.itemsize\n\n return numpy.dtype({'names': names, 'formats': formats, 'titles': titles,\n 'offsets': offsets, 'itemsize': offset})\n\n\n@set_module("numpy.lib.format")\ndef header_data_from_array_1_0(array):\n """ Get the dictionary of header metadata from a numpy.ndarray.\n\n Parameters\n ----------\n array : numpy.ndarray\n\n Returns\n -------\n d : dict\n This has the appropriate entries for writing its string representation\n to the header of the file.\n """\n d = {'shape': array.shape}\n if array.flags.c_contiguous:\n d['fortran_order'] = False\n elif array.flags.f_contiguous:\n d['fortran_order'] = True\n else:\n # Totally non-contiguous data. We will have to make it C-contiguous\n # before writing. 
Note that we need to test for C_CONTIGUOUS first\n # because a 1-D array is both C_CONTIGUOUS and F_CONTIGUOUS.\n d['fortran_order'] = False\n\n d['descr'] = dtype_to_descr(array.dtype)\n return d\n\n\ndef _wrap_header(header, version):\n """\n Takes a stringified header, and attaches the prefix and padding to it\n """\n import struct\n assert version is not None\n fmt, encoding = _header_size_info[version]\n header = header.encode(encoding)\n hlen = len(header) + 1\n padlen = ARRAY_ALIGN - ((MAGIC_LEN + struct.calcsize(fmt) + hlen) % ARRAY_ALIGN)\n try:\n header_prefix = magic(*version) + struct.pack(fmt, hlen + padlen)\n except struct.error:\n msg = f"Header length {hlen} too big for version={version}"\n raise ValueError(msg) from None\n\n # Pad the header with spaces and a final newline such that the magic\n # string, the header-length short and the header are aligned on a\n # ARRAY_ALIGN byte boundary. This supports memory mapping of dtypes\n # aligned up to ARRAY_ALIGN on systems like Linux where mmap()\n # offset must be page-aligned (i.e. the beginning of the file).\n return header_prefix + header + b' ' * padlen + b'\n'\n\n\ndef _wrap_header_guess_version(header):\n """\n Like `_wrap_header`, but chooses an appropriate version given the contents\n """\n try:\n return _wrap_header(header, (1, 0))\n except ValueError:\n pass\n\n try:\n ret = _wrap_header(header, (2, 0))\n except UnicodeEncodeError:\n pass\n else:\n warnings.warn("Stored array in format 2.0. It can only be"\n "read by NumPy >= 1.9", UserWarning, stacklevel=2)\n return ret\n\n header = _wrap_header(header, (3, 0))\n warnings.warn("Stored array in format 3.0. 
It can only be "\n "read by NumPy >= 1.17", UserWarning, stacklevel=2)\n return header\n\n\ndef _write_array_header(fp, d, version=None):\n """ Write the header for an array and returns the version used\n\n Parameters\n ----------\n fp : filelike object\n d : dict\n This has the appropriate entries for writing its string representation\n to the header of the file.\n version : tuple or None\n None means use oldest that works. Providing an explicit version will\n raise a ValueError if the format does not allow saving this data.\n Default: None\n """\n header = ["{"]\n for key, value in sorted(d.items()):\n # Need to use repr here, since we eval these when reading\n header.append(f"'{key}': {repr(value)}, ")\n header.append("}")\n header = "".join(header)\n\n # Add some spare space so that the array header can be modified in-place\n # when changing the array size, e.g. when growing it by appending data at\n # the end.\n shape = d['shape']\n header += " " * ((GROWTH_AXIS_MAX_DIGITS - len(repr(\n shape[-1 if d['fortran_order'] else 0]\n ))) if len(shape) > 0 else 0)\n\n if version is None:\n header = _wrap_header_guess_version(header)\n else:\n header = _wrap_header(header, version)\n fp.write(header)\n\n\n@set_module("numpy.lib.format")\ndef write_array_header_1_0(fp, d):\n """ Write the header for an array using the 1.0 format.\n\n Parameters\n ----------\n fp : filelike object\n d : dict\n This has the appropriate entries for writing its string\n representation to the header of the file.\n """\n _write_array_header(fp, d, (1, 0))\n\n\n@set_module("numpy.lib.format")\ndef write_array_header_2_0(fp, d):\n """ Write the header for an array using the 2.0 format.\n The 2.0 format allows storing very large structured arrays.\n\n Parameters\n ----------\n fp : filelike object\n d : dict\n This has the appropriate entries for writing its string\n representation to the header of the file.\n """\n _write_array_header(fp, d, (2, 0))\n\n\n@set_module("numpy.lib.format")\ndef 
read_array_header_1_0(fp, max_header_size=_MAX_HEADER_SIZE):\n """\n Read an array header from a filelike object using the 1.0 file format\n version.\n\n This will leave the file object located just after the header.\n\n Parameters\n ----------\n fp : filelike object\n A file object or something with a `.read()` method like a file.\n\n Returns\n -------\n shape : tuple of int\n The shape of the array.\n fortran_order : bool\n The array data will be written out directly if it is either\n C-contiguous or Fortran-contiguous. Otherwise, it will be made\n contiguous before writing it out.\n dtype : dtype\n The dtype of the file's data.\n max_header_size : int, optional\n Maximum allowed size of the header. Large headers may not be safe\n to load securely and thus require explicitly passing a larger value.\n See :py:func:`ast.literal_eval()` for details.\n\n Raises\n ------\n ValueError\n If the data is invalid.\n\n """\n return _read_array_header(\n fp, version=(1, 0), max_header_size=max_header_size)\n\n\n@set_module("numpy.lib.format")\ndef read_array_header_2_0(fp, max_header_size=_MAX_HEADER_SIZE):\n """\n Read an array header from a filelike object using the 2.0 file format\n version.\n\n This will leave the file object located just after the header.\n\n Parameters\n ----------\n fp : filelike object\n A file object or something with a `.read()` method like a file.\n max_header_size : int, optional\n Maximum allowed size of the header. Large headers may not be safe\n to load securely and thus require explicitly passing a larger value.\n See :py:func:`ast.literal_eval()` for details.\n\n Returns\n -------\n shape : tuple of int\n The shape of the array.\n fortran_order : bool\n The array data will be written out directly if it is either\n C-contiguous or Fortran-contiguous. 
Otherwise, it will be made\n contiguous before writing it out.\n dtype : dtype\n The dtype of the file's data.\n\n Raises\n ------\n ValueError\n If the data is invalid.\n\n """\n return _read_array_header(\n fp, version=(2, 0), max_header_size=max_header_size)\n\n\ndef _filter_header(s):\n """Clean up 'L' in npz header ints.\n\n Cleans up the 'L' in strings representing integers. Needed to allow npz\n headers produced in Python2 to be read in Python3.\n\n Parameters\n ----------\n s : string\n Npy file header.\n\n Returns\n -------\n header : str\n Cleaned up header.\n\n """\n import tokenize\n from io import StringIO\n\n tokens = []\n last_token_was_number = False\n for token in tokenize.generate_tokens(StringIO(s).readline):\n token_type = token[0]\n token_string = token[1]\n if (last_token_was_number and\n token_type == tokenize.NAME and\n token_string == "L"):\n continue\n else:\n tokens.append(token)\n last_token_was_number = (token_type == tokenize.NUMBER)\n return tokenize.untokenize(tokens)\n\n\ndef _read_array_header(fp, version, max_header_size=_MAX_HEADER_SIZE):\n """\n see read_array_header_1_0\n """\n # Read an unsigned, little-endian short int which has the length of the\n # header.\n import ast\n import struct\n hinfo = _header_size_info.get(version)\n if hinfo is None:\n raise ValueError(f"Invalid version {version!r}")\n hlength_type, encoding = hinfo\n\n hlength_str = _read_bytes(fp, struct.calcsize(hlength_type), "array header length")\n header_length = struct.unpack(hlength_type, hlength_str)[0]\n header = _read_bytes(fp, header_length, "array header")\n header = header.decode(encoding)\n if len(header) > max_header_size:\n raise ValueError(\n f"Header info length ({len(header)}) is large and may not be safe "\n "to load securely.\n"\n "To allow loading, adjust `max_header_size` or fully trust "\n "the `.npy` file using `allow_pickle=True`.\n"\n "For safety against large resource use or crashes, sandboxing "\n "may be necessary.")\n\n # The 
header is a pretty-printed string representation of a literal\n # Python dictionary with trailing newlines padded to a ARRAY_ALIGN byte\n # boundary. The keys are strings.\n # "shape" : tuple of int\n # "fortran_order" : bool\n # "descr" : dtype.descr\n # Versions (2, 0) and (1, 0) could have been created by a Python 2\n # implementation before header filtering was implemented.\n #\n # For performance reasons, we try without _filter_header first though\n try:\n d = ast.literal_eval(header)\n except SyntaxError as e:\n if version <= (2, 0):\n header = _filter_header(header)\n try:\n d = ast.literal_eval(header)\n except SyntaxError as e2:\n msg = "Cannot parse header: {!r}"\n raise ValueError(msg.format(header)) from e2\n else:\n warnings.warn(\n "Reading `.npy` or `.npz` file required additional "\n "header parsing as it was created on Python 2. Save the "\n "file again to speed up loading and avoid this warning.",\n UserWarning, stacklevel=4)\n else:\n msg = "Cannot parse header: {!r}"\n raise ValueError(msg.format(header)) from e\n if not isinstance(d, dict):\n msg = "Header is not a dictionary: {!r}"\n raise ValueError(msg.format(d))\n\n if EXPECTED_KEYS != d.keys():\n keys = sorted(d.keys())\n msg = "Header does not contain the correct keys: {!r}"\n raise ValueError(msg.format(keys))\n\n # Sanity-check the values.\n if (not isinstance(d['shape'], tuple) or\n not all(isinstance(x, int) for x in d['shape'])):\n msg = "shape is not valid: {!r}"\n raise ValueError(msg.format(d['shape']))\n if not isinstance(d['fortran_order'], bool):\n msg = "fortran_order is not a valid bool: {!r}"\n raise ValueError(msg.format(d['fortran_order']))\n try:\n dtype = descr_to_dtype(d['descr'])\n except TypeError as e:\n msg = "descr is not a valid dtype descriptor: {!r}"\n raise ValueError(msg.format(d['descr'])) from e\n\n return d['shape'], d['fortran_order'], dtype\n\n\n@set_module("numpy.lib.format")\ndef write_array(fp, array, version=None, allow_pickle=True, 
pickle_kwargs=None):\n """\n Write an array to an NPY file, including a header.\n\n If the array is neither C-contiguous nor Fortran-contiguous AND the\n file_like object is not a real file object, this function will have to\n copy data in memory.\n\n Parameters\n ----------\n fp : file_like object\n An open, writable file object, or similar object with a\n ``.write()`` method.\n array : ndarray\n The array to write to disk.\n version : (int, int) or None, optional\n The version number of the format. None means use the oldest\n supported version that is able to store the data. Default: None\n allow_pickle : bool, optional\n Whether to allow writing pickled data. Default: True\n pickle_kwargs : dict, optional\n Additional keyword arguments to pass to pickle.dump, excluding\n 'protocol'. These are only useful when pickling objects in object\n arrays to Python 2 compatible format.\n\n Raises\n ------\n ValueError\n If the array cannot be persisted. This includes the case of\n allow_pickle=False and array being an object array.\n Various other errors\n If the array contains Python objects as part of its dtype, the\n process of pickling them may raise various errors if the objects\n are not picklable.\n\n """\n _check_version(version)\n _write_array_header(fp, header_data_from_array_1_0(array), version)\n\n if array.itemsize == 0:\n buffersize = 0\n else:\n # Set buffer size to 16 MiB to hide the Python loop overhead.\n buffersize = max(16 * 1024 ** 2 // array.itemsize, 1)\n\n dtype_class = type(array.dtype)\n\n if array.dtype.hasobject or not dtype_class._legacy:\n # We contain Python objects so we cannot write out the data\n # directly. 
Instead, we will pickle it out\n if not allow_pickle:\n if array.dtype.hasobject:\n raise ValueError("Object arrays cannot be saved when "\n "allow_pickle=False")\n if not dtype_class._legacy:\n raise ValueError("User-defined dtypes cannot be saved "\n "when allow_pickle=False")\n if pickle_kwargs is None:\n pickle_kwargs = {}\n pickle.dump(array, fp, protocol=4, **pickle_kwargs)\n elif array.flags.f_contiguous and not array.flags.c_contiguous:\n if isfileobj(fp):\n array.T.tofile(fp)\n else:\n for chunk in numpy.nditer(\n array, flags=['external_loop', 'buffered', 'zerosize_ok'],\n buffersize=buffersize, order='F'):\n fp.write(chunk.tobytes('C'))\n elif isfileobj(fp):\n array.tofile(fp)\n else:\n for chunk in numpy.nditer(\n array, flags=['external_loop', 'buffered', 'zerosize_ok'],\n buffersize=buffersize, order='C'):\n fp.write(chunk.tobytes('C'))\n\n\n@set_module("numpy.lib.format")\ndef read_array(fp, allow_pickle=False, pickle_kwargs=None, *,\n max_header_size=_MAX_HEADER_SIZE):\n """\n Read an array from an NPY file.\n\n Parameters\n ----------\n fp : file_like object\n If this is not a real file object, then this may take extra memory\n and time.\n allow_pickle : bool, optional\n Whether to allow writing pickled data. Default: False\n pickle_kwargs : dict\n Additional keyword arguments to pass to pickle.load. These are only\n useful when loading object arrays saved on Python 2.\n max_header_size : int, optional\n Maximum allowed size of the header. Large headers may not be safe\n to load securely and thus require explicitly passing a larger value.\n See :py:func:`ast.literal_eval()` for details.\n This option is ignored when `allow_pickle` is passed. 
In that case\n the file is by definition trusted and the limit is unnecessary.\n\n Returns\n -------\n array : ndarray\n The array from the data on disk.\n\n Raises\n ------\n ValueError\n If the data is invalid, or allow_pickle=False and the file contains\n an object array.\n\n """\n if allow_pickle:\n # Effectively ignore max_header_size, since `allow_pickle` indicates\n # that the input is fully trusted.\n max_header_size = 2**64\n\n version = read_magic(fp)\n _check_version(version)\n shape, fortran_order, dtype = _read_array_header(\n fp, version, max_header_size=max_header_size)\n if len(shape) == 0:\n count = 1\n else:\n count = numpy.multiply.reduce(shape, dtype=numpy.int64)\n\n # Now read the actual data.\n if dtype.hasobject:\n # The array contained Python objects. We need to unpickle the data.\n if not allow_pickle:\n raise ValueError("Object arrays cannot be loaded when "\n "allow_pickle=False")\n if pickle_kwargs is None:\n pickle_kwargs = {}\n try:\n array = pickle.load(fp, **pickle_kwargs)\n except UnicodeError as err:\n # Friendlier error message\n raise UnicodeError("Unpickling a python object failed: %r\n"\n "You may need to pass the encoding= option "\n "to numpy.load" % (err,)) from err\n else:\n if isfileobj(fp):\n # We can use the fast fromfile() function.\n array = numpy.fromfile(fp, dtype=dtype, count=count)\n else:\n # This is not a real file. We have to read it the\n # memory-intensive way.\n # crc32 module fails on reads greater than 2 ** 32 bytes,\n # breaking large reads from gzip streams. Chunk reads to\n # BUFFER_SIZE bytes to avoid issue and reduce memory overhead\n # of the read. 
In non-chunked case count < max_read_count, so\n # only one read is performed.\n\n # Use np.ndarray instead of np.empty since the latter does\n # not correctly instantiate zero-width string dtypes; see\n # https://github.com/numpy/numpy/pull/6430\n array = numpy.ndarray(count, dtype=dtype)\n\n if dtype.itemsize > 0:\n # If dtype.itemsize == 0 then there's nothing more to read\n max_read_count = BUFFER_SIZE // min(BUFFER_SIZE, dtype.itemsize)\n\n for i in range(0, count, max_read_count):\n read_count = min(max_read_count, count - i)\n read_size = int(read_count * dtype.itemsize)\n data = _read_bytes(fp, read_size, "array data")\n array[i:i + read_count] = numpy.frombuffer(data, dtype=dtype,\n count=read_count)\n\n if array.size != count:\n raise ValueError(\n "Failed to read all data for array. "\n f"Expected {shape} = {count} elements, "\n f"could only read {array.size} elements. "\n "(file seems not fully written?)"\n )\n\n if fortran_order:\n array.shape = shape[::-1]\n array = array.transpose()\n else:\n array.shape = shape\n\n return array\n\n\n@set_module("numpy.lib.format")\ndef open_memmap(filename, mode='r+', dtype=None, shape=None,\n fortran_order=False, version=None, *,\n max_header_size=_MAX_HEADER_SIZE):\n """\n Open a .npy file as a memory-mapped array.\n\n This may be used to read an existing file or create a new one.\n\n Parameters\n ----------\n filename : str or path-like\n The name of the file on disk. This may *not* be a file-like\n object.\n mode : str, optional\n The mode in which to open the file; the default is 'r+'. In\n addition to the standard file modes, 'c' is also accepted to mean\n "copy on write." See `memmap` for the available mode strings.\n dtype : data-type, optional\n The data type of the array if we are creating a new file in "write"\n mode, if not, `dtype` is ignored. 
The default value is None, which\n results in a data-type of `float64`.\n shape : tuple of int\n The shape of the array if we are creating a new file in "write"\n mode, in which case this parameter is required. Otherwise, this\n parameter is ignored and is thus optional.\n fortran_order : bool, optional\n Whether the array should be Fortran-contiguous (True) or\n C-contiguous (False, the default) if we are creating a new file in\n "write" mode.\n version : tuple of int (major, minor) or None\n If the mode is a "write" mode, then this is the version of the file\n format used to create the file. None means use the oldest\n supported version that is able to store the data. Default: None\n max_header_size : int, optional\n Maximum allowed size of the header. Large headers may not be safe\n to load securely and thus require explicitly passing a larger value.\n See :py:func:`ast.literal_eval()` for details.\n\n Returns\n -------\n marray : memmap\n The memory-mapped array.\n\n Raises\n ------\n ValueError\n If the data or the mode is invalid.\n OSError\n If the file is not found or cannot be opened correctly.\n\n See Also\n --------\n numpy.memmap\n\n """\n if isfileobj(filename):\n raise ValueError("Filename must be a string or a path-like object."\n " Memmap cannot use existing file handles.")\n\n if 'w' in mode:\n # We are creating the file, not reading it.\n # Check if we ought to create the file.\n _check_version(version)\n # Ensure that the given dtype is an authentic dtype object rather\n # than just something that can be interpreted as a dtype object.\n dtype = numpy.dtype(dtype)\n if dtype.hasobject:\n msg = "Array can't be memory-mapped: Python objects in dtype."\n raise ValueError(msg)\n d = {\n "descr": dtype_to_descr(dtype),\n "fortran_order": fortran_order,\n "shape": shape,\n }\n # If we got here, then it should be safe to create the file.\n with open(os.fspath(filename), mode + 'b') as fp:\n _write_array_header(fp, d, version)\n offset = fp.tell()\n 
else:\n # Read the header of the file first.\n with open(os.fspath(filename), 'rb') as fp:\n version = read_magic(fp)\n _check_version(version)\n\n shape, fortran_order, dtype = _read_array_header(\n fp, version, max_header_size=max_header_size)\n if dtype.hasobject:\n msg = "Array can't be memory-mapped: Python objects in dtype."\n raise ValueError(msg)\n offset = fp.tell()\n\n if fortran_order:\n order = 'F'\n else:\n order = 'C'\n\n # We need to change a write-only mode to a read-write mode since we've\n # already written data to the file.\n if mode == 'w+':\n mode = 'r+'\n\n marray = numpy.memmap(filename, dtype=dtype, shape=shape, order=order,\n mode=mode, offset=offset)\n\n return marray\n\n\ndef _read_bytes(fp, size, error_template="ran out of data"):\n """\n Read from file-like object until size bytes are read.\n Raises ValueError if not EOF is encountered before size bytes are read.\n Non-blocking objects only supported if they derive from io objects.\n\n Required as e.g. ZipExtFile in python 2.6 can return less data than\n requested.\n """\n data = b""\n while True:\n # io files (default in python3) return None or raise on\n # would-block, python2 file will truncate, probably nothing can be\n # done about that. note that regular files can't be non-blocking\n try:\n r = fp.read(size - len(data))\n data += r\n if len(r) == 0 or len(data) == size:\n break\n except BlockingIOError:\n pass\n if len(data) != size:\n msg = "EOF: reading %s, expected %d bytes got %d"\n raise ValueError(msg % (error_template, size, len(data)))\n else:\n return data\n\n\n@set_module("numpy.lib.format")\ndef isfileobj(f):\n if not isinstance(f, (io.FileIO, io.BufferedReader, io.BufferedWriter)):\n return False\n try:\n # BufferedReader/Writer may raise OSError when\n # fetching `fileno()` (e.g. 
when wrapping BytesIO).\n f.fileno()\n return True\n except OSError:\n return False\n | .venv\Lib\site-packages\numpy\lib\_format_impl.py | _format_impl.py | Python | 37,901 | 0.95 | 0.130309 | 0.097477 | vue-tools | 912 | 2023-11-30T07:46:59.551993 | BSD-3-Clause | false | 7e91467dad3db30d131b7a267362bea8 |
from typing import Final, Literal\n\nfrom numpy.lib._utils_impl import drop_metadata # noqa: F401\n\n__all__: list[str] = []\n\nEXPECTED_KEYS: Final[set[str]]\nMAGIC_PREFIX: Final[bytes]\nMAGIC_LEN: Literal[8]\nARRAY_ALIGN: Literal[64]\nBUFFER_SIZE: Literal[262144] # 2**18\nGROWTH_AXIS_MAX_DIGITS: Literal[21]\n\ndef magic(major, minor): ...\ndef read_magic(fp): ...\ndef dtype_to_descr(dtype): ...\ndef descr_to_dtype(descr): ...\ndef header_data_from_array_1_0(array): ...\ndef write_array_header_1_0(fp, d): ...\ndef write_array_header_2_0(fp, d): ...\ndef read_array_header_1_0(fp): ...\ndef read_array_header_2_0(fp): ...\ndef write_array(fp, array, version=..., allow_pickle=..., pickle_kwargs=...): ...\ndef read_array(fp, allow_pickle=..., pickle_kwargs=...): ...\ndef open_memmap(filename, mode=..., dtype=..., shape=..., fortran_order=..., version=...): ...\ndef isfileobj(f): ...\n | .venv\Lib\site-packages\numpy\lib\_format_impl.pyi | _format_impl.pyi | Other | 895 | 0.95 | 0.5 | 0 | python-kit | 497 | 2025-03-02T23:07:31.010445 | Apache-2.0 | false | a8f8717818aca25e9982a0f2e515621f |
# ruff: noqa: ANN401\nfrom collections.abc import Callable, Iterable, Sequence\nfrom typing import (\n Any,\n Concatenate,\n ParamSpec,\n Protocol,\n SupportsIndex,\n SupportsInt,\n TypeAlias,\n TypeVar,\n overload,\n type_check_only,\n)\nfrom typing import Literal as L\n\nfrom _typeshed import Incomplete\nfrom typing_extensions import TypeIs, deprecated\n\nimport numpy as np\nfrom numpy import (\n _OrderKACF,\n bool_,\n complex128,\n complexfloating,\n datetime64,\n float64,\n floating,\n generic,\n integer,\n intp,\n object_,\n timedelta64,\n vectorize,\n)\nfrom numpy._core.multiarray import bincount\nfrom numpy._globals import _NoValueType\nfrom numpy._typing import (\n ArrayLike,\n DTypeLike,\n NDArray,\n _ArrayLike,\n _ArrayLikeBool_co,\n _ArrayLikeComplex_co,\n _ArrayLikeDT64_co,\n _ArrayLikeFloat_co,\n _ArrayLikeInt_co,\n _ArrayLikeNumber_co,\n _ArrayLikeObject_co,\n _ArrayLikeTD64_co,\n _ComplexLike_co,\n _DTypeLike,\n _FloatLike_co,\n _NestedSequence,\n _NumberLike_co,\n _ScalarLike_co,\n _ShapeLike,\n)\n\n__all__ = [\n "select",\n "piecewise",\n "trim_zeros",\n "copy",\n "iterable",\n "percentile",\n "diff",\n "gradient",\n "angle",\n "unwrap",\n "sort_complex",\n "flip",\n "rot90",\n "extract",\n "place",\n "vectorize",\n "asarray_chkfinite",\n "average",\n "bincount",\n "digitize",\n "cov",\n "corrcoef",\n "median",\n "sinc",\n "hamming",\n "hanning",\n "bartlett",\n "blackman",\n "kaiser",\n "trapezoid",\n "trapz",\n "i0",\n "meshgrid",\n "delete",\n "insert",\n "append",\n "interp",\n "quantile",\n]\n\n_T = TypeVar("_T")\n_T_co = TypeVar("_T_co", covariant=True)\n# The `{}ss` suffix refers to the Python 3.12 syntax: `**P`\n_Pss = ParamSpec("_Pss")\n_ScalarT = TypeVar("_ScalarT", bound=generic)\n_ScalarT1 = TypeVar("_ScalarT1", bound=generic)\n_ScalarT2 = TypeVar("_ScalarT2", bound=generic)\n_ArrayT = TypeVar("_ArrayT", bound=np.ndarray)\n\n_2Tuple: TypeAlias = tuple[_T, _T]\n_MeshgridIdx: TypeAlias = L['ij', 'xy']\n\n@type_check_only\nclass 
_TrimZerosSequence(Protocol[_T_co]):\n def __len__(self, /) -> int: ...\n @overload\n def __getitem__(self, key: int, /) -> object: ...\n @overload\n def __getitem__(self, key: slice, /) -> _T_co: ...\n\n###\n\n@overload\ndef rot90(\n m: _ArrayLike[_ScalarT],\n k: int = ...,\n axes: tuple[int, int] = ...,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef rot90(\n m: ArrayLike,\n k: int = ...,\n axes: tuple[int, int] = ...,\n) -> NDArray[Any]: ...\n\n@overload\ndef flip(m: _ScalarT, axis: None = ...) -> _ScalarT: ...\n@overload\ndef flip(m: _ScalarLike_co, axis: None = ...) -> Any: ...\n@overload\ndef flip(m: _ArrayLike[_ScalarT], axis: _ShapeLike | None = ...) -> NDArray[_ScalarT]: ...\n@overload\ndef flip(m: ArrayLike, axis: _ShapeLike | None = ...) -> NDArray[Any]: ...\n\ndef iterable(y: object) -> TypeIs[Iterable[Any]]: ...\n\n@overload\ndef average(\n a: _ArrayLikeFloat_co,\n axis: None = None,\n weights: _ArrayLikeFloat_co | None = None,\n returned: L[False] = False,\n *,\n keepdims: L[False] | _NoValueType = ...,\n) -> floating: ...\n@overload\ndef average(\n a: _ArrayLikeFloat_co,\n axis: None = None,\n weights: _ArrayLikeFloat_co | None = None,\n *,\n returned: L[True],\n keepdims: L[False] | _NoValueType = ...,\n) -> _2Tuple[floating]: ...\n@overload\ndef average(\n a: _ArrayLikeComplex_co,\n axis: None = None,\n weights: _ArrayLikeComplex_co | None = None,\n returned: L[False] = False,\n *,\n keepdims: L[False] | _NoValueType = ...,\n) -> complexfloating: ...\n@overload\ndef average(\n a: _ArrayLikeComplex_co,\n axis: None = None,\n weights: _ArrayLikeComplex_co | None = None,\n *,\n returned: L[True],\n keepdims: L[False] | _NoValueType = ...,\n) -> _2Tuple[complexfloating]: ...\n@overload\ndef average(\n a: _ArrayLikeComplex_co | _ArrayLikeObject_co,\n axis: _ShapeLike | None = None,\n weights: object | None = None,\n *,\n returned: L[True],\n keepdims: bool | bool_ | _NoValueType = ...,\n) -> _2Tuple[Incomplete]: ...\n@overload\ndef average(\n a: 
_ArrayLikeComplex_co | _ArrayLikeObject_co,\n axis: _ShapeLike | None = None,\n weights: object | None = None,\n returned: bool | bool_ = False,\n *,\n keepdims: bool | bool_ | _NoValueType = ...,\n) -> Incomplete: ...\n\n@overload\ndef asarray_chkfinite(\n a: _ArrayLike[_ScalarT],\n dtype: None = ...,\n order: _OrderKACF = ...,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef asarray_chkfinite(\n a: object,\n dtype: None = ...,\n order: _OrderKACF = ...,\n) -> NDArray[Any]: ...\n@overload\ndef asarray_chkfinite(\n a: Any,\n dtype: _DTypeLike[_ScalarT],\n order: _OrderKACF = ...,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef asarray_chkfinite(\n a: Any,\n dtype: DTypeLike,\n order: _OrderKACF = ...,\n) -> NDArray[Any]: ...\n\n@overload\ndef piecewise(\n x: _ArrayLike[_ScalarT],\n condlist: _ArrayLike[bool_] | Sequence[_ArrayLikeBool_co],\n funclist: Sequence[\n Callable[Concatenate[NDArray[_ScalarT], _Pss], NDArray[_ScalarT | Any]]\n | _ScalarT | object\n ],\n /,\n *args: _Pss.args,\n **kw: _Pss.kwargs,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef piecewise(\n x: ArrayLike,\n condlist: _ArrayLike[bool_] | Sequence[_ArrayLikeBool_co],\n funclist: Sequence[\n Callable[Concatenate[NDArray[Any], _Pss], NDArray[Any]]\n | object\n ],\n /,\n *args: _Pss.args,\n **kw: _Pss.kwargs,\n) -> NDArray[Any]: ...\n\ndef select(\n condlist: Sequence[ArrayLike],\n choicelist: Sequence[ArrayLike],\n default: ArrayLike = ...,\n) -> NDArray[Any]: ...\n\n@overload\ndef copy(\n a: _ArrayT,\n order: _OrderKACF,\n subok: L[True],\n) -> _ArrayT: ...\n@overload\ndef copy(\n a: _ArrayT,\n order: _OrderKACF = ...,\n *,\n subok: L[True],\n) -> _ArrayT: ...\n@overload\ndef copy(\n a: _ArrayLike[_ScalarT],\n order: _OrderKACF = ...,\n subok: L[False] = ...,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef copy(\n a: ArrayLike,\n order: _OrderKACF = ...,\n subok: L[False] = ...,\n) -> NDArray[Any]: ...\n\ndef gradient(\n f: ArrayLike,\n *varargs: ArrayLike,\n axis: _ShapeLike | None = ...,\n edge_order: 
L[1, 2] = ...,\n) -> Any: ...\n\n@overload\ndef diff(\n a: _T,\n n: L[0],\n axis: SupportsIndex = ...,\n prepend: ArrayLike = ...,\n append: ArrayLike = ...,\n) -> _T: ...\n@overload\ndef diff(\n a: ArrayLike,\n n: int = ...,\n axis: SupportsIndex = ...,\n prepend: ArrayLike = ...,\n append: ArrayLike = ...,\n) -> NDArray[Any]: ...\n\n@overload # float scalar\ndef interp(\n x: _FloatLike_co,\n xp: _ArrayLikeFloat_co,\n fp: _ArrayLikeFloat_co,\n left: _FloatLike_co | None = None,\n right: _FloatLike_co | None = None,\n period: _FloatLike_co | None = None,\n) -> float64: ...\n@overload # float array\ndef interp(\n x: NDArray[floating | integer | np.bool] | _NestedSequence[_FloatLike_co],\n xp: _ArrayLikeFloat_co,\n fp: _ArrayLikeFloat_co,\n left: _FloatLike_co | None = None,\n right: _FloatLike_co | None = None,\n period: _FloatLike_co | None = None,\n) -> NDArray[float64]: ...\n@overload # float scalar or array\ndef interp(\n x: _ArrayLikeFloat_co,\n xp: _ArrayLikeFloat_co,\n fp: _ArrayLikeFloat_co,\n left: _FloatLike_co | None = None,\n right: _FloatLike_co | None = None,\n period: _FloatLike_co | None = None,\n) -> NDArray[float64] | float64: ...\n@overload # complex scalar\ndef interp(\n x: _FloatLike_co,\n xp: _ArrayLikeFloat_co,\n fp: _ArrayLike[complexfloating],\n left: _NumberLike_co | None = None,\n right: _NumberLike_co | None = None,\n period: _FloatLike_co | None = None,\n) -> complex128: ...\n@overload # complex or float scalar\ndef interp(\n x: _FloatLike_co,\n xp: _ArrayLikeFloat_co,\n fp: Sequence[complex | complexfloating],\n left: _NumberLike_co | None = None,\n right: _NumberLike_co | None = None,\n period: _FloatLike_co | None = None,\n) -> complex128 | float64: ...\n@overload # complex array\ndef interp(\n x: NDArray[floating | integer | np.bool] | _NestedSequence[_FloatLike_co],\n xp: _ArrayLikeFloat_co,\n fp: _ArrayLike[complexfloating],\n left: _NumberLike_co | None = None,\n right: _NumberLike_co | None = None,\n period: _FloatLike_co | None 
= None,\n) -> NDArray[complex128]: ...\n@overload # complex or float array\ndef interp(\n x: NDArray[floating | integer | np.bool] | _NestedSequence[_FloatLike_co],\n xp: _ArrayLikeFloat_co,\n fp: Sequence[complex | complexfloating],\n left: _NumberLike_co | None = None,\n right: _NumberLike_co | None = None,\n period: _FloatLike_co | None = None,\n) -> NDArray[complex128 | float64]: ...\n@overload # complex scalar or array\ndef interp(\n x: _ArrayLikeFloat_co,\n xp: _ArrayLikeFloat_co,\n fp: _ArrayLike[complexfloating],\n left: _NumberLike_co | None = None,\n right: _NumberLike_co | None = None,\n period: _FloatLike_co | None = None,\n) -> NDArray[complex128] | complex128: ...\n@overload # complex or float scalar or array\ndef interp(\n x: _ArrayLikeFloat_co,\n xp: _ArrayLikeFloat_co,\n fp: _ArrayLikeNumber_co,\n left: _NumberLike_co | None = None,\n right: _NumberLike_co | None = None,\n period: _FloatLike_co | None = None,\n) -> NDArray[complex128 | float64] | complex128 | float64: ...\n\n@overload\ndef angle(z: _ComplexLike_co, deg: bool = ...) -> floating: ...\n@overload\ndef angle(z: object_, deg: bool = ...) -> Any: ...\n@overload\ndef angle(z: _ArrayLikeComplex_co, deg: bool = ...) -> NDArray[floating]: ...\n@overload\ndef angle(z: _ArrayLikeObject_co, deg: bool = ...) 
-> NDArray[object_]: ...\n\n@overload\ndef unwrap(\n p: _ArrayLikeFloat_co,\n discont: float | None = ...,\n axis: int = ...,\n *,\n period: float = ...,\n) -> NDArray[floating]: ...\n@overload\ndef unwrap(\n p: _ArrayLikeObject_co,\n discont: float | None = ...,\n axis: int = ...,\n *,\n period: float = ...,\n) -> NDArray[object_]: ...\n\ndef sort_complex(a: ArrayLike) -> NDArray[complexfloating]: ...\n\ndef trim_zeros(\n filt: _TrimZerosSequence[_T],\n trim: L["f", "b", "fb", "bf"] = ...,\n) -> _T: ...\n\n@overload\ndef extract(condition: ArrayLike, arr: _ArrayLike[_ScalarT]) -> NDArray[_ScalarT]: ...\n@overload\ndef extract(condition: ArrayLike, arr: ArrayLike) -> NDArray[Any]: ...\n\ndef place(arr: NDArray[Any], mask: ArrayLike, vals: Any) -> None: ...\n\n@overload\ndef cov(\n m: _ArrayLikeFloat_co,\n y: _ArrayLikeFloat_co | None = ...,\n rowvar: bool = ...,\n bias: bool = ...,\n ddof: SupportsIndex | SupportsInt | None = ...,\n fweights: ArrayLike | None = ...,\n aweights: ArrayLike | None = ...,\n *,\n dtype: None = ...,\n) -> NDArray[floating]: ...\n@overload\ndef cov(\n m: _ArrayLikeComplex_co,\n y: _ArrayLikeComplex_co | None = ...,\n rowvar: bool = ...,\n bias: bool = ...,\n ddof: SupportsIndex | SupportsInt | None = ...,\n fweights: ArrayLike | None = ...,\n aweights: ArrayLike | None = ...,\n *,\n dtype: None = ...,\n) -> NDArray[complexfloating]: ...\n@overload\ndef cov(\n m: _ArrayLikeComplex_co,\n y: _ArrayLikeComplex_co | None = ...,\n rowvar: bool = ...,\n bias: bool = ...,\n ddof: SupportsIndex | SupportsInt | None = ...,\n fweights: ArrayLike | None = ...,\n aweights: ArrayLike | None = ...,\n *,\n dtype: _DTypeLike[_ScalarT],\n) -> NDArray[_ScalarT]: ...\n@overload\ndef cov(\n m: _ArrayLikeComplex_co,\n y: _ArrayLikeComplex_co | None = ...,\n rowvar: bool = ...,\n bias: bool = ...,\n ddof: SupportsIndex | SupportsInt | None = ...,\n fweights: ArrayLike | None = ...,\n aweights: ArrayLike | None = ...,\n *,\n dtype: DTypeLike,\n) -> NDArray[Any]: 
...\n\n# NOTE `bias` and `ddof` are deprecated and ignored\n@overload\ndef corrcoef(\n m: _ArrayLikeFloat_co,\n y: _ArrayLikeFloat_co | None = None,\n rowvar: bool = True,\n bias: _NoValueType = ...,\n ddof: _NoValueType = ...,\n *,\n dtype: None = None,\n) -> NDArray[floating]: ...\n@overload\ndef corrcoef(\n m: _ArrayLikeComplex_co,\n y: _ArrayLikeComplex_co | None = None,\n rowvar: bool = True,\n bias: _NoValueType = ...,\n ddof: _NoValueType = ...,\n *,\n dtype: None = None,\n) -> NDArray[complexfloating]: ...\n@overload\ndef corrcoef(\n m: _ArrayLikeComplex_co,\n y: _ArrayLikeComplex_co | None = None,\n rowvar: bool = True,\n bias: _NoValueType = ...,\n ddof: _NoValueType = ...,\n *,\n dtype: _DTypeLike[_ScalarT],\n) -> NDArray[_ScalarT]: ...\n@overload\ndef corrcoef(\n m: _ArrayLikeComplex_co,\n y: _ArrayLikeComplex_co | None = None,\n rowvar: bool = True,\n bias: _NoValueType = ...,\n ddof: _NoValueType = ...,\n *,\n dtype: DTypeLike | None = None,\n) -> NDArray[Any]: ...\n\ndef blackman(M: _FloatLike_co) -> NDArray[floating]: ...\n\ndef bartlett(M: _FloatLike_co) -> NDArray[floating]: ...\n\ndef hanning(M: _FloatLike_co) -> NDArray[floating]: ...\n\ndef hamming(M: _FloatLike_co) -> NDArray[floating]: ...\n\ndef i0(x: _ArrayLikeFloat_co) -> NDArray[floating]: ...\n\ndef kaiser(\n M: _FloatLike_co,\n beta: _FloatLike_co,\n) -> NDArray[floating]: ...\n\n@overload\ndef sinc(x: _FloatLike_co) -> floating: ...\n@overload\ndef sinc(x: _ComplexLike_co) -> complexfloating: ...\n@overload\ndef sinc(x: _ArrayLikeFloat_co) -> NDArray[floating]: ...\n@overload\ndef sinc(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ...\n\n@overload\ndef median(\n a: _ArrayLikeFloat_co,\n axis: None = ...,\n out: None = ...,\n overwrite_input: bool = ...,\n keepdims: L[False] = ...,\n) -> floating: ...\n@overload\ndef median(\n a: _ArrayLikeComplex_co,\n axis: None = ...,\n out: None = ...,\n overwrite_input: bool = ...,\n keepdims: L[False] = ...,\n) -> complexfloating: 
...\n@overload\ndef median(\n a: _ArrayLikeTD64_co,\n axis: None = ...,\n out: None = ...,\n overwrite_input: bool = ...,\n keepdims: L[False] = ...,\n) -> timedelta64: ...\n@overload\ndef median(\n a: _ArrayLikeObject_co,\n axis: None = ...,\n out: None = ...,\n overwrite_input: bool = ...,\n keepdims: L[False] = ...,\n) -> Any: ...\n@overload\ndef median(\n a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,\n axis: _ShapeLike | None = ...,\n out: None = ...,\n overwrite_input: bool = ...,\n keepdims: bool = ...,\n) -> Any: ...\n@overload\ndef median(\n a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,\n axis: _ShapeLike | None,\n out: _ArrayT,\n overwrite_input: bool = ...,\n keepdims: bool = ...,\n) -> _ArrayT: ...\n@overload\ndef median(\n a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,\n axis: _ShapeLike | None = ...,\n *,\n out: _ArrayT,\n overwrite_input: bool = ...,\n keepdims: bool = ...,\n) -> _ArrayT: ...\n\n_MethodKind = L[\n "inverted_cdf",\n "averaged_inverted_cdf",\n "closest_observation",\n "interpolated_inverted_cdf",\n "hazen",\n "weibull",\n "linear",\n "median_unbiased",\n "normal_unbiased",\n "lower",\n "higher",\n "midpoint",\n "nearest",\n]\n\n@overload\ndef percentile(\n a: _ArrayLikeFloat_co,\n q: _FloatLike_co,\n axis: None = ...,\n out: None = ...,\n overwrite_input: bool = ...,\n method: _MethodKind = ...,\n keepdims: L[False] = ...,\n *,\n weights: _ArrayLikeFloat_co | None = ...,\n) -> floating: ...\n@overload\ndef percentile(\n a: _ArrayLikeComplex_co,\n q: _FloatLike_co,\n axis: None = ...,\n out: None = ...,\n overwrite_input: bool = ...,\n method: _MethodKind = ...,\n keepdims: L[False] = ...,\n *,\n weights: _ArrayLikeFloat_co | None = ...,\n) -> complexfloating: ...\n@overload\ndef percentile(\n a: _ArrayLikeTD64_co,\n q: _FloatLike_co,\n axis: None = ...,\n out: None = ...,\n overwrite_input: bool = ...,\n method: 
_MethodKind = ...,\n keepdims: L[False] = ...,\n *,\n weights: _ArrayLikeFloat_co | None = ...,\n) -> timedelta64: ...\n@overload\ndef percentile(\n a: _ArrayLikeDT64_co,\n q: _FloatLike_co,\n axis: None = ...,\n out: None = ...,\n overwrite_input: bool = ...,\n method: _MethodKind = ...,\n keepdims: L[False] = ...,\n *,\n weights: _ArrayLikeFloat_co | None = ...,\n) -> datetime64: ...\n@overload\ndef percentile(\n a: _ArrayLikeObject_co,\n q: _FloatLike_co,\n axis: None = ...,\n out: None = ...,\n overwrite_input: bool = ...,\n method: _MethodKind = ...,\n keepdims: L[False] = ...,\n *,\n weights: _ArrayLikeFloat_co | None = ...,\n) -> Any: ...\n@overload\ndef percentile(\n a: _ArrayLikeFloat_co,\n q: _ArrayLikeFloat_co,\n axis: None = ...,\n out: None = ...,\n overwrite_input: bool = ...,\n method: _MethodKind = ...,\n keepdims: L[False] = ...,\n *,\n weights: _ArrayLikeFloat_co | None = ...,\n) -> NDArray[floating]: ...\n@overload\ndef percentile(\n a: _ArrayLikeComplex_co,\n q: _ArrayLikeFloat_co,\n axis: None = ...,\n out: None = ...,\n overwrite_input: bool = ...,\n method: _MethodKind = ...,\n keepdims: L[False] = ...,\n *,\n weights: _ArrayLikeFloat_co | None = ...,\n) -> NDArray[complexfloating]: ...\n@overload\ndef percentile(\n a: _ArrayLikeTD64_co,\n q: _ArrayLikeFloat_co,\n axis: None = ...,\n out: None = ...,\n overwrite_input: bool = ...,\n method: _MethodKind = ...,\n keepdims: L[False] = ...,\n *,\n weights: _ArrayLikeFloat_co | None = ...,\n) -> NDArray[timedelta64]: ...\n@overload\ndef percentile(\n a: _ArrayLikeDT64_co,\n q: _ArrayLikeFloat_co,\n axis: None = ...,\n out: None = ...,\n overwrite_input: bool = ...,\n method: _MethodKind = ...,\n keepdims: L[False] = ...,\n *,\n weights: _ArrayLikeFloat_co | None = ...,\n) -> NDArray[datetime64]: ...\n@overload\ndef percentile(\n a: _ArrayLikeObject_co,\n q: _ArrayLikeFloat_co,\n axis: None = ...,\n out: None = ...,\n overwrite_input: bool = ...,\n method: _MethodKind = ...,\n keepdims: L[False] = 
...,\n *,\n weights: _ArrayLikeFloat_co | None = ...,\n) -> NDArray[object_]: ...\n@overload\ndef percentile(\n a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co,\n q: _ArrayLikeFloat_co,\n axis: _ShapeLike | None = ...,\n out: None = ...,\n overwrite_input: bool = ...,\n method: _MethodKind = ...,\n keepdims: bool = ...,\n *,\n weights: _ArrayLikeFloat_co | None = ...,\n) -> Any: ...\n@overload\ndef percentile(\n a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co,\n q: _ArrayLikeFloat_co,\n axis: _ShapeLike | None,\n out: _ArrayT,\n overwrite_input: bool = ...,\n method: _MethodKind = ...,\n keepdims: bool = ...,\n *,\n weights: _ArrayLikeFloat_co | None = ...,\n) -> _ArrayT: ...\n@overload\ndef percentile(\n a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co,\n q: _ArrayLikeFloat_co,\n axis: _ShapeLike | None = ...,\n *,\n out: _ArrayT,\n overwrite_input: bool = ...,\n method: _MethodKind = ...,\n keepdims: bool = ...,\n weights: _ArrayLikeFloat_co | None = ...,\n) -> _ArrayT: ...\n\n# NOTE: Not an alias, but they do have identical signatures\n# (that we can reuse)\nquantile = percentile\n\n_ScalarT_fm = TypeVar(\n "_ScalarT_fm",\n bound=floating | complexfloating | timedelta64,\n)\n\nclass _SupportsRMulFloat(Protocol[_T_co]):\n def __rmul__(self, other: float, /) -> _T_co: ...\n\n@overload\ndef trapezoid( # type: ignore[overload-overlap]\n y: Sequence[_FloatLike_co],\n x: Sequence[_FloatLike_co] | None = ...,\n dx: float = ...,\n axis: SupportsIndex = ...,\n) -> float64: ...\n@overload\ndef trapezoid(\n y: Sequence[_ComplexLike_co],\n x: Sequence[_ComplexLike_co] | None = ...,\n dx: float = ...,\n axis: SupportsIndex = ...,\n) -> complex128: ...\n@overload\ndef trapezoid(\n y: _ArrayLike[bool_ | integer],\n x: _ArrayLike[bool_ | integer] | None = ...,\n dx: float = ...,\n axis: SupportsIndex = ...,\n) -> float64 | NDArray[float64]: ...\n@overload\ndef 
trapezoid( # type: ignore[overload-overlap]\n y: _ArrayLikeObject_co,\n x: _ArrayLikeFloat_co | _ArrayLikeObject_co | None = ...,\n dx: float = ...,\n axis: SupportsIndex = ...,\n) -> float | NDArray[object_]: ...\n@overload\ndef trapezoid(\n y: _ArrayLike[_ScalarT_fm],\n x: _ArrayLike[_ScalarT_fm] | _ArrayLikeInt_co | None = ...,\n dx: float = ...,\n axis: SupportsIndex = ...,\n) -> _ScalarT_fm | NDArray[_ScalarT_fm]: ...\n@overload\ndef trapezoid(\n y: Sequence[_SupportsRMulFloat[_T]],\n x: Sequence[_SupportsRMulFloat[_T] | _T] | None = ...,\n dx: float = ...,\n axis: SupportsIndex = ...,\n) -> _T: ...\n@overload\ndef trapezoid(\n y: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,\n x: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co | None = ...,\n dx: float = ...,\n axis: SupportsIndex = ...,\n) -> (\n floating | complexfloating | timedelta64\n | NDArray[floating | complexfloating | timedelta64 | object_]\n): ...\n\n@deprecated("Use 'trapezoid' instead")\ndef trapz(y: ArrayLike, x: ArrayLike | None = None, dx: float = 1.0, axis: int = -1) -> generic | NDArray[generic]: ...\n\n@overload\ndef meshgrid(\n *,\n copy: bool = ...,\n sparse: bool = ...,\n indexing: _MeshgridIdx = ...,\n) -> tuple[()]: ...\n@overload\ndef meshgrid(\n x1: _ArrayLike[_ScalarT],\n /,\n *,\n copy: bool = ...,\n sparse: bool = ...,\n indexing: _MeshgridIdx = ...,\n) -> tuple[NDArray[_ScalarT]]: ...\n@overload\ndef meshgrid(\n x1: ArrayLike,\n /,\n *,\n copy: bool = ...,\n sparse: bool = ...,\n indexing: _MeshgridIdx = ...,\n) -> tuple[NDArray[Any]]: ...\n@overload\ndef meshgrid(\n x1: _ArrayLike[_ScalarT1],\n x2: _ArrayLike[_ScalarT2],\n /,\n *,\n copy: bool = ...,\n sparse: bool = ...,\n indexing: _MeshgridIdx = ...,\n) -> tuple[NDArray[_ScalarT1], NDArray[_ScalarT2]]: ...\n@overload\ndef meshgrid(\n x1: ArrayLike,\n x2: _ArrayLike[_ScalarT],\n /,\n *,\n copy: bool = ...,\n sparse: bool = ...,\n indexing: _MeshgridIdx = ...,\n) -> tuple[NDArray[Any], 
NDArray[_ScalarT]]: ...\n@overload\ndef meshgrid(\n x1: _ArrayLike[_ScalarT],\n x2: ArrayLike,\n /,\n *,\n copy: bool = ...,\n sparse: bool = ...,\n indexing: _MeshgridIdx = ...,\n) -> tuple[NDArray[_ScalarT], NDArray[Any]]: ...\n@overload\ndef meshgrid(\n x1: ArrayLike,\n x2: ArrayLike,\n /,\n *,\n copy: bool = ...,\n sparse: bool = ...,\n indexing: _MeshgridIdx = ...,\n) -> tuple[NDArray[Any], NDArray[Any]]: ...\n@overload\ndef meshgrid(\n x1: ArrayLike,\n x2: ArrayLike,\n x3: ArrayLike,\n /,\n *,\n copy: bool = ...,\n sparse: bool = ...,\n indexing: _MeshgridIdx = ...,\n) -> tuple[NDArray[Any], NDArray[Any], NDArray[Any]]: ...\n@overload\ndef meshgrid(\n x1: ArrayLike,\n x2: ArrayLike,\n x3: ArrayLike,\n x4: ArrayLike,\n /,\n *,\n copy: bool = ...,\n sparse: bool = ...,\n indexing: _MeshgridIdx = ...,\n) -> tuple[NDArray[Any], NDArray[Any], NDArray[Any], NDArray[Any]]: ...\n@overload\ndef meshgrid(\n *xi: ArrayLike,\n copy: bool = ...,\n sparse: bool = ...,\n indexing: _MeshgridIdx = ...,\n) -> tuple[NDArray[Any], ...]: ...\n\n@overload\ndef delete(\n arr: _ArrayLike[_ScalarT],\n obj: slice | _ArrayLikeInt_co,\n axis: SupportsIndex | None = ...,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef delete(\n arr: ArrayLike,\n obj: slice | _ArrayLikeInt_co,\n axis: SupportsIndex | None = ...,\n) -> NDArray[Any]: ...\n\n@overload\ndef insert(\n arr: _ArrayLike[_ScalarT],\n obj: slice | _ArrayLikeInt_co,\n values: ArrayLike,\n axis: SupportsIndex | None = ...,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef insert(\n arr: ArrayLike,\n obj: slice | _ArrayLikeInt_co,\n values: ArrayLike,\n axis: SupportsIndex | None = ...,\n) -> NDArray[Any]: ...\n\ndef append(\n arr: ArrayLike,\n values: ArrayLike,\n axis: SupportsIndex | None = ...,\n) -> NDArray[Any]: ...\n\n@overload\ndef digitize(\n x: _FloatLike_co,\n bins: _ArrayLikeFloat_co,\n right: bool = ...,\n) -> intp: ...\n@overload\ndef digitize(\n x: _ArrayLikeFloat_co,\n bins: _ArrayLikeFloat_co,\n right: bool = ...,\n) -> 
NDArray[intp]: ...\n | .venv\Lib\site-packages\numpy\lib\_function_base_impl.pyi | _function_base_impl.pyi | Other | 25,101 | 0.95 | 0.117766 | 0.055378 | vue-tools | 555 | 2024-07-31T16:16:36.277078 | BSD-3-Clause | false | 97c08576ff274b70ba3e3b1cc5c6bdce |
"""\nHistogram-related functions\n"""\nimport contextlib\nimport functools\nimport operator\nimport warnings\n\nimport numpy as np\nfrom numpy._core import overrides\n\n__all__ = ['histogram', 'histogramdd', 'histogram_bin_edges']\n\narray_function_dispatch = functools.partial(\n overrides.array_function_dispatch, module='numpy')\n\n# range is a keyword argument to many functions, so save the builtin so they can\n# use it.\n_range = range\n\n\ndef _ptp(x):\n """Peak-to-peak value of x.\n\n This implementation avoids the problem of signed integer arrays having a\n peak-to-peak value that cannot be represented with the array's data type.\n This function returns an unsigned value for signed integer arrays.\n """\n return _unsigned_subtract(x.max(), x.min())\n\n\ndef _hist_bin_sqrt(x, range):\n """\n Square root histogram bin estimator.\n\n Bin width is inversely proportional to the data size. Used by many\n programs for its simplicity.\n\n Parameters\n ----------\n x : array_like\n Input data that is to be histogrammed, trimmed to range. May not\n be empty.\n\n Returns\n -------\n h : An estimate of the optimal bin width for the given data.\n """\n del range # unused\n return _ptp(x) / np.sqrt(x.size)\n\n\ndef _hist_bin_sturges(x, range):\n """\n Sturges histogram bin estimator.\n\n A very simplistic estimator based on the assumption of normality of\n the data. This estimator has poor performance for non-normal data,\n which becomes especially obvious for large data sets. The estimate\n depends only on size of the data.\n\n Parameters\n ----------\n x : array_like\n Input data that is to be histogrammed, trimmed to range. May not\n be empty.\n\n Returns\n -------\n h : An estimate of the optimal bin width for the given data.\n """\n del range # unused\n return _ptp(x) / (np.log2(x.size) + 1.0)\n\n\ndef _hist_bin_rice(x, range):\n """\n Rice histogram bin estimator.\n\n Another simple estimator with no normality assumption. 
It has better\n performance for large data than Sturges, but tends to overestimate\n the number of bins. The number of bins is proportional to the cube\n root of data size (asymptotically optimal). The estimate depends\n only on size of the data.\n\n Parameters\n ----------\n x : array_like\n Input data that is to be histogrammed, trimmed to range. May not\n be empty.\n\n Returns\n -------\n h : An estimate of the optimal bin width for the given data.\n """\n del range # unused\n return _ptp(x) / (2.0 * x.size ** (1.0 / 3))\n\n\ndef _hist_bin_scott(x, range):\n """\n Scott histogram bin estimator.\n\n The binwidth is proportional to the standard deviation of the data\n and inversely proportional to the cube root of data size\n (asymptotically optimal).\n\n Parameters\n ----------\n x : array_like\n Input data that is to be histogrammed, trimmed to range. May not\n be empty.\n\n Returns\n -------\n h : An estimate of the optimal bin width for the given data.\n """\n del range # unused\n return (24.0 * np.pi**0.5 / x.size)**(1.0 / 3.0) * np.std(x)\n\n\ndef _hist_bin_stone(x, range):\n """\n Histogram bin estimator based on minimizing the estimated integrated squared error (ISE).\n\n The number of bins is chosen by minimizing the estimated ISE against the unknown\n true distribution. The ISE is estimated using cross-validation and can be regarded\n as a generalization of Scott's rule.\n https://en.wikipedia.org/wiki/Histogram#Scott.27s_normal_reference_rule\n\n This paper by Stone appears to be the origination of this rule.\n https://digitalassets.lib.berkeley.edu/sdtr/ucb/text/34.pdf\n\n Parameters\n ----------\n x : array_like\n Input data that is to be histogrammed, trimmed to range. 
May not\n be empty.\n range : (float, float)\n The lower and upper range of the bins.\n\n Returns\n -------\n h : An estimate of the optimal bin width for the given data.\n """ # noqa: E501\n\n n = x.size\n ptp_x = _ptp(x)\n if n <= 1 or ptp_x == 0:\n return 0\n\n def jhat(nbins):\n hh = ptp_x / nbins\n p_k = np.histogram(x, bins=nbins, range=range)[0] / n\n return (2 - (n + 1) * p_k.dot(p_k)) / hh\n\n nbins_upper_bound = max(100, int(np.sqrt(n)))\n nbins = min(_range(1, nbins_upper_bound + 1), key=jhat)\n if nbins == nbins_upper_bound:\n warnings.warn("The number of bins estimated may be suboptimal.",\n RuntimeWarning, stacklevel=3)\n return ptp_x / nbins\n\n\ndef _hist_bin_doane(x, range):\n """\n Doane's histogram bin estimator.\n\n Improved version of Sturges' formula which works better for\n non-normal data. See\n stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning\n\n Parameters\n ----------\n x : array_like\n Input data that is to be histogrammed, trimmed to range. May not\n be empty.\n\n Returns\n -------\n h : An estimate of the optimal bin width for the given data.\n """\n del range # unused\n if x.size > 2:\n sg1 = np.sqrt(6.0 * (x.size - 2) / ((x.size + 1.0) * (x.size + 3)))\n sigma = np.std(x)\n if sigma > 0.0:\n # These three operations add up to\n # g1 = np.mean(((x - np.mean(x)) / sigma)**3)\n # but use only one temp array instead of three\n temp = x - np.mean(x)\n np.true_divide(temp, sigma, temp)\n np.power(temp, 3, temp)\n g1 = np.mean(temp)\n return _ptp(x) / (1.0 + np.log2(x.size) +\n np.log2(1.0 + np.absolute(g1) / sg1))\n return 0.0\n\n\ndef _hist_bin_fd(x, range):\n """\n The Freedman-Diaconis histogram bin estimator.\n\n The Freedman-Diaconis rule uses interquartile range (IQR) to\n estimate binwidth. It is considered a variation of the Scott rule\n with more robustness as the IQR is less affected by outliers than\n the standard deviation. 
However, the IQR depends on fewer points\n than the standard deviation, so it is less accurate, especially for\n long tailed distributions.\n\n If the IQR is 0, this function returns 0 for the bin width.\n Binwidth is inversely proportional to the cube root of data size\n (asymptotically optimal).\n\n Parameters\n ----------\n x : array_like\n Input data that is to be histogrammed, trimmed to range. May not\n be empty.\n\n Returns\n -------\n h : An estimate of the optimal bin width for the given data.\n """\n del range # unused\n iqr = np.subtract(*np.percentile(x, [75, 25]))\n return 2.0 * iqr * x.size ** (-1.0 / 3.0)\n\n\ndef _hist_bin_auto(x, range):\n """\n Histogram bin estimator that uses the minimum width of a relaxed\n Freedman-Diaconis and Sturges estimators if the FD bin width does\n not result in a large number of bins. The relaxed Freedman-Diaconis estimator\n limits the bin width to half the sqrt estimated to avoid small bins.\n\n The FD estimator is usually the most robust method, but its width\n estimate tends to be too large for small `x` and bad for data with limited\n variance. The Sturges estimator is quite good for small (<1000) datasets\n and is the default in the R language. This method gives good off-the-shelf\n behaviour.\n\n\n Parameters\n ----------\n x : array_like\n Input data that is to be histogrammed, trimmed to range. 
May not\n be empty.\n range : Tuple with range for the histogram\n\n Returns\n -------\n h : An estimate of the optimal bin width for the given data.\n\n See Also\n --------\n _hist_bin_fd, _hist_bin_sturges\n """\n fd_bw = _hist_bin_fd(x, range)\n sturges_bw = _hist_bin_sturges(x, range)\n sqrt_bw = _hist_bin_sqrt(x, range)\n # heuristic to limit the maximal number of bins\n fd_bw_corrected = max(fd_bw, sqrt_bw / 2)\n return min(fd_bw_corrected, sturges_bw)\n\n\n# Private dict initialized at module load time\n_hist_bin_selectors = {'stone': _hist_bin_stone,\n 'auto': _hist_bin_auto,\n 'doane': _hist_bin_doane,\n 'fd': _hist_bin_fd,\n 'rice': _hist_bin_rice,\n 'scott': _hist_bin_scott,\n 'sqrt': _hist_bin_sqrt,\n 'sturges': _hist_bin_sturges}\n\n\ndef _ravel_and_check_weights(a, weights):\n """ Check a and weights have matching shapes, and ravel both """\n a = np.asarray(a)\n\n # Ensure that the array is a "subtractable" dtype\n if a.dtype == np.bool:\n msg = f"Converting input from {a.dtype} to {np.uint8} for compatibility."\n warnings.warn(msg, RuntimeWarning, stacklevel=3)\n a = a.astype(np.uint8)\n\n if weights is not None:\n weights = np.asarray(weights)\n if weights.shape != a.shape:\n raise ValueError(\n 'weights should have the same shape as a.')\n weights = weights.ravel()\n a = a.ravel()\n return a, weights\n\n\ndef _get_outer_edges(a, range):\n """\n Determine the outer bin edges to use, from either the data or the range\n argument\n """\n if range is not None:\n first_edge, last_edge = range\n if first_edge > last_edge:\n raise ValueError(\n 'max must be larger than min in range parameter.')\n if not (np.isfinite(first_edge) and np.isfinite(last_edge)):\n raise ValueError(\n f"supplied range of [{first_edge}, {last_edge}] is not finite")\n elif a.size == 0:\n # handle empty arrays. 
Can't determine range, so use 0-1.\n first_edge, last_edge = 0, 1\n else:\n first_edge, last_edge = a.min(), a.max()\n if not (np.isfinite(first_edge) and np.isfinite(last_edge)):\n raise ValueError(\n f"autodetected range of [{first_edge}, {last_edge}] is not finite")\n\n # expand empty range to avoid divide by zero\n if first_edge == last_edge:\n first_edge = first_edge - 0.5\n last_edge = last_edge + 0.5\n\n return first_edge, last_edge\n\n\ndef _unsigned_subtract(a, b):\n """\n Subtract two values where a >= b, and produce an unsigned result\n\n This is needed when finding the difference between the upper and lower\n bound of an int16 histogram\n """\n # coerce to a single type\n signed_to_unsigned = {\n np.byte: np.ubyte,\n np.short: np.ushort,\n np.intc: np.uintc,\n np.int_: np.uint,\n np.longlong: np.ulonglong\n }\n dt = np.result_type(a, b)\n try:\n unsigned_dt = signed_to_unsigned[dt.type]\n except KeyError:\n return np.subtract(a, b, dtype=dt)\n else:\n # we know the inputs are integers, and we are deliberately casting\n # signed to unsigned. 
The input may be negative python integers so\n # ensure we pass in arrays with the initial dtype (related to NEP 50).\n return np.subtract(np.asarray(a, dtype=dt), np.asarray(b, dtype=dt),\n casting='unsafe', dtype=unsigned_dt)\n\n\ndef _get_bin_edges(a, bins, range, weights):\n """\n Computes the bins used internally by `histogram`.\n\n Parameters\n ==========\n a : ndarray\n Ravelled data array\n bins, range\n Forwarded arguments from `histogram`.\n weights : ndarray, optional\n Ravelled weights array, or None\n\n Returns\n =======\n bin_edges : ndarray\n Array of bin edges\n uniform_bins : (Number, Number, int):\n The upper bound, lowerbound, and number of bins, used in the optimized\n implementation of `histogram` that works on uniform bins.\n """\n # parse the overloaded bins argument\n n_equal_bins = None\n bin_edges = None\n\n if isinstance(bins, str):\n bin_name = bins\n # if `bins` is a string for an automatic method,\n # this will replace it with the number of bins calculated\n if bin_name not in _hist_bin_selectors:\n raise ValueError(\n f"{bin_name!r} is not a valid estimator for `bins`")\n if weights is not None:\n raise TypeError("Automated estimation of the number of "\n "bins is not supported for weighted data")\n\n first_edge, last_edge = _get_outer_edges(a, range)\n\n # truncate the range if needed\n if range is not None:\n keep = (a >= first_edge)\n keep &= (a <= last_edge)\n if not np.logical_and.reduce(keep):\n a = a[keep]\n\n if a.size == 0:\n n_equal_bins = 1\n else:\n # Do not call selectors on empty arrays\n width = _hist_bin_selectors[bin_name](a, (first_edge, last_edge))\n if width:\n if np.issubdtype(a.dtype, np.integer) and width < 1:\n width = 1\n delta = _unsigned_subtract(last_edge, first_edge)\n n_equal_bins = int(np.ceil(delta / width))\n else:\n # Width can be zero for some estimators, e.g. 
FD when\n # the IQR of the data is zero.\n n_equal_bins = 1\n\n elif np.ndim(bins) == 0:\n try:\n n_equal_bins = operator.index(bins)\n except TypeError as e:\n raise TypeError(\n '`bins` must be an integer, a string, or an array') from e\n if n_equal_bins < 1:\n raise ValueError('`bins` must be positive, when an integer')\n\n first_edge, last_edge = _get_outer_edges(a, range)\n\n elif np.ndim(bins) == 1:\n bin_edges = np.asarray(bins)\n if np.any(bin_edges[:-1] > bin_edges[1:]):\n raise ValueError(\n '`bins` must increase monotonically, when an array')\n\n else:\n raise ValueError('`bins` must be 1d, when an array')\n\n if n_equal_bins is not None:\n # gh-10322 means that type resolution rules are dependent on array\n # shapes. To avoid this causing problems, we pick a type now and stick\n # with it throughout.\n bin_type = np.result_type(first_edge, last_edge, a)\n if np.issubdtype(bin_type, np.integer):\n bin_type = np.result_type(bin_type, float)\n\n # bin edges must be computed\n bin_edges = np.linspace(\n first_edge, last_edge, n_equal_bins + 1,\n endpoint=True, dtype=bin_type)\n if np.any(bin_edges[:-1] >= bin_edges[1:]):\n raise ValueError(\n f'Too many bins for data range. 
Cannot create {n_equal_bins} '\n f'finite-sized bins.')\n return bin_edges, (first_edge, last_edge, n_equal_bins)\n else:\n return bin_edges, None\n\n\ndef _search_sorted_inclusive(a, v):\n """\n Like `searchsorted`, but where the last item in `v` is placed on the right.\n\n In the context of a histogram, this makes the last bin edge inclusive\n """\n return np.concatenate((\n a.searchsorted(v[:-1], 'left'),\n a.searchsorted(v[-1:], 'right')\n ))\n\n\ndef _histogram_bin_edges_dispatcher(a, bins=None, range=None, weights=None):\n return (a, bins, weights)\n\n\n@array_function_dispatch(_histogram_bin_edges_dispatcher)\ndef histogram_bin_edges(a, bins=10, range=None, weights=None):\n r"""\n Function to calculate only the edges of the bins used by the `histogram`\n function.\n\n Parameters\n ----------\n a : array_like\n Input data. The histogram is computed over the flattened array.\n bins : int or sequence of scalars or str, optional\n If `bins` is an int, it defines the number of equal-width\n bins in the given range (10, by default). If `bins` is a\n sequence, it defines the bin edges, including the rightmost\n edge, allowing for non-uniform bin widths.\n\n If `bins` is a string from the list below, `histogram_bin_edges` will\n use the method chosen to calculate the optimal bin width and\n consequently the number of bins (see the Notes section for more detail\n on the estimators) from the data that falls within the requested range.\n While the bin width will be optimal for the actual data\n in the range, the number of bins will be computed to fill the\n entire range, including the empty portions. For visualisation,\n using the 'auto' option is suggested. 
Weighted data is not\n supported for automated bin size selection.\n\n 'auto'\n Minimum bin width between the 'sturges' and 'fd' estimators.\n Provides good all-around performance.\n\n 'fd' (Freedman Diaconis Estimator)\n Robust (resilient to outliers) estimator that takes into\n account data variability and data size.\n\n 'doane'\n An improved version of Sturges' estimator that works better\n with non-normal datasets.\n\n 'scott'\n Less robust estimator that takes into account data variability\n and data size.\n\n 'stone'\n Estimator based on leave-one-out cross-validation estimate of\n the integrated squared error. Can be regarded as a generalization\n of Scott's rule.\n\n 'rice'\n Estimator does not take variability into account, only data\n size. Commonly overestimates number of bins required.\n\n 'sturges'\n R's default method, only accounts for data size. Only\n optimal for gaussian data and underestimates number of bins\n for large non-gaussian datasets.\n\n 'sqrt'\n Square root (of data size) estimator, used by Excel and\n other programs for its speed and simplicity.\n\n range : (float, float), optional\n The lower and upper range of the bins. If not provided, range\n is simply ``(a.min(), a.max())``. Values outside the range are\n ignored. The first element of the range must be less than or\n equal to the second. `range` affects the automatic bin\n computation as well. While bin width is computed to be optimal\n based on the actual data within `range`, the bin count will fill\n the entire range including portions containing no data.\n\n weights : array_like, optional\n An array of weights, of the same shape as `a`. Each value in\n `a` only contributes its associated weight towards the bin count\n (instead of 1). 
This is currently not used by any of the bin estimators,\n but may be in the future.\n\n Returns\n -------\n bin_edges : array of dtype float\n The edges to pass into `histogram`\n\n See Also\n --------\n histogram\n\n Notes\n -----\n The methods to estimate the optimal number of bins are well founded\n in literature, and are inspired by the choices R provides for\n histogram visualisation. Note that having the number of bins\n proportional to :math:`n^{1/3}` is asymptotically optimal, which is\n why it appears in most estimators. These are simply plug-in methods\n that give good starting points for number of bins. In the equations\n below, :math:`h` is the binwidth and :math:`n_h` is the number of\n bins. All estimators that compute bin counts are recast to bin width\n using the `ptp` of the data. The final bin count is obtained from\n ``np.round(np.ceil(range / h))``. The final bin width is often less\n than what is returned by the estimators below.\n\n 'auto' (minimum bin width of the 'sturges' and 'fd' estimators)\n A compromise to get a good value. For small datasets the Sturges\n value will usually be chosen, while larger datasets will usually\n default to FD. Avoids the overly conservative behaviour of FD\n and Sturges for small and large datasets respectively.\n Switchover point is usually :math:`a.size \approx 1000`.\n\n 'fd' (Freedman Diaconis Estimator)\n .. math:: h = 2 \frac{IQR}{n^{1/3}}\n\n The binwidth is proportional to the interquartile range (IQR)\n and inversely proportional to cube root of a.size. Can be too\n conservative for small datasets, but is quite good for large\n datasets. The IQR is very robust to outliers.\n\n 'scott'\n .. math:: h = \sigma \sqrt[3]{\frac{24 \sqrt{\pi}}{n}}\n\n The binwidth is proportional to the standard deviation of the\n data and inversely proportional to cube root of ``x.size``. Can\n be too conservative for small datasets, but is quite good for\n large datasets. 
The standard deviation is not very robust to\n outliers. Values are very similar to the Freedman-Diaconis\n estimator in the absence of outliers.\n\n 'rice'\n .. math:: n_h = 2n^{1/3}\n\n The number of bins is only proportional to cube root of\n ``a.size``. It tends to overestimate the number of bins and it\n does not take into account data variability.\n\n 'sturges'\n .. math:: n_h = \log _{2}(n) + 1\n\n The number of bins is the base 2 log of ``a.size``. This\n estimator assumes normality of data and is too conservative for\n larger, non-normal datasets. This is the default method in R's\n ``hist`` method.\n\n 'doane'\n .. math:: n_h = 1 + \log_{2}(n) +\n \log_{2}\left(1 + \frac{|g_1|}{\sigma_{g_1}}\right)\n\n g_1 = mean\left[\left(\frac{x - \mu}{\sigma}\right)^3\right]\n\n \sigma_{g_1} = \sqrt{\frac{6(n - 2)}{(n + 1)(n + 3)}}\n\n An improved version of Sturges' formula that produces better\n estimates for non-normal datasets. This estimator attempts to\n account for the skew of the data.\n\n 'sqrt'\n .. math:: n_h = \sqrt n\n\n The simplest and fastest estimator. Only takes into account the\n data size.\n\n Additionally, if the data is of integer dtype, then the binwidth will never\n be less than 1.\n\n Examples\n --------\n >>> import numpy as np\n >>> arr = np.array([0, 0, 0, 1, 2, 3, 3, 4, 5])\n >>> np.histogram_bin_edges(arr, bins='auto', range=(0, 1))\n array([0. , 0.25, 0.5 , 0.75, 1. ])\n >>> np.histogram_bin_edges(arr, bins=2)\n array([0. , 2.5, 5. 
])\n\n For consistency with histogram, an array of pre-computed bins is\n passed through unmodified:\n\n >>> np.histogram_bin_edges(arr, [1, 2])\n array([1, 2])\n\n This function allows one set of bins to be computed, and reused across\n multiple histograms:\n\n >>> shared_bins = np.histogram_bin_edges(arr, bins='auto')\n >>> shared_bins\n array([0., 1., 2., 3., 4., 5.])\n\n >>> group_id = np.array([0, 1, 1, 0, 1, 1, 0, 1, 1])\n >>> hist_0, _ = np.histogram(arr[group_id == 0], bins=shared_bins)\n >>> hist_1, _ = np.histogram(arr[group_id == 1], bins=shared_bins)\n\n >>> hist_0; hist_1\n array([1, 1, 0, 1, 0])\n array([2, 0, 1, 1, 2])\n\n Which gives more easily comparable results than using separate bins for\n each histogram:\n\n >>> hist_0, bins_0 = np.histogram(arr[group_id == 0], bins='auto')\n >>> hist_1, bins_1 = np.histogram(arr[group_id == 1], bins='auto')\n >>> hist_0; hist_1\n array([1, 1, 1])\n array([2, 1, 1, 2])\n >>> bins_0; bins_1\n array([0., 1., 2., 3.])\n array([0. , 1.25, 2.5 , 3.75, 5. ])\n\n """\n a, weights = _ravel_and_check_weights(a, weights)\n bin_edges, _ = _get_bin_edges(a, bins, range, weights)\n return bin_edges\n\n\ndef _histogram_dispatcher(\n a, bins=None, range=None, density=None, weights=None):\n return (a, bins, weights)\n\n\n@array_function_dispatch(_histogram_dispatcher)\ndef histogram(a, bins=10, range=None, density=None, weights=None):\n r"""\n Compute the histogram of a dataset.\n\n Parameters\n ----------\n a : array_like\n Input data. The histogram is computed over the flattened array.\n bins : int or sequence of scalars or str, optional\n If `bins` is an int, it defines the number of equal-width\n bins in the given range (10, by default). 
If `bins` is a\n sequence, it defines a monotonically increasing array of bin edges,\n including the rightmost edge, allowing for non-uniform bin widths.\n\n If `bins` is a string, it defines the method used to calculate the\n optimal bin width, as defined by `histogram_bin_edges`.\n\n range : (float, float), optional\n The lower and upper range of the bins. If not provided, range\n is simply ``(a.min(), a.max())``. Values outside the range are\n ignored. The first element of the range must be less than or\n equal to the second. `range` affects the automatic bin\n computation as well. While bin width is computed to be optimal\n based on the actual data within `range`, the bin count will fill\n the entire range including portions containing no data.\n weights : array_like, optional\n An array of weights, of the same shape as `a`. Each value in\n `a` only contributes its associated weight towards the bin count\n (instead of 1). If `density` is True, the weights are\n normalized, so that the integral of the density over the range\n remains 1.\n Please note that the ``dtype`` of `weights` will also become the\n ``dtype`` of the returned accumulator (`hist`), so it must be\n large enough to hold accumulated values as well.\n density : bool, optional\n If ``False``, the result will contain the number of samples in\n each bin. If ``True``, the result is the value of the\n probability *density* function at the bin, normalized such that\n the *integral* over the range is 1. Note that the sum of the\n histogram values will not be equal to 1 unless bins of unity\n width are chosen; it is not a probability *mass* function.\n\n Returns\n -------\n hist : array\n The values of the histogram. See `density` and `weights` for a\n description of the possible semantics. 
If `weights` are given,\n ``hist.dtype`` will be taken from `weights`.\n bin_edges : array of dtype float\n Return the bin edges ``(length(hist)+1)``.\n\n\n See Also\n --------\n histogramdd, bincount, searchsorted, digitize, histogram_bin_edges\n\n Notes\n -----\n All but the last (righthand-most) bin is half-open. In other words,\n if `bins` is::\n\n [1, 2, 3, 4]\n\n then the first bin is ``[1, 2)`` (including 1, but excluding 2) and\n the second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which\n *includes* 4.\n\n\n Examples\n --------\n >>> import numpy as np\n >>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])\n (array([0, 2, 1]), array([0, 1, 2, 3]))\n >>> np.histogram(np.arange(4), bins=np.arange(5), density=True)\n (array([0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))\n >>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])\n (array([1, 4, 1]), array([0, 1, 2, 3]))\n\n >>> a = np.arange(5)\n >>> hist, bin_edges = np.histogram(a, density=True)\n >>> hist\n array([0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])\n >>> hist.sum()\n 2.4999999999999996\n >>> np.sum(hist * np.diff(bin_edges))\n 1.0\n\n Automated Bin Selection Methods example, using 2 peak random data\n with 2000 points.\n\n .. 
plot::\n :include-source:\n\n import matplotlib.pyplot as plt\n import numpy as np\n\n rng = np.random.RandomState(10) # deterministic random data\n a = np.hstack((rng.normal(size=1000),\n rng.normal(loc=5, scale=2, size=1000)))\n plt.hist(a, bins='auto') # arguments are passed to np.histogram\n plt.title("Histogram with 'auto' bins")\n plt.show()\n\n """\n a, weights = _ravel_and_check_weights(a, weights)\n\n bin_edges, uniform_bins = _get_bin_edges(a, bins, range, weights)\n\n # Histogram is an integer or a float array depending on the weights.\n if weights is None:\n ntype = np.dtype(np.intp)\n else:\n ntype = weights.dtype\n\n # We set a block size, as this allows us to iterate over chunks when\n # computing histograms, to minimize memory usage.\n BLOCK = 65536\n\n # The fast path uses bincount, but that only works for certain types\n # of weight\n simple_weights = (\n weights is None or\n np.can_cast(weights.dtype, np.double) or\n np.can_cast(weights.dtype, complex)\n )\n\n if uniform_bins is not None and simple_weights:\n # Fast algorithm for equal bins\n # We now convert values of a to bin indices, under the assumption of\n # equal bin widths (which is valid here).\n first_edge, last_edge, n_equal_bins = uniform_bins\n\n # Initialize empty histogram\n n = np.zeros(n_equal_bins, ntype)\n\n # Pre-compute histogram scaling factor\n norm_numerator = n_equal_bins\n norm_denom = _unsigned_subtract(last_edge, first_edge)\n\n # We iterate over blocks here for two reasons: the first is that for\n # large arrays, it is actually faster (for example for a 10^8 array it\n # is 2x as fast) and it results in a memory footprint 3x lower in the\n # limit of large arrays.\n for i in _range(0, len(a), BLOCK):\n tmp_a = a[i:i + BLOCK]\n if weights is None:\n tmp_w = None\n else:\n tmp_w = weights[i:i + BLOCK]\n\n # Only include values in the right range\n keep = (tmp_a >= first_edge)\n keep &= (tmp_a <= last_edge)\n if not np.logical_and.reduce(keep):\n tmp_a = tmp_a[keep]\n if 
tmp_w is not None:\n tmp_w = tmp_w[keep]\n\n # This cast ensures no type promotions occur below, which gh-10322\n # make unpredictable. Getting it wrong leads to precision errors\n # like gh-8123.\n tmp_a = tmp_a.astype(bin_edges.dtype, copy=False)\n\n # Compute the bin indices, and for values that lie exactly on\n # last_edge we need to subtract one\n f_indices = ((_unsigned_subtract(tmp_a, first_edge) / norm_denom)\n * norm_numerator)\n indices = f_indices.astype(np.intp)\n indices[indices == n_equal_bins] -= 1\n\n # The index computation is not guaranteed to give exactly\n # consistent results within ~1 ULP of the bin edges.\n decrement = tmp_a < bin_edges[indices]\n indices[decrement] -= 1\n # The last bin includes the right edge. The other bins do not.\n increment = ((tmp_a >= bin_edges[indices + 1])\n & (indices != n_equal_bins - 1))\n indices[increment] += 1\n\n # We now compute the histogram using bincount\n if ntype.kind == 'c':\n n.real += np.bincount(indices, weights=tmp_w.real,\n minlength=n_equal_bins)\n n.imag += np.bincount(indices, weights=tmp_w.imag,\n minlength=n_equal_bins)\n else:\n n += np.bincount(indices, weights=tmp_w,\n minlength=n_equal_bins).astype(ntype)\n else:\n # Compute via cumulative histogram\n cum_n = np.zeros(bin_edges.shape, ntype)\n if weights is None:\n for i in _range(0, len(a), BLOCK):\n sa = np.sort(a[i:i + BLOCK])\n cum_n += _search_sorted_inclusive(sa, bin_edges)\n else:\n zero = np.zeros(1, dtype=ntype)\n for i in _range(0, len(a), BLOCK):\n tmp_a = a[i:i + BLOCK]\n tmp_w = weights[i:i + BLOCK]\n sorting_index = np.argsort(tmp_a)\n sa = tmp_a[sorting_index]\n sw = tmp_w[sorting_index]\n cw = np.concatenate((zero, sw.cumsum()))\n bin_index = _search_sorted_inclusive(sa, bin_edges)\n cum_n += cw[bin_index]\n\n n = np.diff(cum_n)\n\n if density:\n db = np.array(np.diff(bin_edges), float)\n return n / db / n.sum(), bin_edges\n\n return n, bin_edges\n\n\ndef _histogramdd_dispatcher(sample, bins=None, range=None, 
density=None,\n weights=None):\n if hasattr(sample, 'shape'): # same condition as used in histogramdd\n yield sample\n else:\n yield from sample\n with contextlib.suppress(TypeError):\n yield from bins\n yield weights\n\n\n@array_function_dispatch(_histogramdd_dispatcher)\ndef histogramdd(sample, bins=10, range=None, density=None, weights=None):\n """\n Compute the multidimensional histogram of some data.\n\n Parameters\n ----------\n sample : (N, D) array, or (N, D) array_like\n The data to be histogrammed.\n\n Note the unusual interpretation of sample when an array_like:\n\n * When an array, each row is a coordinate in a D-dimensional space -\n such as ``histogramdd(np.array([p1, p2, p3]))``.\n * When an array_like, each element is the list of values for single\n coordinate - such as ``histogramdd((X, Y, Z))``.\n\n The first form should be preferred.\n\n bins : sequence or int, optional\n The bin specification:\n\n * A sequence of arrays describing the monotonically increasing bin\n edges along each dimension.\n * The number of bins for each dimension (nx, ny, ... =bins)\n * The number of bins for all dimensions (nx=ny=...=bins).\n\n range : sequence, optional\n A sequence of length D, each an optional (lower, upper) tuple giving\n the outer bin edges to be used if the edges are not given explicitly in\n `bins`.\n An entry of None in the sequence results in the minimum and maximum\n values being used for the corresponding dimension.\n The default, None, is equivalent to passing a tuple of D None values.\n density : bool, optional\n If False, the default, returns the number of samples in each bin.\n If True, returns the probability *density* function at the bin,\n ``bin_count / sample_count / bin_volume``.\n weights : (N,) array_like, optional\n An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.\n Weights are normalized to 1 if density is True. 
If density is False,\n the values of the returned histogram are equal to the sum of the\n weights belonging to the samples falling into each bin.\n\n Returns\n -------\n H : ndarray\n The multidimensional histogram of sample x. See density and weights\n for the different possible semantics.\n edges : tuple of ndarrays\n A tuple of D arrays describing the bin edges for each dimension.\n\n See Also\n --------\n histogram: 1-D histogram\n histogram2d: 2-D histogram\n\n Examples\n --------\n >>> import numpy as np\n >>> rng = np.random.default_rng()\n >>> r = rng.normal(size=(100,3))\n >>> H, edges = np.histogramdd(r, bins = (5, 8, 4))\n >>> H.shape, edges[0].size, edges[1].size, edges[2].size\n ((5, 8, 4), 6, 9, 5)\n\n """\n\n try:\n # Sample is an ND-array.\n N, D = sample.shape\n except (AttributeError, ValueError):\n # Sample is a sequence of 1D arrays.\n sample = np.atleast_2d(sample).T\n N, D = sample.shape\n\n nbin = np.empty(D, np.intp)\n edges = D * [None]\n dedges = D * [None]\n if weights is not None:\n weights = np.asarray(weights)\n\n try:\n M = len(bins)\n if M != D:\n raise ValueError(\n 'The dimension of bins must be equal to the dimension of the '\n 'sample x.')\n except TypeError:\n # bins is an integer\n bins = D * [bins]\n\n # normalize the range argument\n if range is None:\n range = (None,) * D\n elif len(range) != D:\n raise ValueError('range argument must have one entry per dimension')\n\n # Create edge arrays\n for i in _range(D):\n if np.ndim(bins[i]) == 0:\n if bins[i] < 1:\n raise ValueError(\n f'`bins[{i}]` must be positive, when an integer')\n smin, smax = _get_outer_edges(sample[:, i], range[i])\n try:\n n = operator.index(bins[i])\n\n except TypeError as e:\n raise TypeError(\n f"`bins[{i}]` must be an integer, when a scalar"\n ) from e\n\n edges[i] = np.linspace(smin, smax, n + 1)\n elif np.ndim(bins[i]) == 1:\n edges[i] = np.asarray(bins[i])\n if np.any(edges[i][:-1] > edges[i][1:]):\n raise ValueError(\n f'`bins[{i}]` must be 
monotonically increasing, when an array')\n else:\n raise ValueError(\n f'`bins[{i}]` must be a scalar or 1d array')\n\n nbin[i] = len(edges[i]) + 1 # includes an outlier on each end\n dedges[i] = np.diff(edges[i])\n\n # Compute the bin number each sample falls into.\n Ncount = tuple(\n # avoid np.digitize to work around gh-11022\n np.searchsorted(edges[i], sample[:, i], side='right')\n for i in _range(D)\n )\n\n # Using digitize, values that fall on an edge are put in the right bin.\n # For the rightmost bin, we want values equal to the right edge to be\n # counted in the last bin, and not as an outlier.\n for i in _range(D):\n # Find which points are on the rightmost edge.\n on_edge = (sample[:, i] == edges[i][-1])\n # Shift these points one bin to the left.\n Ncount[i][on_edge] -= 1\n\n # Compute the sample indices in the flattened histogram matrix.\n # This raises an error if the array is too large.\n xy = np.ravel_multi_index(Ncount, nbin)\n\n # Compute the number of repetitions in xy and assign it to the\n # flattened histmat.\n hist = np.bincount(xy, weights, minlength=nbin.prod())\n\n # Shape into a proper matrix\n hist = hist.reshape(nbin)\n\n # This preserves the (bad) behavior observed in gh-7845, for now.\n hist = hist.astype(float, casting='safe')\n\n # Remove outliers (indices 0 and -1 for each dimension).\n core = D * (slice(1, -1),)\n hist = hist[core]\n\n if density:\n # calculate the probability density function\n s = hist.sum()\n for i in _range(D):\n shape = np.ones(D, int)\n shape[i] = nbin[i] - 2\n hist = hist / dedges[i].reshape(shape)\n hist /= s\n\n if (hist.shape != nbin - 2).any():\n raise RuntimeError(\n "Internal Shape Error")\n return hist, edges\n | .venv\Lib\site-packages\numpy\lib\_histograms_impl.py | _histograms_impl.py | Python | 39,517 | 0.95 | 0.142857 | 0.086809 | python-kit | 461 | 2025-06-12T08:00:49.292759 | BSD-3-Clause | false | 5d3fa42522bed5a1473ac265e19380d6 |
from collections.abc import Sequence\nfrom typing import (\n Any,\n SupportsIndex,\n TypeAlias,\n)\nfrom typing import (\n Literal as L,\n)\n\nfrom numpy._typing import (\n ArrayLike,\n NDArray,\n)\n\n__all__ = ["histogram", "histogramdd", "histogram_bin_edges"]\n\n_BinKind: TypeAlias = L[\n "stone",\n "auto",\n "doane",\n "fd",\n "rice",\n "scott",\n "sqrt",\n "sturges",\n]\n\ndef histogram_bin_edges(\n a: ArrayLike,\n bins: _BinKind | SupportsIndex | ArrayLike = ...,\n range: tuple[float, float] | None = ...,\n weights: ArrayLike | None = ...,\n) -> NDArray[Any]: ...\n\ndef histogram(\n a: ArrayLike,\n bins: _BinKind | SupportsIndex | ArrayLike = ...,\n range: tuple[float, float] | None = ...,\n density: bool = ...,\n weights: ArrayLike | None = ...,\n) -> tuple[NDArray[Any], NDArray[Any]]: ...\n\ndef histogramdd(\n sample: ArrayLike,\n bins: SupportsIndex | ArrayLike = ...,\n range: Sequence[tuple[float, float]] = ...,\n density: bool | None = ...,\n weights: ArrayLike | None = ...,\n) -> tuple[NDArray[Any], tuple[NDArray[Any], ...]]: ...\n | .venv\Lib\site-packages\numpy\lib\_histograms_impl.pyi | _histograms_impl.pyi | Other | 1,143 | 0.85 | 0.06 | 0 | python-kit | 666 | 2024-01-14T06:09:32.332255 | BSD-3-Clause | false | 9331bdb106187219ebc3893533291908 |
import functools\nimport math\nimport sys\nimport warnings\n\nimport numpy as np\nimport numpy._core.numeric as _nx\nimport numpy.matrixlib as matrixlib\nfrom numpy._core import linspace, overrides\nfrom numpy._core.multiarray import ravel_multi_index, unravel_index\nfrom numpy._core.numeric import ScalarType, array\nfrom numpy._core.numerictypes import issubdtype\nfrom numpy._utils import set_module\nfrom numpy.lib._function_base_impl import diff\nfrom numpy.lib.stride_tricks import as_strided\n\narray_function_dispatch = functools.partial(\n overrides.array_function_dispatch, module='numpy')\n\n\n__all__ = [\n 'ravel_multi_index', 'unravel_index', 'mgrid', 'ogrid', 'r_', 'c_',\n 's_', 'index_exp', 'ix_', 'ndenumerate', 'ndindex', 'fill_diagonal',\n 'diag_indices', 'diag_indices_from'\n]\n\n\ndef _ix__dispatcher(*args):\n return args\n\n\n@array_function_dispatch(_ix__dispatcher)\ndef ix_(*args):\n """\n Construct an open mesh from multiple sequences.\n\n This function takes N 1-D sequences and returns N outputs with N\n dimensions each, such that the shape is 1 in all but one dimension\n and the dimension with the non-unit shape value cycles through all\n N dimensions.\n\n Using `ix_` one can quickly construct index arrays that will index\n the cross product. ``a[np.ix_([1,3],[2,5])]`` returns the array\n ``[[a[1,2] a[1,5]], [a[3,2] a[3,5]]]``.\n\n Parameters\n ----------\n args : 1-D sequences\n Each sequence should be of integer or boolean type.\n Boolean sequences will be interpreted as boolean masks for the\n corresponding dimension (equivalent to passing in\n ``np.nonzero(boolean_sequence)``).\n\n Returns\n -------\n out : tuple of ndarrays\n N arrays with N dimensions each, with N the number of input\n sequences. 
Together these arrays form an open mesh.\n\n See Also\n --------\n ogrid, mgrid, meshgrid\n\n Examples\n --------\n >>> import numpy as np\n >>> a = np.arange(10).reshape(2, 5)\n >>> a\n array([[0, 1, 2, 3, 4],\n [5, 6, 7, 8, 9]])\n >>> ixgrid = np.ix_([0, 1], [2, 4])\n >>> ixgrid\n (array([[0],\n [1]]), array([[2, 4]]))\n >>> ixgrid[0].shape, ixgrid[1].shape\n ((2, 1), (1, 2))\n >>> a[ixgrid]\n array([[2, 4],\n [7, 9]])\n\n >>> ixgrid = np.ix_([True, True], [2, 4])\n >>> a[ixgrid]\n array([[2, 4],\n [7, 9]])\n >>> ixgrid = np.ix_([True, True], [False, False, True, False, True])\n >>> a[ixgrid]\n array([[2, 4],\n [7, 9]])\n\n """\n out = []\n nd = len(args)\n for k, new in enumerate(args):\n if not isinstance(new, _nx.ndarray):\n new = np.asarray(new)\n if new.size == 0:\n # Explicitly type empty arrays to avoid float default\n new = new.astype(_nx.intp)\n if new.ndim != 1:\n raise ValueError("Cross index must be 1 dimensional")\n if issubdtype(new.dtype, _nx.bool):\n new, = new.nonzero()\n new = new.reshape((1,) * k + (new.size,) + (1,) * (nd - k - 1))\n out.append(new)\n return tuple(out)\n\n\nclass nd_grid:\n """\n Construct a multi-dimensional "meshgrid".\n\n ``grid = nd_grid()`` creates an instance which will return a mesh-grid\n when indexed. The dimension and number of the output arrays are equal\n to the number of indexing dimensions. If the step length is not a\n complex number, then the stop is not inclusive.\n\n However, if the step length is a **complex number** (e.g. 5j), then the\n integer part of its magnitude is interpreted as specifying the\n number of points to create between the start and stop values, where\n the stop value **is inclusive**.\n\n If instantiated with an argument of ``sparse=True``, the mesh-grid is\n open (or not fleshed out) so that only one-dimension of each returned\n argument is greater than 1.\n\n Parameters\n ----------\n sparse : bool, optional\n Whether the grid is sparse or not. 
Default is False.\n\n Notes\n -----\n Two instances of `nd_grid` are made available in the NumPy namespace,\n `mgrid` and `ogrid`, approximately defined as::\n\n mgrid = nd_grid(sparse=False)\n ogrid = nd_grid(sparse=True)\n\n Users should use these pre-defined instances instead of using `nd_grid`\n directly.\n """\n __slots__ = ('sparse',)\n\n def __init__(self, sparse=False):\n self.sparse = sparse\n\n def __getitem__(self, key):\n try:\n size = []\n # Mimic the behavior of `np.arange` and use a data type\n # which is at least as large as `np.int_`\n num_list = [0]\n for k in range(len(key)):\n step = key[k].step\n start = key[k].start\n stop = key[k].stop\n if start is None:\n start = 0\n if step is None:\n step = 1\n if isinstance(step, (_nx.complexfloating, complex)):\n step = abs(step)\n size.append(int(step))\n else:\n size.append(\n math.ceil((stop - start) / step))\n num_list += [start, stop, step]\n typ = _nx.result_type(*num_list)\n if self.sparse:\n nn = [_nx.arange(_x, dtype=_t)\n for _x, _t in zip(size, (typ,) * len(size))]\n else:\n nn = _nx.indices(size, typ)\n for k, kk in enumerate(key):\n step = kk.step\n start = kk.start\n if start is None:\n start = 0\n if step is None:\n step = 1\n if isinstance(step, (_nx.complexfloating, complex)):\n step = int(abs(step))\n if step != 1:\n step = (kk.stop - start) / float(step - 1)\n nn[k] = (nn[k] * step + start)\n if self.sparse:\n slobj = [_nx.newaxis] * len(size)\n for k in range(len(size)):\n slobj[k] = slice(None, None)\n nn[k] = nn[k][tuple(slobj)]\n slobj[k] = _nx.newaxis\n return tuple(nn) # ogrid -> tuple of arrays\n return nn # mgrid -> ndarray\n except (IndexError, TypeError):\n step = key.step\n stop = key.stop\n start = key.start\n if start is None:\n start = 0\n if isinstance(step, (_nx.complexfloating, complex)):\n # Prevent the (potential) creation of integer arrays\n step_float = abs(step)\n step = length = int(step_float)\n if step != 1:\n step = (key.stop - start) / float(step - 1)\n typ 
= _nx.result_type(start, stop, step_float)\n return _nx.arange(0, length, 1, dtype=typ) * step + start\n else:\n return _nx.arange(start, stop, step)\n\n\nclass MGridClass(nd_grid):\n """\n An instance which returns a dense multi-dimensional "meshgrid".\n\n An instance which returns a dense (or fleshed out) mesh-grid\n when indexed, so that each returned argument has the same shape.\n The dimensions and number of the output arrays are equal to the\n number of indexing dimensions. If the step length is not a complex\n number, then the stop is not inclusive.\n\n However, if the step length is a **complex number** (e.g. 5j), then\n the integer part of its magnitude is interpreted as specifying the\n number of points to create between the start and stop values, where\n the stop value **is inclusive**.\n\n Returns\n -------\n mesh-grid : ndarray\n A single array, containing a set of `ndarray`\\ s all of the same\n dimensions. stacked along the first axis.\n\n See Also\n --------\n ogrid : like `mgrid` but returns open (not fleshed out) mesh grids\n meshgrid: return coordinate matrices from coordinate vectors\n r_ : array concatenator\n :ref:`how-to-partition`\n\n Examples\n --------\n >>> import numpy as np\n >>> np.mgrid[0:5, 0:5]\n array([[[0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1],\n [2, 2, 2, 2, 2],\n [3, 3, 3, 3, 3],\n [4, 4, 4, 4, 4]],\n [[0, 1, 2, 3, 4],\n [0, 1, 2, 3, 4],\n [0, 1, 2, 3, 4],\n [0, 1, 2, 3, 4],\n [0, 1, 2, 3, 4]]])\n >>> np.mgrid[-1:1:5j]\n array([-1. , -0.5, 0. , 0.5, 1. ])\n\n >>> np.mgrid[0:4].shape\n (4,)\n >>> np.mgrid[0:4, 0:5].shape\n (2, 4, 5)\n >>> np.mgrid[0:4, 0:5, 0:6].shape\n (3, 4, 5, 6)\n\n """\n __slots__ = ()\n\n def __init__(self):\n super().__init__(sparse=False)\n\n\nmgrid = MGridClass()\n\n\nclass OGridClass(nd_grid):\n """\n An instance which returns an open multi-dimensional "meshgrid".\n\n An instance which returns an open (i.e. 
not fleshed out) mesh-grid\n when indexed, so that only one dimension of each returned array is\n greater than 1. The dimension and number of the output arrays are\n equal to the number of indexing dimensions. If the step length is\n not a complex number, then the stop is not inclusive.\n\n However, if the step length is a **complex number** (e.g. 5j), then\n the integer part of its magnitude is interpreted as specifying the\n number of points to create between the start and stop values, where\n the stop value **is inclusive**.\n\n Returns\n -------\n mesh-grid : ndarray or tuple of ndarrays\n If the input is a single slice, returns an array.\n If the input is multiple slices, returns a tuple of arrays, with\n only one dimension not equal to 1.\n\n See Also\n --------\n mgrid : like `ogrid` but returns dense (or fleshed out) mesh grids\n meshgrid: return coordinate matrices from coordinate vectors\n r_ : array concatenator\n :ref:`how-to-partition`\n\n Examples\n --------\n >>> from numpy import ogrid\n >>> ogrid[-1:1:5j]\n array([-1. , -0.5, 0. , 0.5, 1. 
])\n >>> ogrid[0:5, 0:5]\n (array([[0],\n [1],\n [2],\n [3],\n [4]]),\n array([[0, 1, 2, 3, 4]]))\n\n """\n __slots__ = ()\n\n def __init__(self):\n super().__init__(sparse=True)\n\n\nogrid = OGridClass()\n\n\nclass AxisConcatenator:\n """\n Translates slice objects to concatenation along an axis.\n\n For detailed documentation on usage, see `r_`.\n """\n __slots__ = ('axis', 'matrix', 'ndmin', 'trans1d')\n\n # allow ma.mr_ to override this\n concatenate = staticmethod(_nx.concatenate)\n makemat = staticmethod(matrixlib.matrix)\n\n def __init__(self, axis=0, matrix=False, ndmin=1, trans1d=-1):\n self.axis = axis\n self.matrix = matrix\n self.trans1d = trans1d\n self.ndmin = ndmin\n\n def __getitem__(self, key):\n # handle matrix builder syntax\n if isinstance(key, str):\n frame = sys._getframe().f_back\n mymat = matrixlib.bmat(key, frame.f_globals, frame.f_locals)\n return mymat\n\n if not isinstance(key, tuple):\n key = (key,)\n\n # copy attributes, since they can be overridden in the first argument\n trans1d = self.trans1d\n ndmin = self.ndmin\n matrix = self.matrix\n axis = self.axis\n\n objs = []\n # dtypes or scalars for weak scalar handling in result_type\n result_type_objs = []\n\n for k, item in enumerate(key):\n scalar = False\n if isinstance(item, slice):\n step = item.step\n start = item.start\n stop = item.stop\n if start is None:\n start = 0\n if step is None:\n step = 1\n if isinstance(step, (_nx.complexfloating, complex)):\n size = int(abs(step))\n newobj = linspace(start, stop, num=size)\n else:\n newobj = _nx.arange(start, stop, step)\n if ndmin > 1:\n newobj = array(newobj, copy=None, ndmin=ndmin)\n if trans1d != -1:\n newobj = newobj.swapaxes(-1, trans1d)\n elif isinstance(item, str):\n if k != 0:\n raise ValueError("special directives must be the "\n "first entry.")\n if item in ('r', 'c'):\n matrix = True\n col = (item == 'c')\n continue\n if ',' in item:\n vec = item.split(',')\n try:\n axis, ndmin = [int(x) for x in vec[:2]]\n if len(vec) == 
3:\n trans1d = int(vec[2])\n continue\n except Exception as e:\n raise ValueError(\n f"unknown special directive {item!r}"\n ) from e\n try:\n axis = int(item)\n continue\n except (ValueError, TypeError) as e:\n raise ValueError("unknown special directive") from e\n elif type(item) in ScalarType:\n scalar = True\n newobj = item\n else:\n item_ndim = np.ndim(item)\n newobj = array(item, copy=None, subok=True, ndmin=ndmin)\n if trans1d != -1 and item_ndim < ndmin:\n k2 = ndmin - item_ndim\n k1 = trans1d\n if k1 < 0:\n k1 += k2 + 1\n defaxes = list(range(ndmin))\n axes = defaxes[:k1] + defaxes[k2:] + defaxes[k1:k2]\n newobj = newobj.transpose(axes)\n\n objs.append(newobj)\n if scalar:\n result_type_objs.append(item)\n else:\n result_type_objs.append(newobj.dtype)\n\n # Ensure that scalars won't up-cast unless warranted, for 0, drops\n # through to error in concatenate.\n if len(result_type_objs) != 0:\n final_dtype = _nx.result_type(*result_type_objs)\n # concatenate could do cast, but that can be overridden:\n objs = [array(obj, copy=None, subok=True,\n ndmin=ndmin, dtype=final_dtype) for obj in objs]\n\n res = self.concatenate(tuple(objs), axis=axis)\n\n if matrix:\n oldndim = res.ndim\n res = self.makemat(res)\n if oldndim == 1 and col:\n res = res.T\n return res\n\n def __len__(self):\n return 0\n\n# separate classes are used here instead of just making r_ = concatenator(0),\n# etc. because otherwise we couldn't get the doc string to come out right\n# in help(r_)\n\n\nclass RClass(AxisConcatenator):\n """\n Translates slice objects to concatenation along the first axis.\n\n This is a simple way to build up arrays quickly. There are two use cases.\n\n 1. If the index expression contains comma separated arrays, then stack\n them along their first axis.\n 2. 
If the index expression contains slice notation or scalars then create\n a 1-D array with a range indicated by the slice notation.\n\n If slice notation is used, the syntax ``start:stop:step`` is equivalent\n to ``np.arange(start, stop, step)`` inside of the brackets. However, if\n ``step`` is an imaginary number (i.e. 100j) then its integer portion is\n interpreted as a number-of-points desired and the start and stop are\n inclusive. In other words ``start:stop:stepj`` is interpreted as\n ``np.linspace(start, stop, step, endpoint=1)`` inside of the brackets.\n After expansion of slice notation, all comma separated sequences are\n concatenated together.\n\n Optional character strings placed as the first element of the index\n expression can be used to change the output. The strings 'r' or 'c' result\n in matrix output. If the result is 1-D and 'r' is specified a 1 x N (row)\n matrix is produced. If the result is 1-D and 'c' is specified, then a N x 1\n (column) matrix is produced. If the result is 2-D then both provide the\n same matrix result.\n\n A string integer specifies which axis to stack multiple comma separated\n arrays along. A string of two comma-separated integers allows indication\n of the minimum number of dimensions to force each entry into as the\n second integer (the axis to concatenate along is still the first integer).\n\n A string with three comma-separated integers allows specification of the\n axis to concatenate along, the minimum number of dimensions to force the\n entries to, and which axis should contain the start of the arrays which\n are less than the specified number of dimensions. In other words the third\n integer allows you to specify where the 1's should be placed in the shape\n of the arrays that have their shapes upgraded. By default, they are placed\n in the front of the shape tuple. The third argument allows you to specify\n where the start of the array should be instead. 
Thus, a third argument of\n '0' would place the 1's at the end of the array shape. Negative integers\n specify where in the new shape tuple the last dimension of upgraded arrays\n should be placed, so the default is '-1'.\n\n Parameters\n ----------\n Not a function, so takes no parameters\n\n\n Returns\n -------\n A concatenated ndarray or matrix.\n\n See Also\n --------\n concatenate : Join a sequence of arrays along an existing axis.\n c_ : Translates slice objects to concatenation along the second axis.\n\n Examples\n --------\n >>> import numpy as np\n >>> np.r_[np.array([1,2,3]), 0, 0, np.array([4,5,6])]\n array([1, 2, 3, ..., 4, 5, 6])\n >>> np.r_[-1:1:6j, [0]*3, 5, 6]\n array([-1. , -0.6, -0.2, 0.2, 0.6, 1. , 0. , 0. , 0. , 5. , 6. ])\n\n String integers specify the axis to concatenate along or the minimum\n number of dimensions to force entries into.\n\n >>> a = np.array([[0, 1, 2], [3, 4, 5]])\n >>> np.r_['-1', a, a] # concatenate along last axis\n array([[0, 1, 2, 0, 1, 2],\n [3, 4, 5, 3, 4, 5]])\n >>> np.r_['0,2', [1,2,3], [4,5,6]] # concatenate along first axis, dim>=2\n array([[1, 2, 3],\n [4, 5, 6]])\n\n >>> np.r_['0,2,0', [1,2,3], [4,5,6]]\n array([[1],\n [2],\n [3],\n [4],\n [5],\n [6]])\n >>> np.r_['1,2,0', [1,2,3], [4,5,6]]\n array([[1, 4],\n [2, 5],\n [3, 6]])\n\n Using 'r' or 'c' as a first string argument creates a matrix.\n\n >>> np.r_['r',[1,2,3], [4,5,6]]\n matrix([[1, 2, 3, 4, 5, 6]])\n\n """\n __slots__ = ()\n\n def __init__(self):\n AxisConcatenator.__init__(self, 0)\n\n\nr_ = RClass()\n\n\nclass CClass(AxisConcatenator):\n """\n Translates slice objects to concatenation along the second axis.\n\n This is short-hand for ``np.r_['-1,2,0', index expression]``, which is\n useful because of its common occurrence. 
In particular, arrays will be\n stacked along their last axis after being upgraded to at least 2-D with\n 1's post-pended to the shape (column vectors made out of 1-D arrays).\n\n See Also\n --------\n column_stack : Stack 1-D arrays as columns into a 2-D array.\n r_ : For more detailed documentation.\n\n Examples\n --------\n >>> import numpy as np\n >>> np.c_[np.array([1,2,3]), np.array([4,5,6])]\n array([[1, 4],\n [2, 5],\n [3, 6]])\n >>> np.c_[np.array([[1,2,3]]), 0, 0, np.array([[4,5,6]])]\n array([[1, 2, 3, ..., 4, 5, 6]])\n\n """\n __slots__ = ()\n\n def __init__(self):\n AxisConcatenator.__init__(self, -1, ndmin=2, trans1d=0)\n\n\nc_ = CClass()\n\n\n@set_module('numpy')\nclass ndenumerate:\n """\n Multidimensional index iterator.\n\n Return an iterator yielding pairs of array coordinates and values.\n\n Parameters\n ----------\n arr : ndarray\n Input array.\n\n See Also\n --------\n ndindex, flatiter\n\n Examples\n --------\n >>> import numpy as np\n >>> a = np.array([[1, 2], [3, 4]])\n >>> for index, x in np.ndenumerate(a):\n ... print(index, x)\n (0, 0) 1\n (0, 1) 2\n (1, 0) 3\n (1, 1) 4\n\n """\n\n def __init__(self, arr):\n self.iter = np.asarray(arr).flat\n\n def __next__(self):\n """\n Standard iterator method, returns the index tuple and array value.\n\n Returns\n -------\n coords : tuple of ints\n The indices of the current iteration.\n val : scalar\n The array element of the current iteration.\n\n """\n return self.iter.coords, next(self.iter)\n\n def __iter__(self):\n return self\n\n\n@set_module('numpy')\nclass ndindex:\n """\n An N-dimensional iterator object to index arrays.\n\n Given the shape of an array, an `ndindex` instance iterates over\n the N-dimensional index of the array. 
At each iteration a tuple\n of indices is returned, the last dimension is iterated over first.\n\n Parameters\n ----------\n shape : ints, or a single tuple of ints\n The size of each dimension of the array can be passed as\n individual parameters or as the elements of a tuple.\n\n See Also\n --------\n ndenumerate, flatiter\n\n Examples\n --------\n >>> import numpy as np\n\n Dimensions as individual arguments\n\n >>> for index in np.ndindex(3, 2, 1):\n ... print(index)\n (0, 0, 0)\n (0, 1, 0)\n (1, 0, 0)\n (1, 1, 0)\n (2, 0, 0)\n (2, 1, 0)\n\n Same dimensions - but in a tuple ``(3, 2, 1)``\n\n >>> for index in np.ndindex((3, 2, 1)):\n ... print(index)\n (0, 0, 0)\n (0, 1, 0)\n (1, 0, 0)\n (1, 1, 0)\n (2, 0, 0)\n (2, 1, 0)\n\n """\n\n def __init__(self, *shape):\n if len(shape) == 1 and isinstance(shape[0], tuple):\n shape = shape[0]\n x = as_strided(_nx.zeros(1), shape=shape,\n strides=_nx.zeros_like(shape))\n self._it = _nx.nditer(x, flags=['multi_index', 'zerosize_ok'],\n order='C')\n\n def __iter__(self):\n return self\n\n def ndincr(self):\n """\n Increment the multi-dimensional index by one.\n\n This method is for backward compatibility only: do not use.\n\n .. deprecated:: 1.20.0\n This method has been advised against since numpy 1.8.0, but only\n started emitting DeprecationWarning as of this version.\n """\n # NumPy 1.20.0, 2020-09-08\n warnings.warn(\n "`ndindex.ndincr()` is deprecated, use `next(ndindex)` instead",\n DeprecationWarning, stacklevel=2)\n next(self)\n\n def __next__(self):\n """\n Standard iterator method, updates the index and returns the index\n tuple.\n\n Returns\n -------\n val : tuple of ints\n Returns a tuple containing the indices of the current\n iteration.\n\n """\n next(self._it)\n return self._it.multi_index\n\n\n# You can do all this with slice() plus a few special objects,\n# but there's a lot to remember. 
This version is simpler because\n# it uses the standard array indexing syntax.\n#\n# Written by Konrad Hinsen <hinsen@cnrs-orleans.fr>\n# last revision: 1999-7-23\n#\n# Cosmetic changes by T. Oliphant 2001\n#\n#\n\nclass IndexExpression:\n """\n A nicer way to build up index tuples for arrays.\n\n .. note::\n Use one of the two predefined instances ``index_exp`` or `s_`\n rather than directly using `IndexExpression`.\n\n For any index combination, including slicing and axis insertion,\n ``a[indices]`` is the same as ``a[np.index_exp[indices]]`` for any\n array `a`. However, ``np.index_exp[indices]`` can be used anywhere\n in Python code and returns a tuple of slice objects that can be\n used in the construction of complex index expressions.\n\n Parameters\n ----------\n maketuple : bool\n If True, always returns a tuple.\n\n See Also\n --------\n s_ : Predefined instance without tuple conversion:\n `s_ = IndexExpression(maketuple=False)`.\n The ``index_exp`` is another predefined instance that\n always returns a tuple:\n `index_exp = IndexExpression(maketuple=True)`.\n\n Notes\n -----\n You can do all this with :class:`slice` plus a few special objects,\n but there's a lot to remember and this version is simpler because\n it uses the standard array indexing syntax.\n\n Examples\n --------\n >>> import numpy as np\n >>> np.s_[2::2]\n slice(2, None, 2)\n >>> np.index_exp[2::2]\n (slice(2, None, 2),)\n\n >>> np.array([0, 1, 2, 3, 4])[np.s_[2::2]]\n array([2, 4])\n\n """\n __slots__ = ('maketuple',)\n\n def __init__(self, maketuple):\n self.maketuple = maketuple\n\n def __getitem__(self, item):\n if self.maketuple and not isinstance(item, tuple):\n return (item,)\n else:\n return item\n\n\nindex_exp = IndexExpression(maketuple=True)\ns_ = IndexExpression(maketuple=False)\n\n# End contribution from Konrad.\n\n\n# The following functions complement those in twodim_base, but are\n# applicable to N-dimensions.\n\n\ndef _fill_diagonal_dispatcher(a, val, wrap=None):\n return 
(a,)\n\n\n@array_function_dispatch(_fill_diagonal_dispatcher)\ndef fill_diagonal(a, val, wrap=False):\n """Fill the main diagonal of the given array of any dimensionality.\n\n For an array `a` with ``a.ndim >= 2``, the diagonal is the list of\n values ``a[i, ..., i]`` with indices ``i`` all identical. This function\n modifies the input array in-place without returning a value.\n\n Parameters\n ----------\n a : array, at least 2-D.\n Array whose diagonal is to be filled in-place.\n val : scalar or array_like\n Value(s) to write on the diagonal. If `val` is scalar, the value is\n written along the diagonal. If array-like, the flattened `val` is\n written along the diagonal, repeating if necessary to fill all\n diagonal entries.\n\n wrap : bool\n For tall matrices in NumPy version up to 1.6.2, the\n diagonal "wrapped" after N columns. You can have this behavior\n with this option. This affects only tall matrices.\n\n See also\n --------\n diag_indices, diag_indices_from\n\n Notes\n -----\n This functionality can be obtained via `diag_indices`, but internally\n this version uses a much faster implementation that never constructs the\n indices and uses simple slicing.\n\n Examples\n --------\n >>> import numpy as np\n >>> a = np.zeros((3, 3), int)\n >>> np.fill_diagonal(a, 5)\n >>> a\n array([[5, 0, 0],\n [0, 5, 0],\n [0, 0, 5]])\n\n The same function can operate on a 4-D array:\n\n >>> a = np.zeros((3, 3, 3, 3), int)\n >>> np.fill_diagonal(a, 4)\n\n We only show a few blocks for clarity:\n\n >>> a[0, 0]\n array([[4, 0, 0],\n [0, 0, 0],\n [0, 0, 0]])\n >>> a[1, 1]\n array([[0, 0, 0],\n [0, 4, 0],\n [0, 0, 0]])\n >>> a[2, 2]\n array([[0, 0, 0],\n [0, 0, 0],\n [0, 0, 4]])\n\n The wrap option affects only tall matrices:\n\n >>> # tall matrices no wrap\n >>> a = np.zeros((5, 3), int)\n >>> np.fill_diagonal(a, 4)\n >>> a\n array([[4, 0, 0],\n [0, 4, 0],\n [0, 0, 4],\n [0, 0, 0],\n [0, 0, 0]])\n\n >>> # tall matrices wrap\n >>> a = np.zeros((5, 3), int)\n >>> 
np.fill_diagonal(a, 4, wrap=True)\n >>> a\n array([[4, 0, 0],\n [0, 4, 0],\n [0, 0, 4],\n [0, 0, 0],\n [4, 0, 0]])\n\n >>> # wide matrices\n >>> a = np.zeros((3, 5), int)\n >>> np.fill_diagonal(a, 4, wrap=True)\n >>> a\n array([[4, 0, 0, 0, 0],\n [0, 4, 0, 0, 0],\n [0, 0, 4, 0, 0]])\n\n The anti-diagonal can be filled by reversing the order of elements\n using either `numpy.flipud` or `numpy.fliplr`.\n\n >>> a = np.zeros((3, 3), int);\n >>> np.fill_diagonal(np.fliplr(a), [1,2,3]) # Horizontal flip\n >>> a\n array([[0, 0, 1],\n [0, 2, 0],\n [3, 0, 0]])\n >>> np.fill_diagonal(np.flipud(a), [1,2,3]) # Vertical flip\n >>> a\n array([[0, 0, 3],\n [0, 2, 0],\n [1, 0, 0]])\n\n Note that the order in which the diagonal is filled varies depending\n on the flip function.\n """\n if a.ndim < 2:\n raise ValueError("array must be at least 2-d")\n end = None\n if a.ndim == 2:\n # Explicit, fast formula for the common case. For 2-d arrays, we\n # accept rectangular ones.\n step = a.shape[1] + 1\n # This is needed to don't have tall matrix have the diagonal wrap.\n if not wrap:\n end = a.shape[1] * a.shape[1]\n else:\n # For more than d=2, the strided formula is only valid for arrays with\n # all dimensions equal, so we check first.\n if not np.all(diff(a.shape) == 0):\n raise ValueError("All dimensions of input must be of equal length")\n step = 1 + (np.cumprod(a.shape[:-1])).sum()\n\n # Write the value out into the diagonal.\n a.flat[:end:step] = val\n\n\n@set_module('numpy')\ndef diag_indices(n, ndim=2):\n """\n Return the indices to access the main diagonal of an array.\n\n This returns a tuple of indices that can be used to access the main\n diagonal of an array `a` with ``a.ndim >= 2`` dimensions and shape\n (n, n, ..., n). 
For ``a.ndim = 2`` this is the usual diagonal, for\n ``a.ndim > 2`` this is the set of indices to access ``a[i, i, ..., i]``\n for ``i = [0..n-1]``.\n\n Parameters\n ----------\n n : int\n The size, along each dimension, of the arrays for which the returned\n indices can be used.\n\n ndim : int, optional\n The number of dimensions.\n\n See Also\n --------\n diag_indices_from\n\n Examples\n --------\n >>> import numpy as np\n\n Create a set of indices to access the diagonal of a (4, 4) array:\n\n >>> di = np.diag_indices(4)\n >>> di\n (array([0, 1, 2, 3]), array([0, 1, 2, 3]))\n >>> a = np.arange(16).reshape(4, 4)\n >>> a\n array([[ 0, 1, 2, 3],\n [ 4, 5, 6, 7],\n [ 8, 9, 10, 11],\n [12, 13, 14, 15]])\n >>> a[di] = 100\n >>> a\n array([[100, 1, 2, 3],\n [ 4, 100, 6, 7],\n [ 8, 9, 100, 11],\n [ 12, 13, 14, 100]])\n\n Now, we create indices to manipulate a 3-D array:\n\n >>> d3 = np.diag_indices(2, 3)\n >>> d3\n (array([0, 1]), array([0, 1]), array([0, 1]))\n\n And use it to set the diagonal of an array of zeros to 1:\n\n >>> a = np.zeros((2, 2, 2), dtype=int)\n >>> a[d3] = 1\n >>> a\n array([[[1, 0],\n [0, 0]],\n [[0, 0],\n [0, 1]]])\n\n """\n idx = np.arange(n)\n return (idx,) * ndim\n\n\ndef _diag_indices_from(arr):\n return (arr,)\n\n\n@array_function_dispatch(_diag_indices_from)\ndef diag_indices_from(arr):\n """\n Return the indices to access the main diagonal of an n-dimensional array.\n\n See `diag_indices` for full details.\n\n Parameters\n ----------\n arr : array, at least 2-D\n\n See Also\n --------\n diag_indices\n\n Examples\n --------\n >>> import numpy as np\n\n Create a 4 by 4 array.\n\n >>> a = np.arange(16).reshape(4, 4)\n >>> a\n array([[ 0, 1, 2, 3],\n [ 4, 5, 6, 7],\n [ 8, 9, 10, 11],\n [12, 13, 14, 15]])\n\n Get the indices of the diagonal elements.\n\n >>> di = np.diag_indices_from(a)\n >>> di\n (array([0, 1, 2, 3]), array([0, 1, 2, 3]))\n\n >>> a[di]\n array([ 0, 5, 10, 15])\n\n This is simply syntactic sugar for diag_indices.\n\n >>> 
np.diag_indices(a.shape[0])\n (array([0, 1, 2, 3]), array([0, 1, 2, 3]))\n\n """\n\n if not arr.ndim >= 2:\n raise ValueError("input array must be at least 2-d")\n # For more than d=2, the strided formula is only valid for arrays with\n # all dimensions equal, so we check first.\n if not np.all(diff(arr.shape) == 0):\n raise ValueError("All dimensions of input must be of equal length")\n\n return diag_indices(arr.shape[0], arr.ndim)\n | .venv\Lib\site-packages\numpy\lib\_index_tricks_impl.py | _index_tricks_impl.py | Python | 33,253 | 0.95 | 0.109653 | 0.041522 | node-utils | 718 | 2024-02-20T23:33:59.505826 | BSD-3-Clause | false | dace44f8860cd962f544f4d3c83530fb |
from collections.abc import Sequence\nfrom typing import Any, ClassVar, Final, Generic, Self, SupportsIndex, final, overload\nfrom typing import Literal as L\n\nfrom _typeshed import Incomplete\nfrom typing_extensions import TypeVar, deprecated\n\nimport numpy as np\nfrom numpy._core.multiarray import ravel_multi_index, unravel_index\nfrom numpy._typing import (\n ArrayLike,\n NDArray,\n _AnyShape,\n _FiniteNestedSequence,\n _NestedSequence,\n _SupportsArray,\n _SupportsDType,\n)\n\n__all__ = [ # noqa: RUF022\n "ravel_multi_index",\n "unravel_index",\n "mgrid",\n "ogrid",\n "r_",\n "c_",\n "s_",\n "index_exp",\n "ix_",\n "ndenumerate",\n "ndindex",\n "fill_diagonal",\n "diag_indices",\n "diag_indices_from",\n]\n\n###\n\n_T = TypeVar("_T")\n_TupleT = TypeVar("_TupleT", bound=tuple[Any, ...])\n_ArrayT = TypeVar("_ArrayT", bound=NDArray[Any])\n_DTypeT = TypeVar("_DTypeT", bound=np.dtype)\n_ScalarT = TypeVar("_ScalarT", bound=np.generic)\n_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, covariant=True)\n_BoolT_co = TypeVar("_BoolT_co", bound=bool, default=bool, covariant=True)\n\n_AxisT_co = TypeVar("_AxisT_co", bound=int, default=L[0], covariant=True)\n_MatrixT_co = TypeVar("_MatrixT_co", bound=bool, default=L[False], covariant=True)\n_NDMinT_co = TypeVar("_NDMinT_co", bound=int, default=L[1], covariant=True)\n_Trans1DT_co = TypeVar("_Trans1DT_co", bound=int, default=L[-1], covariant=True)\n\n###\n\nclass ndenumerate(Generic[_ScalarT_co]):\n @overload\n def __new__(cls, arr: _FiniteNestedSequence[_SupportsArray[np.dtype[_ScalarT]]]) -> ndenumerate[_ScalarT]: ...\n @overload\n def __new__(cls, arr: str | _NestedSequence[str]) -> ndenumerate[np.str_]: ...\n @overload\n def __new__(cls, arr: bytes | _NestedSequence[bytes]) -> ndenumerate[np.bytes_]: ...\n @overload\n def __new__(cls, arr: bool | _NestedSequence[bool]) -> ndenumerate[np.bool]: ...\n @overload\n def __new__(cls, arr: int | _NestedSequence[int]) -> ndenumerate[np.intp]: ...\n @overload\n def 
__new__(cls, arr: float | _NestedSequence[float]) -> ndenumerate[np.float64]: ...\n @overload\n def __new__(cls, arr: complex | _NestedSequence[complex]) -> ndenumerate[np.complex128]: ...\n @overload\n def __new__(cls, arr: object) -> ndenumerate[Any]: ...\n\n # The first overload is a (semi-)workaround for a mypy bug (tested with v1.10 and v1.11)\n @overload\n def __next__(\n self: ndenumerate[np.bool | np.number | np.flexible | np.datetime64 | np.timedelta64],\n /,\n ) -> tuple[_AnyShape, _ScalarT_co]: ...\n @overload\n def __next__(self: ndenumerate[np.object_], /) -> tuple[_AnyShape, Incomplete]: ...\n @overload\n def __next__(self, /) -> tuple[_AnyShape, _ScalarT_co]: ...\n\n #\n def __iter__(self) -> Self: ...\n\nclass ndindex:\n @overload\n def __init__(self, shape: tuple[SupportsIndex, ...], /) -> None: ...\n @overload\n def __init__(self, /, *shape: SupportsIndex) -> None: ...\n\n #\n def __iter__(self) -> Self: ...\n def __next__(self) -> _AnyShape: ...\n\n #\n @deprecated("Deprecated since 1.20.0.")\n def ndincr(self, /) -> None: ...\n\nclass nd_grid(Generic[_BoolT_co]):\n sparse: _BoolT_co\n def __init__(self, sparse: _BoolT_co = ...) 
-> None: ...\n @overload\n def __getitem__(self: nd_grid[L[False]], key: slice | Sequence[slice]) -> NDArray[Incomplete]: ...\n @overload\n def __getitem__(self: nd_grid[L[True]], key: slice | Sequence[slice]) -> tuple[NDArray[Incomplete], ...]: ...\n\n@final\nclass MGridClass(nd_grid[L[False]]):\n def __init__(self) -> None: ...\n\n@final\nclass OGridClass(nd_grid[L[True]]):\n def __init__(self) -> None: ...\n\nclass AxisConcatenator(Generic[_AxisT_co, _MatrixT_co, _NDMinT_co, _Trans1DT_co]):\n __slots__ = "axis", "matrix", "ndmin", "trans1d"\n\n makemat: ClassVar[type[np.matrix[tuple[int, int], np.dtype]]]\n\n axis: _AxisT_co\n matrix: _MatrixT_co\n ndmin: _NDMinT_co\n trans1d: _Trans1DT_co\n\n #\n def __init__(\n self,\n /,\n axis: _AxisT_co = ...,\n matrix: _MatrixT_co = ...,\n ndmin: _NDMinT_co = ...,\n trans1d: _Trans1DT_co = ...,\n ) -> None: ...\n\n # TODO(jorenham): annotate this\n def __getitem__(self, key: Incomplete, /) -> Incomplete: ...\n def __len__(self, /) -> L[0]: ...\n\n #\n @staticmethod\n @overload\n def concatenate(*a: ArrayLike, axis: SupportsIndex | None = 0, out: _ArrayT) -> _ArrayT: ...\n @staticmethod\n @overload\n def concatenate(*a: ArrayLike, axis: SupportsIndex | None = 0, out: None = None) -> NDArray[Incomplete]: ...\n\n@final\nclass RClass(AxisConcatenator[L[0], L[False], L[1], L[-1]]):\n def __init__(self, /) -> None: ...\n\n@final\nclass CClass(AxisConcatenator[L[-1], L[False], L[2], L[0]]):\n def __init__(self, /) -> None: ...\n\nclass IndexExpression(Generic[_BoolT_co]):\n maketuple: _BoolT_co\n def __init__(self, maketuple: _BoolT_co) -> None: ...\n @overload\n def __getitem__(self, item: _TupleT) -> _TupleT: ...\n @overload\n def __getitem__(self: IndexExpression[L[True]], item: _T) -> tuple[_T]: ...\n @overload\n def __getitem__(self: IndexExpression[L[False]], item: _T) -> _T: ...\n\n@overload\ndef ix_(*args: _FiniteNestedSequence[_SupportsDType[_DTypeT]]) -> tuple[np.ndarray[_AnyShape, _DTypeT], ...]: ...\n@overload\ndef 
ix_(*args: str | _NestedSequence[str]) -> tuple[NDArray[np.str_], ...]: ...\n@overload\ndef ix_(*args: bytes | _NestedSequence[bytes]) -> tuple[NDArray[np.bytes_], ...]: ...\n@overload\ndef ix_(*args: bool | _NestedSequence[bool]) -> tuple[NDArray[np.bool], ...]: ...\n@overload\ndef ix_(*args: int | _NestedSequence[int]) -> tuple[NDArray[np.intp], ...]: ...\n@overload\ndef ix_(*args: float | _NestedSequence[float]) -> tuple[NDArray[np.float64], ...]: ...\n@overload\ndef ix_(*args: complex | _NestedSequence[complex]) -> tuple[NDArray[np.complex128], ...]: ...\n\n#\ndef fill_diagonal(a: NDArray[Any], val: object, wrap: bool = ...) -> None: ...\n\n#\ndef diag_indices(n: int, ndim: int = ...) -> tuple[NDArray[np.intp], ...]: ...\ndef diag_indices_from(arr: ArrayLike) -> tuple[NDArray[np.intp], ...]: ...\n\n#\nmgrid: Final[MGridClass] = ...\nogrid: Final[OGridClass] = ...\n\nr_: Final[RClass] = ...\nc_: Final[CClass] = ...\n\nindex_exp: Final[IndexExpression[L[True]]] = ...\ns_: Final[IndexExpression[L[False]]] = ...\n | .venv\Lib\site-packages\numpy\lib\_index_tricks_impl.pyi | _index_tricks_impl.pyi | Other | 6,521 | 0.95 | 0.270408 | 0.072727 | node-utils | 890 | 2024-11-06T08:24:17.769941 | GPL-3.0 | false | 36fbef371ba5e8e54433d6ec3633cbf7 |
"""A collection of functions designed to help I/O with ascii files.\n\n"""\n__docformat__ = "restructuredtext en"\n\nimport itertools\n\nimport numpy as np\nimport numpy._core.numeric as nx\nfrom numpy._utils import asbytes, asunicode\n\n\ndef _decode_line(line, encoding=None):\n """Decode bytes from binary input streams.\n\n Defaults to decoding from 'latin1'.\n\n Parameters\n ----------\n line : str or bytes\n Line to be decoded.\n encoding : str\n Encoding used to decode `line`.\n\n Returns\n -------\n decoded_line : str\n\n """\n if type(line) is bytes:\n if encoding is None:\n encoding = "latin1"\n line = line.decode(encoding)\n\n return line\n\n\ndef _is_string_like(obj):\n """\n Check whether obj behaves like a string.\n """\n try:\n obj + ''\n except (TypeError, ValueError):\n return False\n return True\n\n\ndef _is_bytes_like(obj):\n """\n Check whether obj behaves like a bytes object.\n """\n try:\n obj + b''\n except (TypeError, ValueError):\n return False\n return True\n\n\ndef has_nested_fields(ndtype):\n """\n Returns whether one or several fields of a dtype are nested.\n\n Parameters\n ----------\n ndtype : dtype\n Data-type of a structured array.\n\n Raises\n ------\n AttributeError\n If `ndtype` does not have a `names` attribute.\n\n Examples\n --------\n >>> import numpy as np\n >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float)])\n >>> np.lib._iotools.has_nested_fields(dt)\n False\n\n """\n return any(ndtype[name].names is not None for name in ndtype.names or ())\n\n\ndef flatten_dtype(ndtype, flatten_base=False):\n """\n Unpack a structured data-type by collapsing nested fields and/or fields\n with a shape.\n\n Note that the field names are lost.\n\n Parameters\n ----------\n ndtype : dtype\n The datatype to collapse\n flatten_base : bool, optional\n If True, transform a field with a shape into several fields. 
Default is\n False.\n\n Examples\n --------\n >>> import numpy as np\n >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),\n ... ('block', int, (2, 3))])\n >>> np.lib._iotools.flatten_dtype(dt)\n [dtype('S4'), dtype('float64'), dtype('float64'), dtype('int64')]\n >>> np.lib._iotools.flatten_dtype(dt, flatten_base=True)\n [dtype('S4'),\n dtype('float64'),\n dtype('float64'),\n dtype('int64'),\n dtype('int64'),\n dtype('int64'),\n dtype('int64'),\n dtype('int64'),\n dtype('int64')]\n\n """\n names = ndtype.names\n if names is None:\n if flatten_base:\n return [ndtype.base] * int(np.prod(ndtype.shape))\n return [ndtype.base]\n else:\n types = []\n for field in names:\n info = ndtype.fields[field]\n flat_dt = flatten_dtype(info[0], flatten_base)\n types.extend(flat_dt)\n return types\n\n\nclass LineSplitter:\n """\n Object to split a string at a given delimiter or at given places.\n\n Parameters\n ----------\n delimiter : str, int, or sequence of ints, optional\n If a string, character used to delimit consecutive fields.\n If an integer or a sequence of integers, width(s) of each field.\n comments : str, optional\n Character used to mark the beginning of a comment. Default is '#'.\n autostrip : bool, optional\n Whether to strip each individual field. Default is True.\n\n """\n\n def autostrip(self, method):\n """\n Wrapper to strip each member of the output of `method`.\n\n Parameters\n ----------\n method : function\n Function that takes a single argument and returns a sequence of\n strings.\n\n Returns\n -------\n wrapped : function\n The result of wrapping `method`. 
`wrapped` takes a single input\n argument and returns a list of strings that are stripped of\n white-space.\n\n """\n return lambda input: [_.strip() for _ in method(input)]\n\n def __init__(self, delimiter=None, comments='#', autostrip=True,\n encoding=None):\n delimiter = _decode_line(delimiter)\n comments = _decode_line(comments)\n\n self.comments = comments\n\n # Delimiter is a character\n if (delimiter is None) or isinstance(delimiter, str):\n delimiter = delimiter or None\n _handyman = self._delimited_splitter\n # Delimiter is a list of field widths\n elif hasattr(delimiter, '__iter__'):\n _handyman = self._variablewidth_splitter\n idx = np.cumsum([0] + list(delimiter))\n delimiter = [slice(i, j) for (i, j) in itertools.pairwise(idx)]\n # Delimiter is a single integer\n elif int(delimiter):\n (_handyman, delimiter) = (\n self._fixedwidth_splitter, int(delimiter))\n else:\n (_handyman, delimiter) = (self._delimited_splitter, None)\n self.delimiter = delimiter\n if autostrip:\n self._handyman = self.autostrip(_handyman)\n else:\n self._handyman = _handyman\n self.encoding = encoding\n\n def _delimited_splitter(self, line):\n """Chop off comments, strip, and split at delimiter. 
"""\n if self.comments is not None:\n line = line.split(self.comments)[0]\n line = line.strip(" \r\n")\n if not line:\n return []\n return line.split(self.delimiter)\n\n def _fixedwidth_splitter(self, line):\n if self.comments is not None:\n line = line.split(self.comments)[0]\n line = line.strip("\r\n")\n if not line:\n return []\n fixed = self.delimiter\n slices = [slice(i, i + fixed) for i in range(0, len(line), fixed)]\n return [line[s] for s in slices]\n\n def _variablewidth_splitter(self, line):\n if self.comments is not None:\n line = line.split(self.comments)[0]\n if not line:\n return []\n slices = self.delimiter\n return [line[s] for s in slices]\n\n def __call__(self, line):\n return self._handyman(_decode_line(line, self.encoding))\n\n\nclass NameValidator:\n """\n Object to validate a list of strings to use as field names.\n\n The strings are stripped of any non alphanumeric character, and spaces\n are replaced by '_'. During instantiation, the user can define a list\n of names to exclude, as well as a list of invalid characters. Names in\n the exclusion list are appended a '_' character.\n\n Once an instance has been created, it can be called with a list of\n names, and a list of valid names will be created. The `__call__`\n method accepts an optional keyword "default" that sets the default name\n in case of ambiguity. By default this is 'f', so that names will\n default to `f0`, `f1`, etc.\n\n Parameters\n ----------\n excludelist : sequence, optional\n A list of names to exclude. This list is appended to the default\n list ['return', 'file', 'print']. 
Excluded names are appended an\n underscore: for example, `file` becomes `file_` if supplied.\n deletechars : str, optional\n A string combining invalid characters that must be deleted from the\n names.\n case_sensitive : {True, False, 'upper', 'lower'}, optional\n * If True, field names are case-sensitive.\n * If False or 'upper', field names are converted to upper case.\n * If 'lower', field names are converted to lower case.\n\n The default value is True.\n replace_space : '_', optional\n Character(s) used in replacement of white spaces.\n\n Notes\n -----\n Calling an instance of `NameValidator` is the same as calling its\n method `validate`.\n\n Examples\n --------\n >>> import numpy as np\n >>> validator = np.lib._iotools.NameValidator()\n >>> validator(['file', 'field2', 'with space', 'CaSe'])\n ('file_', 'field2', 'with_space', 'CaSe')\n\n >>> validator = np.lib._iotools.NameValidator(excludelist=['excl'],\n ... deletechars='q',\n ... case_sensitive=False)\n >>> validator(['excl', 'field2', 'no_q', 'with space', 'CaSe'])\n ('EXCL', 'FIELD2', 'NO_Q', 'WITH_SPACE', 'CASE')\n\n """\n\n defaultexcludelist = 'return', 'file', 'print'\n defaultdeletechars = frozenset(r"""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""")\n\n def __init__(self, excludelist=None, deletechars=None,\n case_sensitive=None, replace_space='_'):\n # Process the exclusion list ..\n if excludelist is None:\n excludelist = []\n excludelist.extend(self.defaultexcludelist)\n self.excludelist = excludelist\n # Process the list of characters to delete\n if deletechars is None:\n delete = set(self.defaultdeletechars)\n else:\n delete = set(deletechars)\n delete.add('"')\n self.deletechars = delete\n # Process the case option .....\n if (case_sensitive is None) or (case_sensitive is True):\n self.case_converter = lambda x: x\n elif (case_sensitive is False) or case_sensitive.startswith('u'):\n self.case_converter = lambda x: x.upper()\n elif case_sensitive.startswith('l'):\n self.case_converter = lambda x: 
x.lower()\n else:\n msg = f'unrecognized case_sensitive value {case_sensitive}.'\n raise ValueError(msg)\n\n self.replace_space = replace_space\n\n def validate(self, names, defaultfmt="f%i", nbfields=None):\n """\n Validate a list of strings as field names for a structured array.\n\n Parameters\n ----------\n names : sequence of str\n Strings to be validated.\n defaultfmt : str, optional\n Default format string, used if validating a given string\n reduces its length to zero.\n nbfields : integer, optional\n Final number of validated names, used to expand or shrink the\n initial list of names.\n\n Returns\n -------\n validatednames : list of str\n The list of validated field names.\n\n Notes\n -----\n A `NameValidator` instance can be called directly, which is the\n same as calling `validate`. For examples, see `NameValidator`.\n\n """\n # Initial checks ..............\n if (names is None):\n if (nbfields is None):\n return None\n names = []\n if isinstance(names, str):\n names = [names, ]\n if nbfields is not None:\n nbnames = len(names)\n if (nbnames < nbfields):\n names = list(names) + [''] * (nbfields - nbnames)\n elif (nbnames > nbfields):\n names = names[:nbfields]\n # Set some shortcuts ...........\n deletechars = self.deletechars\n excludelist = self.excludelist\n case_converter = self.case_converter\n replace_space = self.replace_space\n # Initializes some variables ...\n validatednames = []\n seen = {}\n nbempty = 0\n\n for item in names:\n item = case_converter(item).strip()\n if replace_space:\n item = item.replace(' ', replace_space)\n item = ''.join([c for c in item if c not in deletechars])\n if item == '':\n item = defaultfmt % nbempty\n while item in names:\n nbempty += 1\n item = defaultfmt % nbempty\n nbempty += 1\n elif item in excludelist:\n item += '_'\n cnt = seen.get(item, 0)\n if cnt > 0:\n validatednames.append(item + '_%d' % cnt)\n else:\n validatednames.append(item)\n seen[item] = cnt + 1\n return tuple(validatednames)\n\n def 
__call__(self, names, defaultfmt="f%i", nbfields=None):\n return self.validate(names, defaultfmt=defaultfmt, nbfields=nbfields)\n\n\ndef str2bool(value):\n """\n Tries to transform a string supposed to represent a boolean to a boolean.\n\n Parameters\n ----------\n value : str\n The string that is transformed to a boolean.\n\n Returns\n -------\n boolval : bool\n The boolean representation of `value`.\n\n Raises\n ------\n ValueError\n If the string is not 'True' or 'False' (case independent)\n\n Examples\n --------\n >>> import numpy as np\n >>> np.lib._iotools.str2bool('TRUE')\n True\n >>> np.lib._iotools.str2bool('false')\n False\n\n """\n value = value.upper()\n if value == 'TRUE':\n return True\n elif value == 'FALSE':\n return False\n else:\n raise ValueError("Invalid boolean")\n\n\nclass ConverterError(Exception):\n """\n Exception raised when an error occurs in a converter for string values.\n\n """\n pass\n\n\nclass ConverterLockError(ConverterError):\n """\n Exception raised when an attempt is made to upgrade a locked converter.\n\n """\n pass\n\n\nclass ConversionWarning(UserWarning):\n """\n Warning issued when a string converter has a problem.\n\n Notes\n -----\n In `genfromtxt` a `ConversionWarning` is issued if raising exceptions\n is explicitly suppressed with the "invalid_raise" keyword.\n\n """\n pass\n\n\nclass StringConverter:\n """\n Factory class for function transforming a string into another object\n (int, float).\n\n After initialization, an instance can be called to transform a string\n into another object. 
If the string is recognized as representing a\n missing value, a default value is returned.\n\n Attributes\n ----------\n func : function\n Function used for the conversion.\n default : any\n Default value to return when the input corresponds to a missing\n value.\n type : type\n Type of the output.\n _status : int\n Integer representing the order of the conversion.\n _mapper : sequence of tuples\n Sequence of tuples (dtype, function, default value) to evaluate in\n order.\n _locked : bool\n Holds `locked` parameter.\n\n Parameters\n ----------\n dtype_or_func : {None, dtype, function}, optional\n If a `dtype`, specifies the input data type, used to define a basic\n function and a default value for missing data. For example, when\n `dtype` is float, the `func` attribute is set to `float` and the\n default value to `np.nan`. If a function, this function is used to\n convert a string to another object. In this case, it is recommended\n to give an associated default value as input.\n default : any, optional\n Value to return by default, that is, when the string to be\n converted is flagged as missing. If not given, `StringConverter`\n tries to supply a reasonable default value.\n missing_values : {None, sequence of str}, optional\n ``None`` or sequence of strings indicating a missing value. If ``None``\n then missing values are indicated by empty entries. The default is\n ``None``.\n locked : bool, optional\n Whether the StringConverter should be locked to prevent automatic\n upgrade or not. 
Default is False.\n\n """\n _mapper = [(nx.bool, str2bool, False),\n (nx.int_, int, -1),]\n\n # On 32-bit systems, we need to make sure that we explicitly include\n # nx.int64 since ns.int_ is nx.int32.\n if nx.dtype(nx.int_).itemsize < nx.dtype(nx.int64).itemsize:\n _mapper.append((nx.int64, int, -1))\n\n _mapper.extend([(nx.float64, float, nx.nan),\n (nx.complex128, complex, nx.nan + 0j),\n (nx.longdouble, nx.longdouble, nx.nan),\n # If a non-default dtype is passed, fall back to generic\n # ones (should only be used for the converter)\n (nx.integer, int, -1),\n (nx.floating, float, nx.nan),\n (nx.complexfloating, complex, nx.nan + 0j),\n # Last, try with the string types (must be last, because\n # `_mapper[-1]` is used as default in some cases)\n (nx.str_, asunicode, '???'),\n (nx.bytes_, asbytes, '???'),\n ])\n\n @classmethod\n def _getdtype(cls, val):\n """Returns the dtype of the input variable."""\n return np.array(val).dtype\n\n @classmethod\n def _getsubdtype(cls, val):\n """Returns the type of the dtype of the input variable."""\n return np.array(val).dtype.type\n\n @classmethod\n def _dtypeortype(cls, dtype):\n """Returns dtype for datetime64 and type of dtype otherwise."""\n\n # This is a bit annoying. We want to return the "general" type in most\n # cases (ie. "string" rather than "S10"), but we want to return the\n # specific type for datetime64 (ie. "datetime64[us]" rather than\n # "datetime64").\n if dtype.type == np.datetime64:\n return dtype\n return dtype.type\n\n @classmethod\n def upgrade_mapper(cls, func, default=None):\n """\n Upgrade the mapper of a StringConverter by adding a new function and\n its corresponding default.\n\n The input function (or sequence of functions) and its associated\n default value (if any) is inserted in penultimate position of the\n mapper. 
The corresponding type is estimated from the dtype of the\n default value.\n\n Parameters\n ----------\n func : var\n Function, or sequence of functions\n\n Examples\n --------\n >>> import dateutil.parser\n >>> import datetime\n >>> dateparser = dateutil.parser.parse\n >>> defaultdate = datetime.date(2000, 1, 1)\n >>> StringConverter.upgrade_mapper(dateparser, default=defaultdate)\n """\n # Func is a single functions\n if callable(func):\n cls._mapper.insert(-1, (cls._getsubdtype(default), func, default))\n return\n elif hasattr(func, '__iter__'):\n if isinstance(func[0], (tuple, list)):\n for _ in func:\n cls._mapper.insert(-1, _)\n return\n if default is None:\n default = [None] * len(func)\n else:\n default = list(default)\n default.append([None] * (len(func) - len(default)))\n for fct, dft in zip(func, default):\n cls._mapper.insert(-1, (cls._getsubdtype(dft), fct, dft))\n\n @classmethod\n def _find_map_entry(cls, dtype):\n # if a converter for the specific dtype is available use that\n for i, (deftype, func, default_def) in enumerate(cls._mapper):\n if dtype.type == deftype:\n return i, (deftype, func, default_def)\n\n # otherwise find an inexact match\n for i, (deftype, func, default_def) in enumerate(cls._mapper):\n if np.issubdtype(dtype.type, deftype):\n return i, (deftype, func, default_def)\n\n raise LookupError\n\n def __init__(self, dtype_or_func=None, default=None, missing_values=None,\n locked=False):\n # Defines a lock for upgrade\n self._locked = bool(locked)\n # No input dtype: minimal initialization\n if dtype_or_func is None:\n self.func = str2bool\n self._status = 0\n self.default = default or False\n dtype = np.dtype('bool')\n else:\n # Is the input a np.dtype ?\n try:\n self.func = None\n dtype = np.dtype(dtype_or_func)\n except TypeError:\n # dtype_or_func must be a function, then\n if not callable(dtype_or_func):\n errmsg = ("The input argument `dtype` is neither a"\n " function nor a dtype (got '%s' instead)")\n raise TypeError(errmsg % 
type(dtype_or_func))\n # Set the function\n self.func = dtype_or_func\n # If we don't have a default, try to guess it or set it to\n # None\n if default is None:\n try:\n default = self.func('0')\n except ValueError:\n default = None\n dtype = self._getdtype(default)\n\n # find the best match in our mapper\n try:\n self._status, (_, func, default_def) = self._find_map_entry(dtype)\n except LookupError:\n # no match\n self.default = default\n _, func, _ = self._mapper[-1]\n self._status = 0\n else:\n # use the found default only if we did not already have one\n if default is None:\n self.default = default_def\n else:\n self.default = default\n\n # If the input was a dtype, set the function to the last we saw\n if self.func is None:\n self.func = func\n\n # If the status is 1 (int), change the function to\n # something more robust.\n if self.func == self._mapper[1][1]:\n if issubclass(dtype.type, np.uint64):\n self.func = np.uint64\n elif issubclass(dtype.type, np.int64):\n self.func = np.int64\n else:\n self.func = lambda x: int(float(x))\n # Store the list of strings corresponding to missing values.\n if missing_values is None:\n self.missing_values = {''}\n else:\n if isinstance(missing_values, str):\n missing_values = missing_values.split(",")\n self.missing_values = set(list(missing_values) + [''])\n\n self._callingfunction = self._strict_call\n self.type = self._dtypeortype(dtype)\n self._checked = False\n self._initial_default = default\n\n def _loose_call(self, value):\n try:\n return self.func(value)\n except ValueError:\n return self.default\n\n def _strict_call(self, value):\n try:\n\n # We check if we can convert the value using the current function\n new_value = self.func(value)\n\n # In addition to having to check whether func can convert the\n # value, we also have to make sure that we don't get overflow\n # errors for integers.\n if self.func is int:\n try:\n np.array(value, dtype=self.type)\n except OverflowError:\n raise ValueError\n\n # We're still 
here so we can now return the new value\n return new_value\n\n except ValueError:\n if value.strip() in self.missing_values:\n if not self._status:\n self._checked = False\n return self.default\n raise ValueError(f"Cannot convert string '{value}'")\n\n def __call__(self, value):\n return self._callingfunction(value)\n\n def _do_upgrade(self):\n # Raise an exception if we locked the converter...\n if self._locked:\n errmsg = "Converter is locked and cannot be upgraded"\n raise ConverterLockError(errmsg)\n _statusmax = len(self._mapper)\n # Complains if we try to upgrade by the maximum\n _status = self._status\n if _status == _statusmax:\n errmsg = "Could not find a valid conversion function"\n raise ConverterError(errmsg)\n elif _status < _statusmax - 1:\n _status += 1\n self.type, self.func, default = self._mapper[_status]\n self._status = _status\n if self._initial_default is not None:\n self.default = self._initial_default\n else:\n self.default = default\n\n def upgrade(self, value):\n """\n Find the best converter for a given string, and return the result.\n\n The supplied string `value` is converted by testing different\n converters in order. First the `func` method of the\n `StringConverter` instance is tried, if this fails other available\n converters are tried. 
The order in which these other converters\n are tried is determined by the `_status` attribute of the instance.\n\n Parameters\n ----------\n value : str\n The string to convert.\n\n Returns\n -------\n out : any\n The result of converting `value` with the appropriate converter.\n\n """\n self._checked = True\n try:\n return self._strict_call(value)\n except ValueError:\n self._do_upgrade()\n return self.upgrade(value)\n\n def iterupgrade(self, value):\n self._checked = True\n if not hasattr(value, '__iter__'):\n value = (value,)\n _strict_call = self._strict_call\n try:\n for _m in value:\n _strict_call(_m)\n except ValueError:\n self._do_upgrade()\n self.iterupgrade(value)\n\n def update(self, func, default=None, testing_value=None,\n missing_values='', locked=False):\n """\n Set StringConverter attributes directly.\n\n Parameters\n ----------\n func : function\n Conversion function.\n default : any, optional\n Value to return by default, that is, when the string to be\n converted is flagged as missing. If not given,\n `StringConverter` tries to supply a reasonable default value.\n testing_value : str, optional\n A string representing a standard input value of the converter.\n This string is used to help defining a reasonable default\n value.\n missing_values : {sequence of str, None}, optional\n Sequence of strings indicating a missing value. If ``None``, then\n the existing `missing_values` are cleared. The default is ``''``.\n locked : bool, optional\n Whether the StringConverter should be locked to prevent\n automatic upgrade or not. 
Default is False.\n\n Notes\n -----\n `update` takes the same parameters as the constructor of\n `StringConverter`, except that `func` does not accept a `dtype`\n whereas `dtype_or_func` in the constructor does.\n\n """\n self.func = func\n self._locked = locked\n\n # Don't reset the default to None if we can avoid it\n if default is not None:\n self.default = default\n self.type = self._dtypeortype(self._getdtype(default))\n else:\n try:\n tester = func(testing_value or '1')\n except (TypeError, ValueError):\n tester = None\n self.type = self._dtypeortype(self._getdtype(tester))\n\n # Add the missing values to the existing set or clear it.\n if missing_values is None:\n # Clear all missing values even though the ctor initializes it to\n # set(['']) when the argument is None.\n self.missing_values = set()\n else:\n if not np.iterable(missing_values):\n missing_values = [missing_values]\n if not all(isinstance(v, str) for v in missing_values):\n raise TypeError("missing_values must be strings or unicode")\n self.missing_values.update(missing_values)\n\n\ndef easy_dtype(ndtype, names=None, defaultfmt="f%i", **validationargs):\n """\n Convenience function to create a `np.dtype` object.\n\n The function processes the input `dtype` and matches it with the given\n names.\n\n Parameters\n ----------\n ndtype : var\n Definition of the dtype. 
Can be any string or dictionary recognized\n by the `np.dtype` function, or a sequence of types.\n names : str or sequence, optional\n Sequence of strings to use as field names for a structured dtype.\n For convenience, `names` can be a string of a comma-separated list\n of names.\n defaultfmt : str, optional\n Format string used to define missing names, such as ``"f%i"``\n (default) or ``"fields_%02i"``.\n validationargs : optional\n A series of optional arguments used to initialize a\n `NameValidator`.\n\n Examples\n --------\n >>> import numpy as np\n >>> np.lib._iotools.easy_dtype(float)\n dtype('float64')\n >>> np.lib._iotools.easy_dtype("i4, f8")\n dtype([('f0', '<i4'), ('f1', '<f8')])\n >>> np.lib._iotools.easy_dtype("i4, f8", defaultfmt="field_%03i")\n dtype([('field_000', '<i4'), ('field_001', '<f8')])\n\n >>> np.lib._iotools.easy_dtype((int, float, float), names="a,b,c")\n dtype([('a', '<i8'), ('b', '<f8'), ('c', '<f8')])\n >>> np.lib._iotools.easy_dtype(float, names="a,b,c")\n dtype([('a', '<f8'), ('b', '<f8'), ('c', '<f8')])\n\n """\n try:\n ndtype = np.dtype(ndtype)\n except TypeError:\n validate = NameValidator(**validationargs)\n nbfields = len(ndtype)\n if names is None:\n names = [''] * len(ndtype)\n elif isinstance(names, str):\n names = names.split(",")\n names = validate(names, nbfields=nbfields, defaultfmt=defaultfmt)\n ndtype = np.dtype({"formats": ndtype, "names": names})\n else:\n # Explicit names\n if names is not None:\n validate = NameValidator(**validationargs)\n if isinstance(names, str):\n names = names.split(",")\n # Simple dtype: repeat to match the nb of names\n if ndtype.names is None:\n formats = tuple([ndtype.type] * len(names))\n names = validate(names, defaultfmt=defaultfmt)\n ndtype = np.dtype(list(zip(names, formats)))\n # Structured dtype: just validate the names as needed\n else:\n ndtype.names = validate(names, nbfields=len(ndtype.names),\n defaultfmt=defaultfmt)\n # No implicit names\n elif ndtype.names is not None:\n 
validate = NameValidator(**validationargs)\n # Default initial names : should we change the format ?\n numbered_names = tuple(f"f{i}" for i in range(len(ndtype.names)))\n if ((ndtype.names == numbered_names) and (defaultfmt != "f%i")):\n ndtype.names = validate([''] * len(ndtype.names),\n defaultfmt=defaultfmt)\n # Explicit initial names : just validate\n else:\n ndtype.names = validate(ndtype.names, defaultfmt=defaultfmt)\n return ndtype\n | .venv\Lib\site-packages\numpy\lib\_iotools.py | _iotools.py | Python | 31,776 | 0.95 | 0.192222 | 0.072633 | vue-tools | 407 | 2024-07-15T02:45:16.496255 | BSD-3-Clause | false | cdc648b6be4beaf384869aa70cfb9191 |
from collections.abc import Callable, Iterable, Sequence\nfrom typing import (\n Any,\n ClassVar,\n Final,\n Literal,\n TypedDict,\n TypeVar,\n Unpack,\n overload,\n type_check_only,\n)\n\nimport numpy as np\nimport numpy.typing as npt\n\n_T = TypeVar("_T")\n\n@type_check_only\nclass _ValidationKwargs(TypedDict, total=False):\n excludelist: Iterable[str] | None\n deletechars: Iterable[str] | None\n case_sensitive: Literal["upper", "lower"] | bool | None\n replace_space: str\n\n###\n\n__docformat__: Final[str] = "restructuredtext en"\n\nclass ConverterError(Exception): ...\nclass ConverterLockError(ConverterError): ...\nclass ConversionWarning(UserWarning): ...\n\nclass LineSplitter:\n delimiter: str | int | Iterable[int] | None\n comments: str\n encoding: str | None\n\n def __init__(\n self,\n /,\n delimiter: str | bytes | int | Iterable[int] | None = None,\n comments: str | bytes = "#",\n autostrip: bool = True,\n encoding: str | None = None,\n ) -> None: ...\n def __call__(self, /, line: str | bytes) -> list[str]: ...\n def autostrip(self, /, method: Callable[[_T], Iterable[str]]) -> Callable[[_T], list[str]]: ...\n\nclass NameValidator:\n defaultexcludelist: ClassVar[Sequence[str]]\n defaultdeletechars: ClassVar[Sequence[str]]\n excludelist: list[str]\n deletechars: set[str]\n case_converter: Callable[[str], str]\n replace_space: str\n\n def __init__(\n self,\n /,\n excludelist: Iterable[str] | None = None,\n deletechars: Iterable[str] | None = None,\n case_sensitive: Literal["upper", "lower"] | bool | None = None,\n replace_space: str = "_",\n ) -> None: ...\n def __call__(self, /, names: Iterable[str], defaultfmt: str = "f%i", nbfields: int | None = None) -> tuple[str, ...]: ...\n def validate(self, /, names: Iterable[str], defaultfmt: str = "f%i", nbfields: int | None = None) -> tuple[str, ...]: ...\n\nclass StringConverter:\n func: Callable[[str], Any] | None\n default: Any\n missing_values: set[str]\n type: np.dtype[np.datetime64] | np.generic\n\n def 
__init__(\n self,\n /,\n dtype_or_func: npt.DTypeLike | None = None,\n default: None = None,\n missing_values: Iterable[str] | None = None,\n locked: bool = False,\n ) -> None: ...\n def update(\n self,\n /,\n func: Callable[[str], Any],\n default: object | None = None,\n testing_value: str | None = None,\n missing_values: str = "",\n locked: bool = False,\n ) -> None: ...\n #\n def __call__(self, /, value: str) -> Any: ...\n def upgrade(self, /, value: str) -> Any: ...\n def iterupgrade(self, /, value: Iterable[str] | str) -> None: ...\n\n #\n @classmethod\n def upgrade_mapper(cls, func: Callable[[str], Any], default: object | None = None) -> None: ...\n\n@overload\ndef str2bool(value: Literal["false", "False", "FALSE"]) -> Literal[False]: ...\n@overload\ndef str2bool(value: Literal["true", "True", "TRUE"]) -> Literal[True]: ...\n\n#\ndef has_nested_fields(ndtype: np.dtype[np.void]) -> bool: ...\ndef flatten_dtype(ndtype: np.dtype[np.void], flatten_base: bool = False) -> type[np.dtype]: ...\ndef easy_dtype(\n ndtype: npt.DTypeLike,\n names: Iterable[str] | None = None,\n defaultfmt: str = "f%i",\n **validationargs: Unpack[_ValidationKwargs],\n) -> np.dtype[np.void]: ...\n | .venv\Lib\site-packages\numpy\lib\_iotools.pyi | _iotools.pyi | Other | 3,507 | 0.95 | 0.210526 | 0.050505 | awesome-app | 678 | 2024-11-13T19:11:23.169268 | GPL-3.0 | false | 706b3f354ee006d8f620a3dc73ab95d8 |
"""\nFunctions that ignore NaN.\n\nFunctions\n---------\n\n- `nanmin` -- minimum non-NaN value\n- `nanmax` -- maximum non-NaN value\n- `nanargmin` -- index of minimum non-NaN value\n- `nanargmax` -- index of maximum non-NaN value\n- `nansum` -- sum of non-NaN values\n- `nanprod` -- product of non-NaN values\n- `nancumsum` -- cumulative sum of non-NaN values\n- `nancumprod` -- cumulative product of non-NaN values\n- `nanmean` -- mean of non-NaN values\n- `nanvar` -- variance of non-NaN values\n- `nanstd` -- standard deviation of non-NaN values\n- `nanmedian` -- median of non-NaN values\n- `nanquantile` -- qth quantile of non-NaN values\n- `nanpercentile` -- qth percentile of non-NaN values\n\n"""\nimport functools\nimport warnings\n\nimport numpy as np\nimport numpy._core.numeric as _nx\nfrom numpy._core import overrides\nfrom numpy.lib import _function_base_impl as fnb\nfrom numpy.lib._function_base_impl import _weights_are_valid\n\narray_function_dispatch = functools.partial(\n overrides.array_function_dispatch, module='numpy')\n\n\n__all__ = [\n 'nansum', 'nanmax', 'nanmin', 'nanargmax', 'nanargmin', 'nanmean',\n 'nanmedian', 'nanpercentile', 'nanvar', 'nanstd', 'nanprod',\n 'nancumsum', 'nancumprod', 'nanquantile'\n ]\n\n\ndef _nan_mask(a, out=None):\n """\n Parameters\n ----------\n a : array-like\n Input array with at least 1 dimension.\n out : ndarray, optional\n Alternate output array in which to place the result. The default\n is ``None``; if provided, it must have the same shape as the\n expected output and will prevent the allocation of a new array.\n\n Returns\n -------\n y : bool ndarray or True\n A bool array where ``np.nan`` positions are marked with ``False``\n and other positions are marked with ``True``. 
If the type of ``a``\n is such that it can't possibly contain ``np.nan``, returns ``True``.\n """\n # we assume that a is an array for this private function\n\n if a.dtype.kind not in 'fc':\n return True\n\n y = np.isnan(a, out=out)\n y = np.invert(y, out=y)\n return y\n\ndef _replace_nan(a, val):\n """\n If `a` is of inexact type, make a copy of `a`, replace NaNs with\n the `val` value, and return the copy together with a boolean mask\n marking the locations where NaNs were present. If `a` is not of\n inexact type, do nothing and return `a` together with a mask of None.\n\n Note that scalars will end up as array scalars, which is important\n for using the result as the value of the out argument in some\n operations.\n\n Parameters\n ----------\n a : array-like\n Input array.\n val : float\n NaN values are set to val before doing the operation.\n\n Returns\n -------\n y : ndarray\n If `a` is of inexact type, return a copy of `a` with the NaNs\n replaced by the fill value, otherwise return `a`.\n mask: {bool, None}\n If `a` is of inexact type, return a boolean mask marking locations of\n NaNs, otherwise return None.\n\n """\n a = np.asanyarray(a)\n\n if a.dtype == np.object_:\n # object arrays do not support `isnan` (gh-9009), so make a guess\n mask = np.not_equal(a, a, dtype=bool)\n elif issubclass(a.dtype.type, np.inexact):\n mask = np.isnan(a)\n else:\n mask = None\n\n if mask is not None:\n a = np.array(a, subok=True, copy=True)\n np.copyto(a, val, where=mask)\n\n return a, mask\n\n\ndef _copyto(a, val, mask):\n """\n Replace values in `a` with NaN where `mask` is True. This differs from\n copyto in that it will deal with the case where `a` is a numpy scalar.\n\n Parameters\n ----------\n a : ndarray or numpy scalar\n Array or numpy scalar some of whose values are to be replaced\n by val.\n val : numpy scalar\n Value used a replacement.\n mask : ndarray, scalar\n Boolean array. Where True the corresponding element of `a` is\n replaced by `val`. 
Broadcasts.\n\n Returns\n -------\n res : ndarray, scalar\n Array with elements replaced or scalar `val`.\n\n """\n if isinstance(a, np.ndarray):\n np.copyto(a, val, where=mask, casting='unsafe')\n else:\n a = a.dtype.type(val)\n return a\n\n\ndef _remove_nan_1d(arr1d, second_arr1d=None, overwrite_input=False):\n """\n Equivalent to arr1d[~arr1d.isnan()], but in a different order\n\n Presumably faster as it incurs fewer copies\n\n Parameters\n ----------\n arr1d : ndarray\n Array to remove nans from\n second_arr1d : ndarray or None\n A second array which will have the same positions removed as arr1d.\n overwrite_input : bool\n True if `arr1d` can be modified in place\n\n Returns\n -------\n res : ndarray\n Array with nan elements removed\n second_res : ndarray or None\n Second array with nan element positions of first array removed.\n overwrite_input : bool\n True if `res` can be modified in place, given the constraint on the\n input\n """\n if arr1d.dtype == object:\n # object arrays do not support `isnan` (gh-9009), so make a guess\n c = np.not_equal(arr1d, arr1d, dtype=bool)\n else:\n c = np.isnan(arr1d)\n\n s = np.nonzero(c)[0]\n if s.size == arr1d.size:\n warnings.warn("All-NaN slice encountered", RuntimeWarning,\n stacklevel=6)\n if second_arr1d is None:\n return arr1d[:0], None, True\n else:\n return arr1d[:0], second_arr1d[:0], True\n elif s.size == 0:\n return arr1d, second_arr1d, overwrite_input\n else:\n if not overwrite_input:\n arr1d = arr1d.copy()\n # select non-nans at end of array\n enonan = arr1d[-s.size:][~c[-s.size:]]\n # fill nans in beginning of array with non-nans of end\n arr1d[s[:enonan.size]] = enonan\n\n if second_arr1d is None:\n return arr1d[:-s.size], None, True\n else:\n if not overwrite_input:\n second_arr1d = second_arr1d.copy()\n enonan = second_arr1d[-s.size:][~c[-s.size:]]\n second_arr1d[s[:enonan.size]] = enonan\n\n return arr1d[:-s.size], second_arr1d[:-s.size], True\n\n\ndef _divide_by_count(a, b, out=None):\n """\n Compute a/b 
ignoring invalid results. If `a` is an array the division\n is done in place. If `a` is a scalar, then its type is preserved in the\n output. If out is None, then a is used instead so that the division\n is in place. Note that this is only called with `a` an inexact type.\n\n Parameters\n ----------\n a : {ndarray, numpy scalar}\n Numerator. Expected to be of inexact type but not checked.\n b : {ndarray, numpy scalar}\n Denominator.\n out : ndarray, optional\n Alternate output array in which to place the result. The default\n is ``None``; if provided, it must have the same shape as the\n expected output, but the type will be cast if necessary.\n\n Returns\n -------\n ret : {ndarray, numpy scalar}\n The return value is a/b. If `a` was an ndarray the division is done\n in place. If `a` is a numpy scalar, the division preserves its type.\n\n """\n with np.errstate(invalid='ignore', divide='ignore'):\n if isinstance(a, np.ndarray):\n if out is None:\n return np.divide(a, b, out=a, casting='unsafe')\n else:\n return np.divide(a, b, out=out, casting='unsafe')\n elif out is None:\n # Precaution against reduced object arrays\n try:\n return a.dtype.type(a / b)\n except AttributeError:\n return a / b\n else:\n # This is questionable, but currently a numpy scalar can\n # be output to a zero dimensional array.\n return np.divide(a, b, out=out, casting='unsafe')\n\n\ndef _nanmin_dispatcher(a, axis=None, out=None, keepdims=None,\n initial=None, where=None):\n return (a, out)\n\n\n@array_function_dispatch(_nanmin_dispatcher)\ndef nanmin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,\n where=np._NoValue):\n """\n Return minimum of an array or minimum along an axis, ignoring any NaNs.\n When all-NaN slices are encountered a ``RuntimeWarning`` is raised and\n Nan is returned for that slice.\n\n Parameters\n ----------\n a : array_like\n Array containing numbers whose minimum is desired. 
If `a` is not an\n array, a conversion is attempted.\n axis : {int, tuple of int, None}, optional\n Axis or axes along which the minimum is computed. The default is to compute\n the minimum of the flattened array.\n out : ndarray, optional\n Alternate output array in which to place the result. The default\n is ``None``; if provided, it must have the same shape as the\n expected output, but the type will be cast if necessary. See\n :ref:`ufuncs-output-type` for more details.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left\n in the result as dimensions with size one. With this option,\n the result will broadcast correctly against the original `a`.\n\n If the value is anything but the default, then\n `keepdims` will be passed through to the `min` method\n of sub-classes of `ndarray`. If the sub-classes methods\n does not implement `keepdims` any exceptions will be raised.\n initial : scalar, optional\n The maximum value of an output element. Must be present to allow\n computation on empty slice. See `~numpy.ufunc.reduce` for details.\n\n .. versionadded:: 1.22.0\n where : array_like of bool, optional\n Elements to compare for the minimum. See `~numpy.ufunc.reduce`\n for details.\n\n .. versionadded:: 1.22.0\n\n Returns\n -------\n nanmin : ndarray\n An array with the same shape as `a`, with the specified axis\n removed. If `a` is a 0-d array, or if axis is None, an ndarray\n scalar is returned. 
The same dtype as `a` is returned.\n\n See Also\n --------\n nanmax :\n The maximum value of an array along a given axis, ignoring any NaNs.\n amin :\n The minimum value of an array along a given axis, propagating any NaNs.\n fmin :\n Element-wise minimum of two arrays, ignoring any NaNs.\n minimum :\n Element-wise minimum of two arrays, propagating any NaNs.\n isnan :\n Shows which elements are Not a Number (NaN).\n isfinite:\n Shows which elements are neither NaN nor infinity.\n\n amax, fmax, maximum\n\n Notes\n -----\n NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic\n (IEEE 754). This means that Not a Number is not equivalent to infinity.\n Positive infinity is treated as a very large number and negative\n infinity is treated as a very small (i.e. negative) number.\n\n If the input has a integer type the function is equivalent to np.min.\n\n Examples\n --------\n >>> import numpy as np\n >>> a = np.array([[1, 2], [3, np.nan]])\n >>> np.nanmin(a)\n 1.0\n >>> np.nanmin(a, axis=0)\n array([1., 2.])\n >>> np.nanmin(a, axis=1)\n array([1., 3.])\n\n When positive infinity and negative infinity are present:\n\n >>> np.nanmin([1, 2, np.nan, np.inf])\n 1.0\n >>> np.nanmin([1, 2, np.nan, -np.inf])\n -inf\n\n """\n kwargs = {}\n if keepdims is not np._NoValue:\n kwargs['keepdims'] = keepdims\n if initial is not np._NoValue:\n kwargs['initial'] = initial\n if where is not np._NoValue:\n kwargs['where'] = where\n\n if (type(a) is np.ndarray or type(a) is np.memmap) and a.dtype != np.object_:\n # Fast, but not safe for subclasses of ndarray, or object arrays,\n # which do not implement isnan (gh-9009), or fmin correctly (gh-8975)\n res = np.fmin.reduce(a, axis=axis, out=out, **kwargs)\n if np.isnan(res).any():\n warnings.warn("All-NaN slice encountered", RuntimeWarning,\n stacklevel=2)\n else:\n # Slow, but safe for subclasses of ndarray\n a, mask = _replace_nan(a, +np.inf)\n res = np.amin(a, axis=axis, out=out, **kwargs)\n if mask is None:\n return 
res\n\n # Check for all-NaN axis\n kwargs.pop("initial", None)\n mask = np.all(mask, axis=axis, **kwargs)\n if np.any(mask):\n res = _copyto(res, np.nan, mask)\n warnings.warn("All-NaN axis encountered", RuntimeWarning,\n stacklevel=2)\n return res\n\n\ndef _nanmax_dispatcher(a, axis=None, out=None, keepdims=None,\n initial=None, where=None):\n return (a, out)\n\n\n@array_function_dispatch(_nanmax_dispatcher)\ndef nanmax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,\n where=np._NoValue):\n """\n Return the maximum of an array or maximum along an axis, ignoring any\n NaNs. When all-NaN slices are encountered a ``RuntimeWarning`` is\n raised and NaN is returned for that slice.\n\n Parameters\n ----------\n a : array_like\n Array containing numbers whose maximum is desired. If `a` is not an\n array, a conversion is attempted.\n axis : {int, tuple of int, None}, optional\n Axis or axes along which the maximum is computed. The default is to compute\n the maximum of the flattened array.\n out : ndarray, optional\n Alternate output array in which to place the result. The default\n is ``None``; if provided, it must have the same shape as the\n expected output, but the type will be cast if necessary. See\n :ref:`ufuncs-output-type` for more details.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left\n in the result as dimensions with size one. With this option,\n the result will broadcast correctly against the original `a`.\n If the value is anything but the default, then\n `keepdims` will be passed through to the `max` method\n of sub-classes of `ndarray`. If the sub-classes methods\n does not implement `keepdims` any exceptions will be raised.\n initial : scalar, optional\n The minimum value of an output element. Must be present to allow\n computation on empty slice. See `~numpy.ufunc.reduce` for details.\n\n .. versionadded:: 1.22.0\n where : array_like of bool, optional\n Elements to compare for the maximum. 
See `~numpy.ufunc.reduce`\n for details.\n\n .. versionadded:: 1.22.0\n\n Returns\n -------\n nanmax : ndarray\n An array with the same shape as `a`, with the specified axis removed.\n If `a` is a 0-d array, or if axis is None, an ndarray scalar is\n returned. The same dtype as `a` is returned.\n\n See Also\n --------\n nanmin :\n The minimum value of an array along a given axis, ignoring any NaNs.\n amax :\n The maximum value of an array along a given axis, propagating any NaNs.\n fmax :\n Element-wise maximum of two arrays, ignoring any NaNs.\n maximum :\n Element-wise maximum of two arrays, propagating any NaNs.\n isnan :\n Shows which elements are Not a Number (NaN).\n isfinite:\n Shows which elements are neither NaN nor infinity.\n\n amin, fmin, minimum\n\n Notes\n -----\n NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic\n (IEEE 754). This means that Not a Number is not equivalent to infinity.\n Positive infinity is treated as a very large number and negative\n infinity is treated as a very small (i.e. 
negative) number.\n\n If the input has a integer type the function is equivalent to np.max.\n\n Examples\n --------\n >>> import numpy as np\n >>> a = np.array([[1, 2], [3, np.nan]])\n >>> np.nanmax(a)\n 3.0\n >>> np.nanmax(a, axis=0)\n array([3., 2.])\n >>> np.nanmax(a, axis=1)\n array([2., 3.])\n\n When positive infinity and negative infinity are present:\n\n >>> np.nanmax([1, 2, np.nan, -np.inf])\n 2.0\n >>> np.nanmax([1, 2, np.nan, np.inf])\n inf\n\n """\n kwargs = {}\n if keepdims is not np._NoValue:\n kwargs['keepdims'] = keepdims\n if initial is not np._NoValue:\n kwargs['initial'] = initial\n if where is not np._NoValue:\n kwargs['where'] = where\n\n if (type(a) is np.ndarray or type(a) is np.memmap) and a.dtype != np.object_:\n # Fast, but not safe for subclasses of ndarray, or object arrays,\n # which do not implement isnan (gh-9009), or fmax correctly (gh-8975)\n res = np.fmax.reduce(a, axis=axis, out=out, **kwargs)\n if np.isnan(res).any():\n warnings.warn("All-NaN slice encountered", RuntimeWarning,\n stacklevel=2)\n else:\n # Slow, but safe for subclasses of ndarray\n a, mask = _replace_nan(a, -np.inf)\n res = np.amax(a, axis=axis, out=out, **kwargs)\n if mask is None:\n return res\n\n # Check for all-NaN axis\n kwargs.pop("initial", None)\n mask = np.all(mask, axis=axis, **kwargs)\n if np.any(mask):\n res = _copyto(res, np.nan, mask)\n warnings.warn("All-NaN axis encountered", RuntimeWarning,\n stacklevel=2)\n return res\n\n\ndef _nanargmin_dispatcher(a, axis=None, out=None, *, keepdims=None):\n return (a,)\n\n\n@array_function_dispatch(_nanargmin_dispatcher)\ndef nanargmin(a, axis=None, out=None, *, keepdims=np._NoValue):\n """\n Return the indices of the minimum values in the specified axis ignoring\n NaNs. For all-NaN slices ``ValueError`` is raised. Warning: the results\n cannot be trusted if a slice contains only NaNs and Infs.\n\n Parameters\n ----------\n a : array_like\n Input data.\n axis : int, optional\n Axis along which to operate. 
By default flattened input is used.\n out : array, optional\n If provided, the result will be inserted into this array. It should\n be of the appropriate shape and dtype.\n\n .. versionadded:: 1.22.0\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left\n in the result as dimensions with size one. With this option,\n the result will broadcast correctly against the array.\n\n .. versionadded:: 1.22.0\n\n Returns\n -------\n index_array : ndarray\n An array of indices or a single index value.\n\n See Also\n --------\n argmin, nanargmax\n\n Examples\n --------\n >>> import numpy as np\n >>> a = np.array([[np.nan, 4], [2, 3]])\n >>> np.argmin(a)\n 0\n >>> np.nanargmin(a)\n 2\n >>> np.nanargmin(a, axis=0)\n array([1, 1])\n >>> np.nanargmin(a, axis=1)\n array([1, 0])\n\n """\n a, mask = _replace_nan(a, np.inf)\n if mask is not None and mask.size:\n mask = np.all(mask, axis=axis)\n if np.any(mask):\n raise ValueError("All-NaN slice encountered")\n res = np.argmin(a, axis=axis, out=out, keepdims=keepdims)\n return res\n\n\ndef _nanargmax_dispatcher(a, axis=None, out=None, *, keepdims=None):\n return (a,)\n\n\n@array_function_dispatch(_nanargmax_dispatcher)\ndef nanargmax(a, axis=None, out=None, *, keepdims=np._NoValue):\n """\n Return the indices of the maximum values in the specified axis ignoring\n NaNs. For all-NaN slices ``ValueError`` is raised. Warning: the\n results cannot be trusted if a slice contains only NaNs and -Infs.\n\n\n Parameters\n ----------\n a : array_like\n Input data.\n axis : int, optional\n Axis along which to operate. By default flattened input is used.\n out : array, optional\n If provided, the result will be inserted into this array. It should\n be of the appropriate shape and dtype.\n\n .. versionadded:: 1.22.0\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left\n in the result as dimensions with size one. 
def _nansum_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None,
                       initial=None, where=None):
    # __array_function__ dispatcher: `a` and `out` participate in dispatch.
    return (a, out)


@array_function_dispatch(_nansum_dispatcher)
def nansum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
           initial=np._NoValue, where=np._NoValue):
    """
    Return the sum of array elements over a given axis, treating
    Not a Numbers (NaNs) as zero.

    Zero is returned for slices that are all-NaN or empty (NumPy > 1.9.0).
    If both positive and negative infinity are present in a slice, the
    sum is NaN.

    Parameters
    ----------
    a : array_like
        Array containing numbers whose sum is desired.
    axis : {int, tuple of int, None}, optional
        Axis or axes along which the sum is computed.  The default is to
        compute the sum of the flattened array.
    dtype : data-type, optional
        Type of the returned array and of the accumulator.  Defaults to
        the dtype of `a` (promoted to at least (u)intp for low-precision
        integers).  For inexact inputs, dtype must be inexact.
    out : ndarray, optional
        Alternate output array; must have the expected output's shape.
        The type will be cast if necessary; casting NaN to integer can
        yield unexpected results.
    keepdims : bool, optional
        If True, the reduced axes are kept as size-one dimensions so the
        result broadcasts against the original `a`.  Non-default values
        are passed through to sub-class ``sum`` methods.
    initial : scalar, optional
        Starting value for the sum.  See `~numpy.ufunc.reduce`.

        .. versionadded:: 1.22.0
    where : array_like of bool, optional
        Elements to include in the sum.  See `~numpy.ufunc.reduce`.

        .. versionadded:: 1.22.0

    Returns
    -------
    nansum : ndarray
        A new array holding the result, unless `out` is specified, in
        which case it is returned.

    See Also
    --------
    numpy.sum : Sum across array propagating NaNs.
    isnan : Show which elements are NaN.
    isfinite : Show which elements are not NaN or +/-inf.

    Examples
    --------
    >>> import numpy as np
    >>> np.nansum([1, np.nan])
    1.0
    >>> np.nansum(np.array([[1, 1], [1, np.nan]]))
    3.0
    >>> np.nansum([1, np.nan, np.inf])
    inf
    """
    # Substitute the additive identity for NaNs, then defer to np.sum.
    cleaned, _ = _replace_nan(a, 0)
    return np.sum(cleaned, axis=axis, dtype=dtype, out=out,
                  keepdims=keepdims, initial=initial, where=where)
def _nanprod_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None,
                        initial=None, where=None):
    # __array_function__ dispatcher: `a` and `out` participate in dispatch.
    return (a, out)


@array_function_dispatch(_nanprod_dispatcher)
def nanprod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
            initial=np._NoValue, where=np._NoValue):
    """
    Return the product of array elements over a given axis, treating
    Not a Numbers (NaNs) as ones.

    One is returned for slices that are all-NaN or empty.

    Parameters
    ----------
    a : array_like
        Array containing numbers whose product is desired.
    axis : {int, tuple of int, None}, optional
        Axis or axes along which the product is computed.  The default
        is to compute the product of the flattened array.
    dtype : data-type, optional
        Type of the returned array and of the accumulator.  Defaults to
        the dtype of `a` (promoted to at least (u)intp for low-precision
        integers).  For inexact inputs, dtype must be inexact.
    out : ndarray, optional
        Alternate output array; must have the expected output's shape.
        The type will be cast if necessary; casting NaN to integer can
        yield unexpected results.
    keepdims : bool, optional
        If True, the reduced axes are kept as size-one dimensions so the
        result broadcasts against the original `a`.
    initial : scalar, optional
        Starting value for this product.  See `~numpy.ufunc.reduce`.

        .. versionadded:: 1.22.0
    where : array_like of bool, optional
        Elements to include in the product.  See `~numpy.ufunc.reduce`.

        .. versionadded:: 1.22.0

    Returns
    -------
    nanprod : ndarray
        A new array holding the result, unless `out` is specified, in
        which case it is returned.

    See Also
    --------
    numpy.prod : Product across array propagating NaNs.
    isnan : Show which elements are NaN.

    Examples
    --------
    >>> import numpy as np
    >>> np.nanprod([1, np.nan])
    1.0
    >>> np.nanprod(np.array([[1, 2], [3, np.nan]]))
    6.0
    """
    # Substitute the multiplicative identity for NaNs, then defer to np.prod.
    cleaned, _ = _replace_nan(a, 1)
    return np.prod(cleaned, axis=axis, dtype=dtype, out=out,
                   keepdims=keepdims, initial=initial, where=where)
def _nancumsum_dispatcher(a, axis=None, dtype=None, out=None):
    # __array_function__ dispatcher: `a` and `out` participate in dispatch.
    return (a, out)


@array_function_dispatch(_nancumsum_dispatcher)
def nancumsum(a, axis=None, dtype=None, out=None):
    """
    Return the cumulative sum of array elements over a given axis,
    treating Not a Numbers (NaNs) as zero.

    The cumulative sum does not change when NaNs are encountered and
    leading NaNs are replaced by zeros.  Zeros are returned for slices
    that are all-NaN or empty.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int, optional
        Axis along which the cumulative sum is computed.  The default
        (None) is to compute the cumsum over the flattened array.
    dtype : dtype, optional
        Type of the returned array and of the accumulator.  Defaults to
        the dtype of `a`, promoted to the default platform integer for
        low-precision integer inputs.
    out : ndarray, optional
        Alternative output array; must have the same shape and buffer
        length as the expected output.  The type will be cast if needed.

    Returns
    -------
    nancumsum : ndarray
        A new array holding the result, unless `out` is specified, in
        which case it is returned.

    See Also
    --------
    numpy.cumsum : Cumulative sum across array propagating NaNs.
    isnan : Show which elements are NaN.

    Examples
    --------
    >>> import numpy as np
    >>> np.nancumsum([1, np.nan])
    array([1., 1.])
    >>> np.nancumsum(np.array([[1, 2], [3, np.nan]]))
    array([1., 3., 6., 6.])
    """
    # Substitute the additive identity for NaNs, then defer to np.cumsum.
    cleaned, _ = _replace_nan(a, 0)
    return np.cumsum(cleaned, axis=axis, dtype=dtype, out=out)
def _nancumprod_dispatcher(a, axis=None, dtype=None, out=None):
    # __array_function__ dispatcher: `a` and `out` participate in dispatch.
    return (a, out)


@array_function_dispatch(_nancumprod_dispatcher)
def nancumprod(a, axis=None, dtype=None, out=None):
    """
    Return the cumulative product of array elements over a given axis,
    treating Not a Numbers (NaNs) as one.

    The cumulative product does not change when NaNs are encountered and
    leading NaNs are replaced by ones.  Ones are returned for slices that
    are all-NaN or empty.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int, optional
        Axis along which the cumulative product is computed.  By default
        the input is flattened.
    dtype : dtype, optional
        Type of the returned array and of the accumulator.  Defaults to
        the dtype of `a`, promoted to the default platform integer for
        low-precision integer inputs.
    out : ndarray, optional
        Alternative output array; must have the same shape and buffer
        length as the expected output.  Values are cast if necessary.

    Returns
    -------
    nancumprod : ndarray
        A new array holding the result, unless `out` is specified, in
        which case it is returned.

    See Also
    --------
    numpy.cumprod : Cumulative product across array propagating NaNs.
    isnan : Show which elements are NaN.

    Examples
    --------
    >>> import numpy as np
    >>> np.nancumprod([1, np.nan])
    array([1., 1.])
    >>> np.nancumprod(np.array([[1, 2], [3, np.nan]]))
    array([1., 2., 6., 6.])
    """
    # Substitute the multiplicative identity for NaNs, then use np.cumprod.
    cleaned, _ = _replace_nan(a, 1)
    return np.cumprod(cleaned, axis=axis, dtype=dtype, out=out)
def _nanmean_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None,
                        *, where=None):
    # __array_function__ dispatcher: `a` and `out` participate in dispatch.
    return (a, out)


@array_function_dispatch(_nanmean_dispatcher)
def nanmean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
            *, where=np._NoValue):
    """
    Compute the arithmetic mean along the specified axis, ignoring NaNs.

    The mean is the sum of the non-NaN elements along the axis divided by
    the number of non-NaN elements.  `float64` intermediate and return
    values are used for integer inputs.  For all-NaN slices, NaN is
    returned and a `RuntimeWarning` is raised.

    Parameters
    ----------
    a : array_like
        Array containing numbers whose mean is desired.
    axis : {int, tuple of int, None}, optional
        Axis or axes along which the means are computed.  The default is
        to compute the mean of the flattened array.
    dtype : data-type, optional
        Type to use in computing the mean.  For integer inputs the
        default is `float64`; for inexact inputs it is the input dtype.
    out : ndarray, optional
        Alternate output array; must have the expected output's shape.
        The type will be cast if necessary.
    keepdims : bool, optional
        If True, the reduced axes are kept as size-one dimensions so the
        result broadcasts against the original `a`.  Non-default values
        are passed through to sub-class ``mean``/``sum`` methods.
    where : array_like of bool, optional
        Elements to include in the mean.  See `~numpy.ufunc.reduce`.

        .. versionadded:: 1.22.0

    Returns
    -------
    m : ndarray, see dtype parameter above
        If `out=None`, a new array containing the mean values; otherwise
        a reference to the output array.  NaN is returned for slices that
        contain only NaNs.

    See Also
    --------
    average : Weighted average
    mean : Arithmetic mean taken while not ignoring NaNs
    var, nanvar

    Notes
    -----
    For floating-point input the mean is computed at the input's
    precision; a higher-precision accumulator can be requested with
    `dtype` to limit round-off, especially for `float32`.

    Examples
    --------
    >>> import numpy as np
    >>> a = np.array([[1, np.nan], [3, 4]])
    >>> np.nanmean(a)
    2.6666666666666665
    >>> np.nanmean(a, axis=0)
    array([2., 4.])
    """
    arr, nan_mask = _replace_nan(a, 0)
    if nan_mask is None:
        # No NaNs present: the plain mean is both faster and exact.
        return np.mean(arr, axis=axis, dtype=dtype, out=out,
                       keepdims=keepdims, where=where)

    if dtype is not None:
        dtype = np.dtype(dtype)
        if not issubclass(dtype.type, np.inexact):
            raise TypeError("If a is inexact, then dtype must be inexact")
    if out is not None and not issubclass(out.dtype.type, np.inexact):
        raise TypeError("If a is inexact, then out must be inexact")

    # Count of valid (non-NaN) elements and their sum, per output cell.
    cnt = np.sum(~nan_mask, axis=axis, dtype=np.intp, keepdims=keepdims,
                 where=where)
    tot = np.sum(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims,
                 where=where)
    avg = _divide_by_count(tot, cnt, out=out)

    if (cnt == 0).any():
        warnings.warn("Mean of empty slice", RuntimeWarning, stacklevel=2)
        # NaN is the only possible bad value, so no further
        # action is needed to handle bad results.
    return avg
def _nanmedian1d(arr1d, overwrite_input=False):
    """
    Median of a rank-1 array, ignoring NaNs.

    See `nanmedian` for parameter usage.
    """
    parsed, _, overwrite_input = _remove_nan_1d(
        arr1d, overwrite_input=overwrite_input,
    )
    if parsed.size == 0:
        # All elements were NaN: return the trailing element so that a
        # nan-esque scalar of the appropriate type (and unit) is produced
        # for `timedelta64` and `complexfloating` inputs.
        return arr1d[-1]
    return np.median(parsed, overwrite_input=overwrite_input)


def _nanmedian(a, axis=None, out=None, overwrite_input=False):
    """
    NaN-ignoring median without extended-axis or keepdims support; those
    are layered on top via _ureduce.  See `nanmedian` for parameter usage.
    """
    if axis is None or a.ndim == 1:
        flat = a.ravel()
        if out is None:
            return _nanmedian1d(flat, overwrite_input)
        out[...] = _nanmedian1d(flat, overwrite_input)
        return out
    # For small medians, sort + indexing is still faster than
    # apply_along_axis (benchmarked with shuffled (50, 50, x) arrays
    # containing a few NaNs).
    if a.shape[axis] < 600:
        return _nanmedian_small(a, axis, out, overwrite_input)
    result = np.apply_along_axis(_nanmedian1d, axis, a, overwrite_input)
    if out is not None:
        out[...] = result
    return result


def _nanmedian_small(a, axis=None, out=None, overwrite_input=False):
    """
    Median via masked-array sort + indexing; faster for small medians
    along multiple dimensions due to the high overhead of
    apply_along_axis.  See `nanmedian` for parameter usage.
    """
    masked = np.ma.masked_array(a, np.isnan(a))
    m = np.ma.median(masked, axis=axis, overwrite_input=overwrite_input)
    # One warning per masked (all-NaN) result element.
    for _ in range(np.count_nonzero(m.mask.ravel())):
        warnings.warn("All-NaN slice encountered", RuntimeWarning,
                      stacklevel=5)

    # NaT is the NaN analogue for timedelta results.
    fill_value = np.timedelta64("NaT") if m.dtype.kind == "m" else np.nan
    if out is not None:
        out[...] = m.filled(fill_value)
        return out
    return m.filled(fill_value)
def _nanmedian_dispatcher(
        a, axis=None, out=None, overwrite_input=None, keepdims=None):
    # __array_function__ dispatcher: `a` and `out` participate in dispatch.
    return (a, out)


@array_function_dispatch(_nanmedian_dispatcher)
def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=np._NoValue):
    """
    Compute the median along the specified axis, while ignoring NaNs.

    Parameters
    ----------
    a : array_like
        Input array or object that can be converted to an array.
    axis : {int, sequence of int, None}, optional
        Axis or axes along which the medians are computed.  The default
        is to compute the median along a flattened version of the array.
    out : ndarray, optional
        Alternative output array; must have the same shape and buffer
        length as the expected output.  The output type is cast if
        necessary.
    overwrite_input : bool, optional
        If True, allow use of memory of input array `a` for calculations;
        `a` may be modified (probably partially sorted).  Default False.
        If True and `a` is not already an `ndarray`, an error is raised.
    keepdims : bool, optional
        If True, the reduced axes are kept as size-one dimensions so the
        result broadcasts against the original `a`.  Non-default values
        are passed through (for empty arrays) to the underlying ``mean``;
        sub-classes whose ``mean`` lacks `keepdims` raise a RuntimeError.

    Returns
    -------
    median : ndarray
        A new array holding the result.  Integer or sub-``float64`` float
        input gives a ``np.float64`` result; otherwise the output dtype
        matches the input.  If `out` is specified, that array is returned.

    See Also
    --------
    mean, median, percentile

    Notes
    -----
    For a vector ``V`` of length ``N``, the median is
    ``V_sorted[(N-1)/2]`` for odd ``N`` and the average of the two middle
    values of ``V_sorted`` for even ``N``.

    Examples
    --------
    >>> import numpy as np
    >>> a = np.array([[10.0, np.nan, 4], [3, 2, 1]])
    >>> np.nanmedian(a)
    3.0
    >>> np.nanmedian(a, axis=0)
    array([6.5, 2. , 2.5])
    """
    a = np.asanyarray(a)
    # apply_along_axis (used inside _nanmedian) doesn't handle empty
    # arrays well, so deal with them up front.
    if a.size == 0:
        return np.nanmean(a, axis, out=out, keepdims=keepdims)

    return fnb._ureduce(a, func=_nanmedian, keepdims=keepdims, axis=axis,
                        out=out, overwrite_input=overwrite_input)
def _nanpercentile_dispatcher(
        a, q, axis=None, out=None, overwrite_input=None,
        method=None, keepdims=None, *, weights=None, interpolation=None):
    # __array_function__ dispatcher: arrays relevant for dispatch.
    return (a, q, out, weights)


@array_function_dispatch(_nanpercentile_dispatcher)
def nanpercentile(
    a,
    q,
    axis=None,
    out=None,
    overwrite_input=False,
    method="linear",
    keepdims=np._NoValue,
    *,
    weights=None,
    interpolation=None,
):
    """
    Compute the qth percentile of the data along the specified axis,
    while ignoring nan values.

    Behaves like `numpy.quantile` with argument ``q/100`` (ignoring nan
    values); see `numpy.quantile` for full details.

    Parameters
    ----------
    a : array_like
        Input array or object convertible to an array, containing nan
        values to be ignored.
    q : array_like of float
        Percentile or sequence of percentiles, each in [0, 100].
    axis : {int, tuple of int, None}, optional
        Axis or axes along which the percentiles are computed.  Default
        is over a flattened version of the array.
    out : ndarray, optional
        Alternative output array with the expected output's shape and
        buffer length; the output type is cast if necessary.
    overwrite_input : bool, optional
        If True, `a` may be modified by intermediate calculations to save
        memory; its contents afterwards are undefined.
    method : str, optional
        Estimation method.  Options, by their R type in the H&F paper
        [1]_: 'inverted_cdf' (1), 'averaged_inverted_cdf' (2),
        'closest_observation' (3), 'interpolated_inverted_cdf' (4),
        'hazen' (5), 'weibull' (6), 'linear' (7, default),
        'median_unbiased' (8), 'normal_unbiased' (9); plus the
        discontinuous 'lower', 'higher', 'midpoint' and 'nearest'
        variations of 'linear'.

        .. versionchanged:: 1.22.0
            Previously called "interpolation", offering only "linear"
            and the last four options.
    keepdims : bool, optional
        If True, the reduced axes are kept as size-one dimensions so the
        result broadcasts against the original `a`.  Non-default values
        are passed through (for empty arrays) to the underlying ``mean``;
        sub-classes whose ``mean`` lacks `keepdims` raise a RuntimeError.
    weights : array_like, optional
        Weights associated with the values in `a`; either 1-D with the
        length of the reduced axis, or the same shape as `a`.  Only
        ``method="inverted_cdf"`` supports weights.

        .. versionadded:: 2.0.0
    interpolation : str, optional
        Deprecated name for the method keyword argument.

        .. deprecated:: 1.22.0

    Returns
    -------
    percentile : scalar or ndarray
        Scalar for a single percentile with ``axis=None``; otherwise the
        first axis corresponds to the percentiles.  Integer or
        sub-``float64`` float input gives ``float64`` output.  If `out`
        is specified, that array is returned.

    See Also
    --------
    nanmean
    nanmedian : equivalent to ``nanpercentile(..., 50)``
    percentile, median, mean
    nanquantile : equivalent to nanpercentile, except q in range [0, 1].

    Examples
    --------
    >>> import numpy as np
    >>> a = np.array([[10., np.nan, 4.], [3., 2., 1.]])
    >>> np.nanpercentile(a, 50)
    3.0
    >>> np.nanpercentile(a, 50, axis=0)
    array([6.5, 2. , 2.5])

    References
    ----------
    .. [1] R. J. Hyndman and Y. Fan,
       "Sample quantiles in statistical packages,"
       The American Statistician, 50(4), pp. 361-365, 1996
    """
    if interpolation is not None:
        # Legacy keyword: emit the deprecation and fold into `method`.
        method = fnb._check_interpolation_as_method(
            method, interpolation, "nanpercentile")

    a = np.asanyarray(a)
    if a.dtype.kind == "c":
        raise TypeError("a must be an array of real numbers")

    # Rescale percentiles to quantiles; `out=...` asks the ufunc for an
    # ndarray result (no scalar decay) — NumPy 2.x ufunc behavior.
    q = np.true_divide(
        q, a.dtype.type(100) if a.dtype.kind == "f" else 100, out=...)
    if not fnb._quantile_is_valid(q):
        raise ValueError("Percentiles must be in the range [0, 100]")

    if weights is not None:
        if method != "inverted_cdf":
            msg = ("Only method 'inverted_cdf' supports weights. "
                   f"Got: {method}.")
            raise ValueError(msg)
        if axis is not None:
            axis = _nx.normalize_axis_tuple(axis, a.ndim, argname="axis")
        weights = _weights_are_valid(weights=weights, a=a, axis=axis)
        if np.any(weights < 0):
            raise ValueError("Weights must be non-negative.")

    return _nanquantile_unchecked(
        a, q, axis, out, overwrite_input, method, keepdims, weights)
def _nanquantile_dispatcher(a, q, axis=None, out=None, overwrite_input=None,
                            method=None, keepdims=None, *, weights=None,
                            interpolation=None):
    # __array_function__ dispatcher: arrays relevant for dispatch.
    return (a, q, out, weights)


@array_function_dispatch(_nanquantile_dispatcher)
def nanquantile(
    a,
    q,
    axis=None,
    out=None,
    overwrite_input=False,
    method="linear",
    keepdims=np._NoValue,
    *,
    weights=None,
    interpolation=None,
):
    """
    Compute the qth quantile of the data along the specified axis,
    while ignoring nan values.

    Behaves like `numpy.quantile` (ignoring nan values); see
    `numpy.quantile` for full details.

    Parameters
    ----------
    a : array_like
        Input array or object convertible to an array, containing nan
        values to be ignored.
    q : array_like of float
        Probability or sequence of probabilities, each in [0, 1].
    axis : {int, tuple of int, None}, optional
        Axis or axes along which the quantiles are computed.  The default
        is over a flattened version of the array.
    out : ndarray, optional
        Alternative output array with the expected output's shape and
        buffer length; the output type is cast if necessary.
    overwrite_input : bool, optional
        If True, `a` may be modified by intermediate calculations to save
        memory; its contents afterwards are undefined.
    method : str, optional
        Estimation method.  Options, by their R type in the H&F paper
        [1]_: 'inverted_cdf' (1), 'averaged_inverted_cdf' (2),
        'closest_observation' (3), 'interpolated_inverted_cdf' (4),
        'hazen' (5), 'weibull' (6), 'linear' (7, default),
        'median_unbiased' (8), 'normal_unbiased' (9); plus the
        discontinuous 'lower', 'higher', 'midpoint' and 'nearest'
        variations of 'linear'.

        .. versionchanged:: 1.22.0
            Previously called "interpolation", offering only "linear"
            and the last four options.
    keepdims : bool, optional
        If True, the reduced axes are kept as size-one dimensions so the
        result broadcasts against the original `a`.  Non-default values
        are passed through (for empty arrays) to the underlying ``mean``;
        sub-classes whose ``mean`` lacks `keepdims` raise a RuntimeError.
    weights : array_like, optional
        Weights associated with the values in `a`; either 1-D with the
        length of the reduced axis, or the same shape as `a`.  Only
        ``method="inverted_cdf"`` supports weights.

        .. versionadded:: 2.0.0
    interpolation : str, optional
        Deprecated name for the method keyword argument.

        .. deprecated:: 1.22.0

    Returns
    -------
    quantile : scalar or ndarray
        Scalar for a single probability with ``axis=None``; otherwise the
        first axis corresponds to the quantiles.  Integer or
        sub-``float64`` float input gives ``float64`` output.  If `out`
        is specified, that array is returned.

    See Also
    --------
    quantile
    nanmean, nanmedian
    nanmedian : equivalent to ``nanquantile(..., 0.5)``
    nanpercentile : same as nanquantile, but with q in the range [0, 100].

    Examples
    --------
    >>> import numpy as np
    >>> a = np.array([[10., np.nan, 4.], [3., 2., 1.]])
    >>> np.nanquantile(a, 0.5)
    3.0
    >>> np.nanquantile(a, 0.5, axis=0)
    array([6.5, 2. , 2.5])

    References
    ----------
    .. [1] R. J. Hyndman and Y. Fan,
       "Sample quantiles in statistical packages,"
       The American Statistician, 50(4), pp. 361-365, 1996
    """

    if interpolation is not None:
        # Legacy keyword: emit the deprecation and fold into `method`.
        method = fnb._check_interpolation_as_method(
            method, interpolation, "nanquantile")

    a = np.asanyarray(a)
    if a.dtype.kind == "c":
        raise TypeError("a must be an array of real numbers")

    # Use dtype of array if possible (e.g., if q is a python int or float).
    if isinstance(q, (int, float)) and a.dtype.kind == "f":
        q = np.asanyarray(q, dtype=a.dtype)
    else:
        q = np.asanyarray(q)

    if not fnb._quantile_is_valid(q):
        raise ValueError("Quantiles must be in the range [0, 1]")

    if weights is not None:
        if method != "inverted_cdf":
            msg = ("Only method 'inverted_cdf' supports weights. "
                   f"Got: {method}.")
            raise ValueError(msg)
        if axis is not None:
            axis = _nx.normalize_axis_tuple(axis, a.ndim, argname="axis")
        weights = _weights_are_valid(weights=weights, a=a, axis=axis)
        if np.any(weights < 0):
            raise ValueError("Weights must be non-negative.")

    return _nanquantile_unchecked(
        a, q, axis, out, overwrite_input, method, keepdims, weights)
def _nanquantile_unchecked(
    a,
    q,
    axis=None,
    out=None,
    overwrite_input=False,
    method="linear",
    keepdims=np._NoValue,
    weights=None,
):
    """Assumes that q is in [0, 1], and is an ndarray"""
    # apply_along_axis in _nanpercentile doesn't handle empty arrays well,
    # so deal them upfront
    if a.size == 0:
        return np.nanmean(a, axis, out=out, keepdims=keepdims)
    return fnb._ureduce(a,
                        func=_nanquantile_ureduce_func,
                        q=q,
                        weights=weights,
                        keepdims=keepdims,
                        axis=axis,
                        out=out,
                        overwrite_input=overwrite_input,
                        method=method)


def _nanquantile_ureduce_func(
    a: np.ndarray,
    q: np.ndarray,
    # `weights` may legitimately be None (all but the `inverted_cdf`
    # method); the previous `np.array` annotations used the array
    # *factory function* as a type, which is not a valid type hint.
    weights: np.ndarray | None,
    axis: int | None = None,
    out=None,
    overwrite_input: bool = False,
    method="linear",
):
    """
    Private function that doesn't support extended axis or keepdims.
    These methods are extended to this function using _ureduce
    See nanpercentile for parameter usage
    """
    if axis is None or a.ndim == 1:
        part = a.ravel()
        wgt = None if weights is None else weights.ravel()
        result = _nanquantile_1d(part, q, overwrite_input, method, weights=wgt)
        # Note that this code could try to fill in `out` right away
    elif weights is None:
        result = np.apply_along_axis(_nanquantile_1d, axis, a, q,
                                     overwrite_input, method, weights)
        # apply_along_axis fills in collapsed axis with results.
        # Move those axes to the beginning to match percentile's
        # convention.
        if q.ndim != 0:
            from_ax = [axis + i for i in range(q.ndim)]
            result = np.moveaxis(result, from_ax, list(range(q.ndim)))
    else:
        # We need to apply along axis over 2 arrays, a and weights.
        # move operation axes to end for simplicity:
        a = np.moveaxis(a, axis, -1)
        if weights is not None:
            weights = np.moveaxis(weights, axis, -1)
        if out is not None:
            result = out
        else:
            # weights are limited to `inverted_cdf` so the result dtype
            # is known to be identical to that of `a` here:
            result = np.empty_like(a, shape=q.shape + a.shape[:-1])

        for ii in np.ndindex(a.shape[:-1]):
            result[(...,) + ii] = _nanquantile_1d(
                a[ii], q, weights=weights[ii],
                overwrite_input=overwrite_input, method=method,
            )
        # This path dealt with `out` already...
        return result

    if out is not None:
        out[...] = result
    return result
Compute quantile ignoring NaNs.\n See nanpercentile for parameter usage\n """\n # TODO: What to do when arr1d = [1, np.nan] and weights = [0, 1]?\n arr1d, weights, overwrite_input = _remove_nan_1d(arr1d,\n second_arr1d=weights, overwrite_input=overwrite_input)\n if arr1d.size == 0:\n # convert to scalar\n return np.full(q.shape, np.nan, dtype=arr1d.dtype)[()]\n\n return fnb._quantile_unchecked(\n arr1d,\n q,\n overwrite_input=overwrite_input,\n method=method,\n weights=weights,\n )\n\n\ndef _nanvar_dispatcher(a, axis=None, dtype=None, out=None, ddof=None,\n keepdims=None, *, where=None, mean=None,\n correction=None):\n return (a, out)\n\n\n@array_function_dispatch(_nanvar_dispatcher)\ndef nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue,\n *, where=np._NoValue, mean=np._NoValue, correction=np._NoValue):\n """\n Compute the variance along the specified axis, while ignoring NaNs.\n\n Returns the variance of the array elements, a measure of the spread of\n a distribution. The variance is computed for the flattened array by\n default, otherwise over the specified axis.\n\n For all-NaN slices or slices with zero degrees of freedom, NaN is\n returned and a `RuntimeWarning` is raised.\n\n Parameters\n ----------\n a : array_like\n Array containing numbers whose variance is desired. If `a` is not an\n array, a conversion is attempted.\n axis : {int, tuple of int, None}, optional\n Axis or axes along which the variance is computed. The default is to compute\n the variance of the flattened array.\n dtype : data-type, optional\n Type to use in computing the variance. For arrays of integer type\n the default is `float64`; for arrays of float types it is the same as\n the array type.\n out : ndarray, optional\n Alternate output array in which to place the result. 
It must have\n the same shape as the expected output, but the type is cast if\n necessary.\n ddof : {int, float}, optional\n "Delta Degrees of Freedom": the divisor used in the calculation is\n ``N - ddof``, where ``N`` represents the number of non-NaN\n elements. By default `ddof` is zero.\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left\n in the result as dimensions with size one. With this option,\n the result will broadcast correctly against the original `a`.\n where : array_like of bool, optional\n Elements to include in the variance. See `~numpy.ufunc.reduce` for\n details.\n\n .. versionadded:: 1.22.0\n\n mean : array_like, optional\n Provide the mean to prevent its recalculation. The mean should have\n a shape as if it was calculated with ``keepdims=True``.\n The axis for the calculation of the mean should be the same as used in\n the call to this var function.\n\n .. versionadded:: 2.0.0\n\n correction : {int, float}, optional\n Array API compatible name for the ``ddof`` parameter. Only one of them\n can be provided at the same time.\n\n .. versionadded:: 2.0.0\n\n Returns\n -------\n variance : ndarray, see dtype parameter above\n If `out` is None, return a new array containing the variance,\n otherwise return a reference to the output array. If ddof is >= the\n number of non-NaN elements in a slice or the slice contains only\n NaNs, then the result for that slice is NaN.\n\n See Also\n --------\n std : Standard deviation\n mean : Average\n var : Variance while not ignoring NaNs\n nanstd, nanmean\n :ref:`ufuncs-output-type`\n\n Notes\n -----\n The variance is the average of the squared deviations from the mean,\n i.e., ``var = mean(abs(x - x.mean())**2)``.\n\n The mean is normally calculated as ``x.sum() / N``, where ``N = len(x)``.\n If, however, `ddof` is specified, the divisor ``N - ddof`` is used\n instead. 
In standard statistical practice, ``ddof=1`` provides an\n unbiased estimator of the variance of a hypothetical infinite\n population. ``ddof=0`` provides a maximum likelihood estimate of the\n variance for normally distributed variables.\n\n Note that for complex numbers, the absolute value is taken before\n squaring, so that the result is always real and nonnegative.\n\n For floating-point input, the variance is computed using the same\n precision the input has. Depending on the input data, this can cause\n the results to be inaccurate, especially for `float32` (see example\n below). Specifying a higher-accuracy accumulator using the ``dtype``\n keyword can alleviate this issue.\n\n For this function to work on sub-classes of ndarray, they must define\n `sum` with the kwarg `keepdims`\n\n Examples\n --------\n >>> import numpy as np\n >>> a = np.array([[1, np.nan], [3, 4]])\n >>> np.nanvar(a)\n 1.5555555555555554\n >>> np.nanvar(a, axis=0)\n array([1., 0.])\n >>> np.nanvar(a, axis=1)\n array([0., 0.25]) # may vary\n\n """\n arr, mask = _replace_nan(a, 0)\n if mask is None:\n return np.var(arr, axis=axis, dtype=dtype, out=out, ddof=ddof,\n keepdims=keepdims, where=where, mean=mean,\n correction=correction)\n\n if dtype is not None:\n dtype = np.dtype(dtype)\n if dtype is not None and not issubclass(dtype.type, np.inexact):\n raise TypeError("If a is inexact, then dtype must be inexact")\n if out is not None and not issubclass(out.dtype.type, np.inexact):\n raise TypeError("If a is inexact, then out must be inexact")\n\n if correction != np._NoValue:\n if ddof != 0:\n raise ValueError(\n "ddof and correction can't be provided simultaneously."\n )\n else:\n ddof = correction\n\n # Compute mean\n if type(arr) is np.matrix:\n _keepdims = np._NoValue\n else:\n _keepdims = True\n\n cnt = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=_keepdims,\n where=where)\n\n if mean is not np._NoValue:\n avg = mean\n else:\n # we need to special case matrix for reverse 
compatibility\n # in order for this to work, these sums need to be called with\n # keepdims=True, however matrix now raises an error in this case, but\n # the reason that it drops the keepdims kwarg is to force keepdims=True\n # so this used to work by serendipity.\n avg = np.sum(arr, axis=axis, dtype=dtype,\n keepdims=_keepdims, where=where)\n avg = _divide_by_count(avg, cnt)\n\n # Compute squared deviation from mean.\n np.subtract(arr, avg, out=arr, casting='unsafe', where=where)\n arr = _copyto(arr, 0, mask)\n if issubclass(arr.dtype.type, np.complexfloating):\n sqr = np.multiply(arr, arr.conj(), out=arr, where=where).real\n else:\n sqr = np.multiply(arr, arr, out=arr, where=where)\n\n # Compute variance.\n var = np.sum(sqr, axis=axis, dtype=dtype, out=out, keepdims=keepdims,\n where=where)\n\n # Precaution against reduced object arrays\n try:\n var_ndim = var.ndim\n except AttributeError:\n var_ndim = np.ndim(var)\n if var_ndim < cnt.ndim:\n # Subclasses of ndarray may ignore keepdims, so check here.\n cnt = cnt.squeeze(axis)\n dof = cnt - ddof\n var = _divide_by_count(var, dof)\n\n isbad = (dof <= 0)\n if np.any(isbad):\n warnings.warn("Degrees of freedom <= 0 for slice.", RuntimeWarning,\n stacklevel=2)\n # NaN, inf, or negative numbers are all possible bad\n # values, so explicitly replace them with NaN.\n var = _copyto(var, np.nan, isbad)\n return var\n\n\ndef _nanstd_dispatcher(a, axis=None, dtype=None, out=None, ddof=None,\n keepdims=None, *, where=None, mean=None,\n correction=None):\n return (a, out)\n\n\n@array_function_dispatch(_nanstd_dispatcher)\ndef nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue,\n *, where=np._NoValue, mean=np._NoValue, correction=np._NoValue):\n """\n Compute the standard deviation along the specified axis, while\n ignoring NaNs.\n\n Returns the standard deviation, a measure of the spread of a\n distribution, of the non-NaN array elements. 
The standard deviation is\n computed for the flattened array by default, otherwise over the\n specified axis.\n\n For all-NaN slices or slices with zero degrees of freedom, NaN is\n returned and a `RuntimeWarning` is raised.\n\n Parameters\n ----------\n a : array_like\n Calculate the standard deviation of the non-NaN values.\n axis : {int, tuple of int, None}, optional\n Axis or axes along which the standard deviation is computed. The default is\n to compute the standard deviation of the flattened array.\n dtype : dtype, optional\n Type to use in computing the standard deviation. For arrays of\n integer type the default is float64, for arrays of float types it\n is the same as the array type.\n out : ndarray, optional\n Alternative output array in which to place the result. It must have\n the same shape as the expected output but the type (of the\n calculated values) will be cast if necessary.\n ddof : {int, float}, optional\n Means Delta Degrees of Freedom. The divisor used in calculations\n is ``N - ddof``, where ``N`` represents the number of non-NaN\n elements. By default `ddof` is zero.\n\n keepdims : bool, optional\n If this is set to True, the axes which are reduced are left\n in the result as dimensions with size one. With this option,\n the result will broadcast correctly against the original `a`.\n\n If this value is anything but the default it is passed through\n as-is to the relevant functions of the sub-classes. If these\n functions do not have a `keepdims` kwarg, a RuntimeError will\n be raised.\n where : array_like of bool, optional\n Elements to include in the standard deviation.\n See `~numpy.ufunc.reduce` for details.\n\n .. versionadded:: 1.22.0\n\n mean : array_like, optional\n Provide the mean to prevent its recalculation. The mean should have\n a shape as if it was calculated with ``keepdims=True``.\n The axis for the calculation of the mean should be the same as used in\n the call to this std function.\n\n .. 
versionadded:: 2.0.0\n\n correction : {int, float}, optional\n Array API compatible name for the ``ddof`` parameter. Only one of them\n can be provided at the same time.\n\n .. versionadded:: 2.0.0\n\n Returns\n -------\n standard_deviation : ndarray, see dtype parameter above.\n If `out` is None, return a new array containing the standard\n deviation, otherwise return a reference to the output array. If\n ddof is >= the number of non-NaN elements in a slice or the slice\n contains only NaNs, then the result for that slice is NaN.\n\n See Also\n --------\n var, mean, std\n nanvar, nanmean\n :ref:`ufuncs-output-type`\n\n Notes\n -----\n The standard deviation is the square root of the average of the squared\n deviations from the mean: ``std = sqrt(mean(abs(x - x.mean())**2))``.\n\n The average squared deviation is normally calculated as\n ``x.sum() / N``, where ``N = len(x)``. If, however, `ddof` is\n specified, the divisor ``N - ddof`` is used instead. In standard\n statistical practice, ``ddof=1`` provides an unbiased estimator of the\n variance of the infinite population. ``ddof=0`` provides a maximum\n likelihood estimate of the variance for normally distributed variables.\n The standard deviation computed in this function is the square root of\n the estimated variance, so even with ``ddof=1``, it will not be an\n unbiased estimate of the standard deviation per se.\n\n Note that, for complex numbers, `std` takes the absolute value before\n squaring, so that the result is always real and nonnegative.\n\n For floating-point input, the *std* is computed using the same\n precision the input has. Depending on the input data, this can cause\n the results to be inaccurate, especially for float32 (see example\n below). 
Specifying a higher-accuracy accumulator using the `dtype`\n keyword can alleviate this issue.\n\n Examples\n --------\n >>> import numpy as np\n >>> a = np.array([[1, np.nan], [3, 4]])\n >>> np.nanstd(a)\n 1.247219128924647\n >>> np.nanstd(a, axis=0)\n array([1., 0.])\n >>> np.nanstd(a, axis=1)\n array([0., 0.5]) # may vary\n\n """\n var = nanvar(a, axis=axis, dtype=dtype, out=out, ddof=ddof,\n keepdims=keepdims, where=where, mean=mean,\n correction=correction)\n if isinstance(var, np.ndarray):\n std = np.sqrt(var, out=var)\n elif hasattr(var, 'dtype'):\n std = var.dtype.type(np.sqrt(var))\n else:\n std = np.sqrt(var)\n return std\n | .venv\Lib\site-packages\numpy\lib\_nanfunctions_impl.py | _nanfunctions_impl.py | Python | 73,973 | 0.75 | 0.130435 | 0.037725 | awesome-app | 739 | 2023-08-04T15:47:25.957139 | GPL-3.0 | false | 5ff24b874321f6631cbba397fa596408 |
from numpy._core.fromnumeric import (\n amax,\n amin,\n argmax,\n argmin,\n cumprod,\n cumsum,\n mean,\n prod,\n std,\n sum,\n var,\n)\nfrom numpy.lib._function_base_impl import (\n median,\n percentile,\n quantile,\n)\n\n__all__ = [\n "nansum",\n "nanmax",\n "nanmin",\n "nanargmax",\n "nanargmin",\n "nanmean",\n "nanmedian",\n "nanpercentile",\n "nanvar",\n "nanstd",\n "nanprod",\n "nancumsum",\n "nancumprod",\n "nanquantile",\n]\n\n# NOTE: In reality these functions are not aliases but distinct functions\n# with identical signatures.\nnanmin = amin\nnanmax = amax\nnanargmin = argmin\nnanargmax = argmax\nnansum = sum\nnanprod = prod\nnancumsum = cumsum\nnancumprod = cumprod\nnanmean = mean\nnanvar = var\nnanstd = std\nnanmedian = median\nnanpercentile = percentile\nnanquantile = quantile\n | .venv\Lib\site-packages\numpy\lib\_nanfunctions_impl.pyi | _nanfunctions_impl.pyi | Other | 885 | 0.95 | 0 | 0.04 | python-kit | 554 | 2025-07-02T08:09:12.958949 | MIT | false | 9d08f654b951a1eb3ce130a4757b63d4 |
"""\nIO related functions.\n"""\nimport contextlib\nimport functools\nimport itertools\nimport operator\nimport os\nimport pickle\nimport re\nimport warnings\nimport weakref\nfrom collections.abc import Mapping\nfrom operator import itemgetter\n\nimport numpy as np\nfrom numpy._core import overrides\nfrom numpy._core._multiarray_umath import _load_from_filelike\nfrom numpy._core.multiarray import packbits, unpackbits\nfrom numpy._core.overrides import finalize_array_function_like, set_module\nfrom numpy._utils import asbytes, asunicode\n\nfrom . import format\nfrom ._datasource import DataSource # noqa: F401\nfrom ._format_impl import _MAX_HEADER_SIZE\nfrom ._iotools import (\n ConversionWarning,\n ConverterError,\n ConverterLockError,\n LineSplitter,\n NameValidator,\n StringConverter,\n _decode_line,\n _is_string_like,\n easy_dtype,\n flatten_dtype,\n has_nested_fields,\n)\n\n__all__ = [\n 'savetxt', 'loadtxt', 'genfromtxt', 'load', 'save', 'savez',\n 'savez_compressed', 'packbits', 'unpackbits', 'fromregex'\n ]\n\n\narray_function_dispatch = functools.partial(\n overrides.array_function_dispatch, module='numpy')\n\n\nclass BagObj:\n """\n BagObj(obj)\n\n Convert attribute look-ups to getitems on the object passed in.\n\n Parameters\n ----------\n obj : class instance\n Object on which attribute look-up is performed.\n\n Examples\n --------\n >>> import numpy as np\n >>> from numpy.lib._npyio_impl import BagObj as BO\n >>> class BagDemo:\n ... def __getitem__(self, key): # An instance of BagObj(BagDemo)\n ... # will call this method when any\n ... # attribute look-up is required\n ... result = "Doesn't matter what you want, "\n ... 
return result + "you're gonna get this"\n ...\n >>> demo_obj = BagDemo()\n >>> bagobj = BO(demo_obj)\n >>> bagobj.hello_there\n "Doesn't matter what you want, you're gonna get this"\n >>> bagobj.I_can_be_anything\n "Doesn't matter what you want, you're gonna get this"\n\n """\n\n def __init__(self, obj):\n # Use weakref to make NpzFile objects collectable by refcount\n self._obj = weakref.proxy(obj)\n\n def __getattribute__(self, key):\n try:\n return object.__getattribute__(self, '_obj')[key]\n except KeyError:\n raise AttributeError(key) from None\n\n def __dir__(self):\n """\n Enables dir(bagobj) to list the files in an NpzFile.\n\n This also enables tab-completion in an interpreter or IPython.\n """\n return list(object.__getattribute__(self, '_obj').keys())\n\n\ndef zipfile_factory(file, *args, **kwargs):\n """\n Create a ZipFile.\n\n Allows for Zip64, and the `file` argument can accept file, str, or\n pathlib.Path objects. `args` and `kwargs` are passed to the zipfile.ZipFile\n constructor.\n """\n if not hasattr(file, 'read'):\n file = os.fspath(file)\n import zipfile\n kwargs['allowZip64'] = True\n return zipfile.ZipFile(file, *args, **kwargs)\n\n\n@set_module('numpy.lib.npyio')\nclass NpzFile(Mapping):\n """\n NpzFile(fid)\n\n A dictionary-like object with lazy-loading of files in the zipped\n archive provided on construction.\n\n `NpzFile` is used to load files in the NumPy ``.npz`` data archive\n format. It assumes that files in the archive have a ``.npy`` extension,\n other files are ignored.\n\n The arrays and file strings are lazily loaded on either\n getitem access using ``obj['key']`` or attribute lookup using\n ``obj.f.key``. 
A list of all files (without ``.npy`` extensions) can\n be obtained with ``obj.files`` and the ZipFile object itself using\n ``obj.zip``.\n\n Attributes\n ----------\n files : list of str\n List of all files in the archive with a ``.npy`` extension.\n zip : ZipFile instance\n The ZipFile object initialized with the zipped archive.\n f : BagObj instance\n An object on which attribute can be performed as an alternative\n to getitem access on the `NpzFile` instance itself.\n allow_pickle : bool, optional\n Allow loading pickled data. Default: False\n pickle_kwargs : dict, optional\n Additional keyword arguments to pass on to pickle.load.\n These are only useful when loading object arrays saved on\n Python 2.\n max_header_size : int, optional\n Maximum allowed size of the header. Large headers may not be safe\n to load securely and thus require explicitly passing a larger value.\n See :py:func:`ast.literal_eval()` for details.\n This option is ignored when `allow_pickle` is passed. In that case\n the file is by definition trusted and the limit is unnecessary.\n\n Parameters\n ----------\n fid : file, str, or pathlib.Path\n The zipped archive to open. 
This is either a file-like object\n or a string containing the path to the archive.\n own_fid : bool, optional\n Whether NpzFile should close the file handle.\n Requires that `fid` is a file-like object.\n\n Examples\n --------\n >>> import numpy as np\n >>> from tempfile import TemporaryFile\n >>> outfile = TemporaryFile()\n >>> x = np.arange(10)\n >>> y = np.sin(x)\n >>> np.savez(outfile, x=x, y=y)\n >>> _ = outfile.seek(0)\n\n >>> npz = np.load(outfile)\n >>> isinstance(npz, np.lib.npyio.NpzFile)\n True\n >>> npz\n NpzFile 'object' with keys: x, y\n >>> sorted(npz.files)\n ['x', 'y']\n >>> npz['x'] # getitem access\n array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\n >>> npz.f.x # attribute lookup\n array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\n\n """\n # Make __exit__ safe if zipfile_factory raises an exception\n zip = None\n fid = None\n _MAX_REPR_ARRAY_COUNT = 5\n\n def __init__(self, fid, own_fid=False, allow_pickle=False,\n pickle_kwargs=None, *,\n max_header_size=_MAX_HEADER_SIZE):\n # Import is postponed to here since zipfile depends on gzip, an\n # optional component of the so-called standard library.\n _zip = zipfile_factory(fid)\n _files = _zip.namelist()\n self.files = [name.removesuffix(".npy") for name in _files]\n self._files = dict(zip(self.files, _files))\n self._files.update(zip(_files, _files))\n self.allow_pickle = allow_pickle\n self.max_header_size = max_header_size\n self.pickle_kwargs = pickle_kwargs\n self.zip = _zip\n self.f = BagObj(self)\n if own_fid:\n self.fid = fid\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n self.close()\n\n def close(self):\n """\n Close the file.\n\n """\n if self.zip is not None:\n self.zip.close()\n self.zip = None\n if self.fid is not None:\n self.fid.close()\n self.fid = None\n self.f = None # break reference cycle\n\n def __del__(self):\n self.close()\n\n # Implement the Mapping ABC\n def __iter__(self):\n return iter(self.files)\n\n def __len__(self):\n return 
len(self.files)\n\n def __getitem__(self, key):\n try:\n key = self._files[key]\n except KeyError:\n raise KeyError(f"{key} is not a file in the archive") from None\n else:\n with self.zip.open(key) as bytes:\n magic = bytes.read(len(format.MAGIC_PREFIX))\n bytes.seek(0)\n if magic == format.MAGIC_PREFIX:\n # FIXME: This seems like it will copy strings around\n # more than is strictly necessary. The zipfile\n # will read the string and then\n # the format.read_array will copy the string\n # to another place in memory.\n # It would be better if the zipfile could read\n # (or at least uncompress) the data\n # directly into the array memory.\n return format.read_array(\n bytes,\n allow_pickle=self.allow_pickle,\n pickle_kwargs=self.pickle_kwargs,\n max_header_size=self.max_header_size\n )\n else:\n return bytes.read(key)\n\n def __contains__(self, key):\n return (key in self._files)\n\n def __repr__(self):\n # Get filename or default to `object`\n if isinstance(self.fid, str):\n filename = self.fid\n else:\n filename = getattr(self.fid, "name", "object")\n\n # Get the name of arrays\n array_names = ', '.join(self.files[:self._MAX_REPR_ARRAY_COUNT])\n if len(self.files) > self._MAX_REPR_ARRAY_COUNT:\n array_names += "..."\n return f"NpzFile {filename!r} with keys: {array_names}"\n\n # Work around problems with the docstrings in the Mapping methods\n # They contain a `->`, which confuses the type annotation interpretations\n # of sphinx-docs. See gh-25964\n\n def get(self, key, default=None, /):\n """\n D.get(k,[,d]) returns D[k] if k in D, else d. 
d defaults to None.\n """\n return Mapping.get(self, key, default)\n\n def items(self):\n """\n D.items() returns a set-like object providing a view on the items\n """\n return Mapping.items(self)\n\n def keys(self):\n """\n D.keys() returns a set-like object providing a view on the keys\n """\n return Mapping.keys(self)\n\n def values(self):\n """\n D.values() returns a set-like object providing a view on the values\n """\n return Mapping.values(self)\n\n\n@set_module('numpy')\ndef load(file, mmap_mode=None, allow_pickle=False, fix_imports=True,\n encoding='ASCII', *, max_header_size=_MAX_HEADER_SIZE):\n """\n Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files.\n\n .. warning:: Loading files that contain object arrays uses the ``pickle``\n module, which is not secure against erroneous or maliciously\n constructed data. Consider passing ``allow_pickle=False`` to\n load data that is known not to contain object arrays for the\n safer handling of untrusted sources.\n\n Parameters\n ----------\n file : file-like object, string, or pathlib.Path\n The file to read. File-like objects must support the\n ``seek()`` and ``read()`` methods and must always\n be opened in binary mode. Pickled files require that the\n file-like object support the ``readline()`` method as well.\n mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional\n If not None, then memory-map the file, using the given mode (see\n `numpy.memmap` for a detailed description of the modes). A\n memory-mapped array is kept on disk. However, it can be accessed\n and sliced like any ndarray. Memory mapping is especially useful\n for accessing small fragments of large files without reading the\n entire file into memory.\n allow_pickle : bool, optional\n Allow loading pickled object arrays stored in npy files. Reasons for\n disallowing pickles include security, as loading pickled data can\n execute arbitrary code. If pickles are disallowed, loading object\n arrays will fail. 
Default: False\n fix_imports : bool, optional\n Only useful when loading Python 2 generated pickled files,\n which includes npy/npz files containing object arrays. If `fix_imports`\n is True, pickle will try to map the old Python 2 names to the new names\n used in Python 3.\n encoding : str, optional\n What encoding to use when reading Python 2 strings. Only useful when\n loading Python 2 generated pickled files, which includes\n npy/npz files containing object arrays. Values other than 'latin1',\n 'ASCII', and 'bytes' are not allowed, as they can corrupt numerical\n data. Default: 'ASCII'\n max_header_size : int, optional\n Maximum allowed size of the header. Large headers may not be safe\n to load securely and thus require explicitly passing a larger value.\n See :py:func:`ast.literal_eval()` for details.\n This option is ignored when `allow_pickle` is passed. In that case\n the file is by definition trusted and the limit is unnecessary.\n\n Returns\n -------\n result : array, tuple, dict, etc.\n Data stored in the file. 
For ``.npz`` files, the returned instance\n of NpzFile class must be closed to avoid leaking file descriptors.\n\n Raises\n ------\n OSError\n If the input file does not exist or cannot be read.\n UnpicklingError\n If ``allow_pickle=True``, but the file cannot be loaded as a pickle.\n ValueError\n The file contains an object array, but ``allow_pickle=False`` given.\n EOFError\n When calling ``np.load`` multiple times on the same file handle,\n if all data has already been read\n\n See Also\n --------\n save, savez, savez_compressed, loadtxt\n memmap : Create a memory-map to an array stored in a file on disk.\n lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file.\n\n Notes\n -----\n - If the file contains pickle data, then whatever object is stored\n in the pickle is returned.\n - If the file is a ``.npy`` file, then a single array is returned.\n - If the file is a ``.npz`` file, then a dictionary-like object is\n returned, containing ``{filename: array}`` key-value pairs, one for\n each file in the archive.\n - If the file is a ``.npz`` file, the returned value supports the\n context manager protocol in a similar fashion to the open function::\n\n with load('foo.npz') as data:\n a = data['a']\n\n The underlying file descriptor is closed when exiting the 'with'\n block.\n\n Examples\n --------\n >>> import numpy as np\n\n Store data to disk, and load it again:\n\n >>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))\n >>> np.load('/tmp/123.npy')\n array([[1, 2, 3],\n [4, 5, 6]])\n\n Store compressed data to disk, and load it again:\n\n >>> a=np.array([[1, 2, 3], [4, 5, 6]])\n >>> b=np.array([1, 2])\n >>> np.savez('/tmp/123.npz', a=a, b=b)\n >>> data = np.load('/tmp/123.npz')\n >>> data['a']\n array([[1, 2, 3],\n [4, 5, 6]])\n >>> data['b']\n array([1, 2])\n >>> data.close()\n\n Mem-map the stored array, and then access the second row\n directly from disk:\n\n >>> X = np.load('/tmp/123.npy', mmap_mode='r')\n >>> X[1, :]\n memmap([4, 5, 6])\n\n 
"""\n if encoding not in ('ASCII', 'latin1', 'bytes'):\n # The 'encoding' value for pickle also affects what encoding\n # the serialized binary data of NumPy arrays is loaded\n # in. Pickle does not pass on the encoding information to\n # NumPy. The unpickling code in numpy._core.multiarray is\n # written to assume that unicode data appearing where binary\n # should be is in 'latin1'. 'bytes' is also safe, as is 'ASCII'.\n #\n # Other encoding values can corrupt binary data, and we\n # purposefully disallow them. For the same reason, the errors=\n # argument is not exposed, as values other than 'strict'\n # result can similarly silently corrupt numerical data.\n raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'")\n\n pickle_kwargs = {'encoding': encoding, 'fix_imports': fix_imports}\n\n with contextlib.ExitStack() as stack:\n if hasattr(file, 'read'):\n fid = file\n own_fid = False\n else:\n fid = stack.enter_context(open(os.fspath(file), "rb"))\n own_fid = True\n\n # Code to distinguish from NumPy binary files and pickles.\n _ZIP_PREFIX = b'PK\x03\x04'\n _ZIP_SUFFIX = b'PK\x05\x06' # empty zip files start with this\n N = len(format.MAGIC_PREFIX)\n magic = fid.read(N)\n if not magic:\n raise EOFError("No data left in file")\n # If the file size is less than N, we need to make sure not\n # to seek past the beginning of the file\n fid.seek(-min(N, len(magic)), 1) # back-up\n if magic.startswith((_ZIP_PREFIX, _ZIP_SUFFIX)):\n # zip-file (assume .npz)\n # Potentially transfer file ownership to NpzFile\n stack.pop_all()\n ret = NpzFile(fid, own_fid=own_fid, allow_pickle=allow_pickle,\n pickle_kwargs=pickle_kwargs,\n max_header_size=max_header_size)\n return ret\n elif magic == format.MAGIC_PREFIX:\n # .npy file\n if mmap_mode:\n if allow_pickle:\n max_header_size = 2**64\n return format.open_memmap(file, mode=mmap_mode,\n max_header_size=max_header_size)\n else:\n return format.read_array(fid, allow_pickle=allow_pickle,\n pickle_kwargs=pickle_kwargs,\n 
max_header_size=max_header_size)\n else:\n # Try a pickle\n if not allow_pickle:\n raise ValueError(\n "This file contains pickled (object) data. If you trust "\n "the file you can load it unsafely using the "\n "`allow_pickle=` keyword argument or `pickle.load()`.")\n try:\n return pickle.load(fid, **pickle_kwargs)\n except Exception as e:\n raise pickle.UnpicklingError(\n f"Failed to interpret file {file!r} as a pickle") from e\n\n\ndef _save_dispatcher(file, arr, allow_pickle=None, fix_imports=None):\n return (arr,)\n\n\n@array_function_dispatch(_save_dispatcher)\ndef save(file, arr, allow_pickle=True, fix_imports=np._NoValue):\n """\n Save an array to a binary file in NumPy ``.npy`` format.\n\n Parameters\n ----------\n file : file, str, or pathlib.Path\n File or filename to which the data is saved. If file is a file-object,\n then the filename is unchanged. If file is a string or Path,\n a ``.npy`` extension will be appended to the filename if it does not\n already have one.\n arr : array_like\n Array data to be saved.\n allow_pickle : bool, optional\n Allow saving object arrays using Python pickles. Reasons for\n disallowing pickles include security (loading pickled data can execute\n arbitrary code) and portability (pickled objects may not be loadable\n on different Python installations, for example if the stored objects\n require libraries that are not available, and not all pickled data is\n compatible between different versions of Python).\n Default: True\n fix_imports : bool, optional\n The `fix_imports` flag is deprecated and has no effect.\n\n .. 
deprecated:: 2.1\n This flag is ignored since NumPy 1.17 and was only needed to\n support loading in Python 2 some files written in Python 3.\n\n See Also\n --------\n savez : Save several arrays into a ``.npz`` archive\n savetxt, load\n\n Notes\n -----\n For a description of the ``.npy`` format, see :py:mod:`numpy.lib.format`.\n\n Any data saved to the file is appended to the end of the file.\n\n Examples\n --------\n >>> import numpy as np\n\n >>> from tempfile import TemporaryFile\n >>> outfile = TemporaryFile()\n\n >>> x = np.arange(10)\n >>> np.save(outfile, x)\n\n >>> _ = outfile.seek(0) # Only needed to simulate closing & reopening file\n >>> np.load(outfile)\n array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\n\n\n >>> with open('test.npy', 'wb') as f:\n ... np.save(f, np.array([1, 2]))\n ... np.save(f, np.array([1, 3]))\n >>> with open('test.npy', 'rb') as f:\n ... a = np.load(f)\n ... b = np.load(f)\n >>> print(a, b)\n # [1 2] [1 3]\n """\n if fix_imports is not np._NoValue:\n # Deprecated 2024-05-16, NumPy 2.1\n warnings.warn(\n "The 'fix_imports' flag is deprecated and has no effect. 
"\n "(Deprecated in NumPy 2.1)",\n DeprecationWarning, stacklevel=2)\n if hasattr(file, 'write'):\n file_ctx = contextlib.nullcontext(file)\n else:\n file = os.fspath(file)\n if not file.endswith('.npy'):\n file = file + '.npy'\n file_ctx = open(file, "wb")\n\n with file_ctx as fid:\n arr = np.asanyarray(arr)\n format.write_array(fid, arr, allow_pickle=allow_pickle,\n pickle_kwargs={'fix_imports': fix_imports})\n\n\ndef _savez_dispatcher(file, *args, allow_pickle=True, **kwds):\n yield from args\n yield from kwds.values()\n\n\n@array_function_dispatch(_savez_dispatcher)\ndef savez(file, *args, allow_pickle=True, **kwds):\n """Save several arrays into a single file in uncompressed ``.npz`` format.\n\n Provide arrays as keyword arguments to store them under the\n corresponding name in the output file: ``savez(fn, x=x, y=y)``.\n\n If arrays are specified as positional arguments, i.e., ``savez(fn,\n x, y)``, their names will be `arr_0`, `arr_1`, etc.\n\n Parameters\n ----------\n file : file, str, or pathlib.Path\n Either the filename (string) or an open file (file-like object)\n where the data will be saved. If file is a string or a Path, the\n ``.npz`` extension will be appended to the filename if it is not\n already there.\n args : Arguments, optional\n Arrays to save to the file. Please use keyword arguments (see\n `kwds` below) to assign names to arrays. Arrays specified as\n args will be named "arr_0", "arr_1", and so on.\n allow_pickle : bool, optional\n Allow saving object arrays using Python pickles. Reasons for\n disallowing pickles include security (loading pickled data can execute\n arbitrary code) and portability (pickled objects may not be loadable\n on different Python installations, for example if the stored objects\n require libraries that are not available, and not all pickled data is\n compatible between different versions of Python).\n Default: True\n kwds : Keyword arguments, optional\n Arrays to save to the file. 
Each array will be saved to the\n output file with its corresponding keyword name.\n\n Returns\n -------\n None\n\n See Also\n --------\n save : Save a single array to a binary file in NumPy format.\n savetxt : Save an array to a file as plain text.\n savez_compressed : Save several arrays into a compressed ``.npz`` archive\n\n Notes\n -----\n The ``.npz`` file format is a zipped archive of files named after the\n variables they contain. The archive is not compressed and each file\n in the archive contains one variable in ``.npy`` format. For a\n description of the ``.npy`` format, see :py:mod:`numpy.lib.format`.\n\n When opening the saved ``.npz`` file with `load` a `~lib.npyio.NpzFile`\n object is returned. This is a dictionary-like object which can be queried\n for its list of arrays (with the ``.files`` attribute), and for the arrays\n themselves.\n\n Keys passed in `kwds` are used as filenames inside the ZIP archive.\n Therefore, keys should be valid filenames; e.g., avoid keys that begin with\n ``/`` or contain ``.``.\n\n When naming variables with keyword arguments, it is not possible to name a\n variable ``file``, as this would cause the ``file`` argument to be defined\n twice in the call to ``savez``.\n\n Examples\n --------\n >>> import numpy as np\n >>> from tempfile import TemporaryFile\n >>> outfile = TemporaryFile()\n >>> x = np.arange(10)\n >>> y = np.sin(x)\n\n Using `savez` with \\*args, the arrays are saved with default names.\n\n >>> np.savez(outfile, x, y)\n >>> _ = outfile.seek(0) # Only needed to simulate closing & reopening file\n >>> npzfile = np.load(outfile)\n >>> npzfile.files\n ['arr_0', 'arr_1']\n >>> npzfile['arr_0']\n array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\n\n Using `savez` with \\**kwds, the arrays are saved with the keyword names.\n\n >>> outfile = TemporaryFile()\n >>> np.savez(outfile, x=x, y=y)\n >>> _ = outfile.seek(0)\n >>> npzfile = np.load(outfile)\n >>> sorted(npzfile.files)\n ['x', 'y']\n >>> npzfile['x']\n array([0, 1, 2, 
3, 4, 5, 6, 7, 8, 9])\n\n """\n _savez(file, args, kwds, False, allow_pickle=allow_pickle)\n\n\ndef _savez_compressed_dispatcher(file, *args, allow_pickle=True, **kwds):\n yield from args\n yield from kwds.values()\n\n\n@array_function_dispatch(_savez_compressed_dispatcher)\ndef savez_compressed(file, *args, allow_pickle=True, **kwds):\n """\n Save several arrays into a single file in compressed ``.npz`` format.\n\n Provide arrays as keyword arguments to store them under the\n corresponding name in the output file: ``savez_compressed(fn, x=x, y=y)``.\n\n If arrays are specified as positional arguments, i.e.,\n ``savez_compressed(fn, x, y)``, their names will be `arr_0`, `arr_1`, etc.\n\n Parameters\n ----------\n file : file, str, or pathlib.Path\n Either the filename (string) or an open file (file-like object)\n where the data will be saved. If file is a string or a Path, the\n ``.npz`` extension will be appended to the filename if it is not\n already there.\n args : Arguments, optional\n Arrays to save to the file. Please use keyword arguments (see\n `kwds` below) to assign names to arrays. Arrays specified as\n args will be named "arr_0", "arr_1", and so on.\n allow_pickle : bool, optional\n Allow saving object arrays using Python pickles. Reasons for\n disallowing pickles include security (loading pickled data can execute\n arbitrary code) and portability (pickled objects may not be loadable\n on different Python installations, for example if the stored objects\n require libraries that are not available, and not all pickled data is\n compatible between different versions of Python).\n Default: True\n kwds : Keyword arguments, optional\n Arrays to save to the file. 
Each array will be saved to the\n output file with its corresponding keyword name.\n\n Returns\n -------\n None\n\n See Also\n --------\n numpy.save : Save a single array to a binary file in NumPy format.\n numpy.savetxt : Save an array to a file as plain text.\n numpy.savez : Save several arrays into an uncompressed ``.npz`` file format\n numpy.load : Load the files created by savez_compressed.\n\n Notes\n -----\n The ``.npz`` file format is a zipped archive of files named after the\n variables they contain. The archive is compressed with\n ``zipfile.ZIP_DEFLATED`` and each file in the archive contains one variable\n in ``.npy`` format. For a description of the ``.npy`` format, see\n :py:mod:`numpy.lib.format`.\n\n\n When opening the saved ``.npz`` file with `load` a `~lib.npyio.NpzFile`\n object is returned. This is a dictionary-like object which can be queried\n for its list of arrays (with the ``.files`` attribute), and for the arrays\n themselves.\n\n Examples\n --------\n >>> import numpy as np\n >>> test_array = np.random.rand(3, 2)\n >>> test_vector = np.random.rand(4)\n >>> np.savez_compressed('/tmp/123', a=test_array, b=test_vector)\n >>> loaded = np.load('/tmp/123.npz')\n >>> print(np.array_equal(test_array, loaded['a']))\n True\n >>> print(np.array_equal(test_vector, loaded['b']))\n True\n\n """\n _savez(file, args, kwds, True, allow_pickle=allow_pickle)\n\n\ndef _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None):\n # Import is postponed to here since zipfile depends on gzip, an optional\n # component of the so-called standard library.\n import zipfile\n\n if not hasattr(file, 'write'):\n file = os.fspath(file)\n if not file.endswith('.npz'):\n file = file + '.npz'\n\n namedict = kwds\n for i, val in enumerate(args):\n key = 'arr_%d' % i\n if key in namedict.keys():\n raise ValueError(\n f"Cannot use un-named variables and keyword {key}")\n namedict[key] = val\n\n if compress:\n compression = zipfile.ZIP_DEFLATED\n else:\n 
compression = zipfile.ZIP_STORED\n\n zipf = zipfile_factory(file, mode="w", compression=compression)\n try:\n for key, val in namedict.items():\n fname = key + '.npy'\n val = np.asanyarray(val)\n # always force zip64, gh-10776\n with zipf.open(fname, 'w', force_zip64=True) as fid:\n format.write_array(fid, val,\n allow_pickle=allow_pickle,\n pickle_kwargs=pickle_kwargs)\n finally:\n zipf.close()\n\n\ndef _ensure_ndmin_ndarray_check_param(ndmin):\n """Just checks if the param ndmin is supported on\n _ensure_ndmin_ndarray. It is intended to be used as\n verification before running anything expensive.\n e.g. loadtxt, genfromtxt\n """\n # Check correctness of the values of `ndmin`\n if ndmin not in [0, 1, 2]:\n raise ValueError(f"Illegal value of ndmin keyword: {ndmin}")\n\ndef _ensure_ndmin_ndarray(a, *, ndmin: int):\n """This is a helper function of loadtxt and genfromtxt to ensure\n proper minimum dimension as requested\n\n ndim : int. Supported values 1, 2, 3\n ^^ whenever this changes, keep in sync with\n _ensure_ndmin_ndarray_check_param\n """\n # Verify that the array has at least dimensions `ndmin`.\n # Tweak the size and shape of the arrays - remove extraneous dimensions\n if a.ndim > ndmin:\n a = np.squeeze(a)\n # and ensure we have the minimum number of dimensions asked for\n # - has to be in this order for the odd case ndmin=1, a.squeeze().ndim=0\n if a.ndim < ndmin:\n if ndmin == 1:\n a = np.atleast_1d(a)\n elif ndmin == 2:\n a = np.atleast_2d(a).T\n\n return a\n\n\n# amount of lines loadtxt reads in one chunk, can be overridden for testing\n_loadtxt_chunksize = 50000\n\n\ndef _check_nonneg_int(value, name="argument"):\n try:\n operator.index(value)\n except TypeError:\n raise TypeError(f"{name} must be an integer") from None\n if value < 0:\n raise ValueError(f"{name} must be nonnegative")\n\n\ndef _preprocess_comments(iterable, comments, encoding):\n """\n Generator that consumes a line iterated iterable and strips out the\n multiple (or multi-character) 
comments from lines.\n This is a pre-processing step to achieve feature parity with loadtxt\n (we assume that this feature is a nieche feature).\n """\n for line in iterable:\n if isinstance(line, bytes):\n # Need to handle conversion here, or the splitting would fail\n line = line.decode(encoding)\n\n for c in comments:\n line = line.split(c, 1)[0]\n\n yield line\n\n\n# The number of rows we read in one go if confronted with a parametric dtype\n_loadtxt_chunksize = 50000\n\n\ndef _read(fname, *, delimiter=',', comment='#', quote='"',\n imaginary_unit='j', usecols=None, skiplines=0,\n max_rows=None, converters=None, ndmin=None, unpack=False,\n dtype=np.float64, encoding=None):\n r"""\n Read a NumPy array from a text file.\n This is a helper function for loadtxt.\n\n Parameters\n ----------\n fname : file, str, or pathlib.Path\n The filename or the file to be read.\n delimiter : str, optional\n Field delimiter of the fields in line of the file.\n Default is a comma, ','. If None any sequence of whitespace is\n considered a delimiter.\n comment : str or sequence of str or None, optional\n Character that begins a comment. All text from the comment\n character to the end of the line is ignored.\n Multiple comments or multiple-character comment strings are supported,\n but may be slower and `quote` must be empty if used.\n Use None to disable all use of comments.\n quote : str or None, optional\n Character that is used to quote string fields. Default is '"'\n (a double quote). Use None to disable quote support.\n imaginary_unit : str, optional\n Character that represent the imaginary unit `sqrt(-1)`.\n Default is 'j'.\n usecols : array_like, optional\n A one-dimensional array of integer column numbers. These are the\n columns from the file to be included in the array. 
If this value\n is not given, all the columns are used.\n skiplines : int, optional\n Number of lines to skip before interpreting the data in the file.\n max_rows : int, optional\n Maximum number of rows of data to read. Default is to read the\n entire file.\n converters : dict or callable, optional\n A function to parse all columns strings into the desired value, or\n a dictionary mapping column number to a parser function.\n E.g. if column 0 is a date string: ``converters = {0: datestr2num}``.\n Converters can also be used to provide a default value for missing\n data, e.g. ``converters = lambda s: float(s.strip() or 0)`` will\n convert empty fields to 0.\n Default: None\n ndmin : int, optional\n Minimum dimension of the array returned.\n Allowed values are 0, 1 or 2. Default is 0.\n unpack : bool, optional\n If True, the returned array is transposed, so that arguments may be\n unpacked using ``x, y, z = read(...)``. When used with a structured\n data-type, arrays are returned for each field. Default is False.\n dtype : numpy data type\n A NumPy dtype instance, can be a structured dtype to map to the\n columns of the file.\n encoding : str, optional\n Encoding used to decode the inputfile. The special value 'bytes'\n (the default) enables backwards-compatible behavior for `converters`,\n ensuring that inputs to the converter functions are encoded\n bytes objects. The special value 'bytes' has no additional effect if\n ``converters=None``. 
If encoding is ``'bytes'`` or ``None``, the\n default system encoding is used.\n\n Returns\n -------\n ndarray\n NumPy array.\n """\n # Handle special 'bytes' keyword for encoding\n byte_converters = False\n if encoding == 'bytes':\n encoding = None\n byte_converters = True\n\n if dtype is None:\n raise TypeError("a dtype must be provided.")\n dtype = np.dtype(dtype)\n\n read_dtype_via_object_chunks = None\n if dtype.kind in 'SUM' and dtype in {\n np.dtype("S0"), np.dtype("U0"), np.dtype("M8"), np.dtype("m8")}:\n # This is a legacy "flexible" dtype. We do not truly support\n # parametric dtypes currently (no dtype discovery step in the core),\n # but have to support these for backward compatibility.\n read_dtype_via_object_chunks = dtype\n dtype = np.dtype(object)\n\n if usecols is not None:\n # Allow usecols to be a single int or a sequence of ints, the C-code\n # handles the rest\n try:\n usecols = list(usecols)\n except TypeError:\n usecols = [usecols]\n\n _ensure_ndmin_ndarray_check_param(ndmin)\n\n if comment is None:\n comments = None\n else:\n # assume comments are a sequence of strings\n if "" in comment:\n raise ValueError(\n "comments cannot be an empty string. 
Use comments=None to "\n "disable comments."\n )\n comments = tuple(comment)\n comment = None\n if len(comments) == 0:\n comments = None # No comments at all\n elif len(comments) == 1:\n # If there is only one comment, and that comment has one character,\n # the normal parsing can deal with it just fine.\n if isinstance(comments[0], str) and len(comments[0]) == 1:\n comment = comments[0]\n comments = None\n # Input validation if there are multiple comment characters\n elif delimiter in comments:\n raise TypeError(\n f"Comment characters '{comments}' cannot include the "\n f"delimiter '{delimiter}'"\n )\n\n # comment is now either a 1 or 0 character string or a tuple:\n if comments is not None:\n # Note: An earlier version support two character comments (and could\n # have been extended to multiple characters, we assume this is\n # rare enough to not optimize for.\n if quote is not None:\n raise ValueError(\n "when multiple comments or a multi-character comment is "\n "given, quotes are not supported. In this case quotechar "\n "must be set to None.")\n\n if len(imaginary_unit) != 1:\n raise ValueError('len(imaginary_unit) must be 1.')\n\n _check_nonneg_int(skiplines)\n if max_rows is not None:\n _check_nonneg_int(max_rows)\n else:\n # Passing -1 to the C code means "read the entire file".\n max_rows = -1\n\n fh_closing_ctx = contextlib.nullcontext()\n filelike = False\n try:\n if isinstance(fname, os.PathLike):\n fname = os.fspath(fname)\n if isinstance(fname, str):\n fh = np.lib._datasource.open(fname, 'rt', encoding=encoding)\n if encoding is None:\n encoding = getattr(fh, 'encoding', 'latin1')\n\n fh_closing_ctx = contextlib.closing(fh)\n data = fh\n filelike = True\n else:\n if encoding is None:\n encoding = getattr(fname, 'encoding', 'latin1')\n data = iter(fname)\n except TypeError as e:\n raise ValueError(\n f"fname must be a string, filehandle, list of strings,\n"\n f"or generator. 
Got {type(fname)} instead.") from e\n\n with fh_closing_ctx:\n if comments is not None:\n if filelike:\n data = iter(data)\n filelike = False\n data = _preprocess_comments(data, comments, encoding)\n\n if read_dtype_via_object_chunks is None:\n arr = _load_from_filelike(\n data, delimiter=delimiter, comment=comment, quote=quote,\n imaginary_unit=imaginary_unit,\n usecols=usecols, skiplines=skiplines, max_rows=max_rows,\n converters=converters, dtype=dtype,\n encoding=encoding, filelike=filelike,\n byte_converters=byte_converters)\n\n else:\n # This branch reads the file into chunks of object arrays and then\n # casts them to the desired actual dtype. This ensures correct\n # string-length and datetime-unit discovery (like `arr.astype()`).\n # Due to chunking, certain error reports are less clear, currently.\n if filelike:\n data = iter(data) # cannot chunk when reading from file\n filelike = False\n\n c_byte_converters = False\n if read_dtype_via_object_chunks == "S":\n c_byte_converters = True # Use latin1 rather than ascii\n\n chunks = []\n while max_rows != 0:\n if max_rows < 0:\n chunk_size = _loadtxt_chunksize\n else:\n chunk_size = min(_loadtxt_chunksize, max_rows)\n\n next_arr = _load_from_filelike(\n data, delimiter=delimiter, comment=comment, quote=quote,\n imaginary_unit=imaginary_unit,\n usecols=usecols, skiplines=skiplines, max_rows=chunk_size,\n converters=converters, dtype=dtype,\n encoding=encoding, filelike=filelike,\n byte_converters=byte_converters,\n c_byte_converters=c_byte_converters)\n # Cast here already. We hope that this is better even for\n # large files because the storage is more compact. 
It could\n # be adapted (in principle the concatenate could cast).\n chunks.append(next_arr.astype(read_dtype_via_object_chunks))\n\n skiplines = 0 # Only have to skip for first chunk\n if max_rows >= 0:\n max_rows -= chunk_size\n if len(next_arr) < chunk_size:\n # There was less data than requested, so we are done.\n break\n\n # Need at least one chunk, but if empty, the last one may have\n # the wrong shape.\n if len(chunks) > 1 and len(chunks[-1]) == 0:\n del chunks[-1]\n if len(chunks) == 1:\n arr = chunks[0]\n else:\n arr = np.concatenate(chunks, axis=0)\n\n # NOTE: ndmin works as advertised for structured dtypes, but normally\n # these would return a 1D result plus the structured dimension,\n # so ndmin=2 adds a third dimension even when no squeezing occurs.\n # A `squeeze=False` could be a better solution (pandas uses squeeze).\n arr = _ensure_ndmin_ndarray(arr, ndmin=ndmin)\n\n if arr.shape:\n if arr.shape[0] == 0:\n warnings.warn(\n f'loadtxt: input contained no data: "{fname}"',\n category=UserWarning,\n stacklevel=3\n )\n\n if unpack:\n # Unpack structured dtypes if requested:\n dt = arr.dtype\n if dt.names is not None:\n # For structured arrays, return an array for each field.\n return [arr[field] for field in dt.names]\n else:\n return arr.T\n else:\n return arr\n\n\n@finalize_array_function_like\n@set_module('numpy')\ndef loadtxt(fname, dtype=float, comments='#', delimiter=None,\n converters=None, skiprows=0, usecols=None, unpack=False,\n ndmin=0, encoding=None, max_rows=None, *, quotechar=None,\n like=None):\n r"""\n Load data from a text file.\n\n Parameters\n ----------\n fname : file, str, pathlib.Path, list of str, generator\n File, filename, list, or generator to read. If the filename\n extension is ``.gz`` or ``.bz2``, the file is first decompressed. Note\n that generators must return bytes or strings. 
The strings\n in a list or produced by a generator are treated as lines.\n dtype : data-type, optional\n Data-type of the resulting array; default: float. If this is a\n structured data-type, the resulting array will be 1-dimensional, and\n each row will be interpreted as an element of the array. In this\n case, the number of columns used must match the number of fields in\n the data-type.\n comments : str or sequence of str or None, optional\n The characters or list of characters used to indicate the start of a\n comment. None implies no comments. For backwards compatibility, byte\n strings will be decoded as 'latin1'. The default is '#'.\n delimiter : str, optional\n The character used to separate the values. For backwards compatibility,\n byte strings will be decoded as 'latin1'. The default is whitespace.\n\n .. versionchanged:: 1.23.0\n Only single character delimiters are supported. Newline characters\n cannot be used as the delimiter.\n\n converters : dict or callable, optional\n Converter functions to customize value parsing. If `converters` is\n callable, the function is applied to all columns, else it must be a\n dict that maps column number to a parser function.\n See examples for further details.\n Default: None.\n\n .. versionchanged:: 1.23.0\n The ability to pass a single callable to be applied to all columns\n was added.\n\n skiprows : int, optional\n Skip the first `skiprows` lines, including comments; default: 0.\n usecols : int or sequence, optional\n Which columns to read, with 0 being the first. For example,\n ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.\n The default, None, results in all columns being read.\n unpack : bool, optional\n If True, the returned array is transposed, so that arguments may be\n unpacked using ``x, y, z = loadtxt(...)``. 
When used with a\n structured data-type, arrays are returned for each field.\n Default is False.\n ndmin : int, optional\n The returned array will have at least `ndmin` dimensions.\n Otherwise mono-dimensional axes will be squeezed.\n Legal values: 0 (default), 1 or 2.\n encoding : str, optional\n Encoding used to decode the inputfile. Does not apply to input streams.\n The special value 'bytes' enables backward compatibility workarounds\n that ensures you receive byte arrays as results if possible and passes\n 'latin1' encoded strings to converters. Override this value to receive\n unicode arrays and pass strings as input to converters. If set to None\n the system default is used. The default value is None.\n\n .. versionchanged:: 2.0\n Before NumPy 2, the default was ``'bytes'`` for Python 2\n compatibility. The default is now ``None``.\n\n max_rows : int, optional\n Read `max_rows` rows of content after `skiprows` lines. The default is\n to read all the rows. Note that empty rows containing no data such as\n empty lines and comment lines are not counted towards `max_rows`,\n while such lines are counted in `skiprows`.\n\n .. versionchanged:: 1.23.0\n Lines containing no data, including comment lines (e.g., lines\n starting with '#' or as specified via `comments`) are not counted\n towards `max_rows`.\n quotechar : unicode character or None, optional\n The character used to denote the start and end of a quoted item.\n Occurrences of the delimiter or comment characters are ignored within\n a quoted item. The default value is ``quotechar=None``, which means\n quoting support is disabled.\n\n If two consecutive instances of `quotechar` are found within a quoted\n field, the first is treated as an escape character. See examples.\n\n .. versionadded:: 1.23.0\n ${ARRAY_FUNCTION_LIKE}\n\n .. 
versionadded:: 1.20.0\n\n Returns\n -------\n out : ndarray\n Data read from the text file.\n\n See Also\n --------\n load, fromstring, fromregex\n genfromtxt : Load data with missing values handled as specified.\n scipy.io.loadmat : reads MATLAB data files\n\n Notes\n -----\n This function aims to be a fast reader for simply formatted files. The\n `genfromtxt` function provides more sophisticated handling of, e.g.,\n lines with missing values.\n\n Each row in the input text file must have the same number of values to be\n able to read all values. If all rows do not have same number of values, a\n subset of up to n columns (where n is the least number of values present\n in all rows) can be read by specifying the columns via `usecols`.\n\n The strings produced by the Python float.hex method can be used as\n input for floats.\n\n Examples\n --------\n >>> import numpy as np\n >>> from io import StringIO # StringIO behaves like a file object\n >>> c = StringIO("0 1\n2 3")\n >>> np.loadtxt(c)\n array([[0., 1.],\n [2., 3.]])\n\n >>> d = StringIO("M 21 72\nF 35 58")\n >>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),\n ... 'formats': ('S1', 'i4', 'f4')})\n array([(b'M', 21, 72.), (b'F', 35, 58.)],\n dtype=[('gender', 'S1'), ('age', '<i4'), ('weight', '<f4')])\n\n >>> c = StringIO("1,0,2\n3,0,4")\n >>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)\n >>> x\n array([1., 3.])\n >>> y\n array([2., 4.])\n\n The `converters` argument is used to specify functions to preprocess the\n text prior to parsing. `converters` can be a dictionary that maps\n preprocessing functions to each column:\n\n >>> s = StringIO("1.618, 2.296\n3.141, 4.669\n")\n >>> conv = {\n ... 0: lambda x: np.floor(float(x)), # conversion fn for column 0\n ... 1: lambda x: np.ceil(float(x)), # conversion fn for column 1\n ... 
}\n >>> np.loadtxt(s, delimiter=",", converters=conv)\n array([[1., 3.],\n [3., 5.]])\n\n `converters` can be a callable instead of a dictionary, in which case it\n is applied to all columns:\n\n >>> s = StringIO("0xDE 0xAD\n0xC0 0xDE")\n >>> import functools\n >>> conv = functools.partial(int, base=16)\n >>> np.loadtxt(s, converters=conv)\n array([[222., 173.],\n [192., 222.]])\n\n This example shows how `converters` can be used to convert a field\n with a trailing minus sign into a negative number.\n\n >>> s = StringIO("10.01 31.25-\n19.22 64.31\n17.57- 63.94")\n >>> def conv(fld):\n ... return -float(fld[:-1]) if fld.endswith("-") else float(fld)\n ...\n >>> np.loadtxt(s, converters=conv)\n array([[ 10.01, -31.25],\n [ 19.22, 64.31],\n [-17.57, 63.94]])\n\n Using a callable as the converter can be particularly useful for handling\n values with different formatting, e.g. floats with underscores:\n\n >>> s = StringIO("1 2.7 100_000")\n >>> np.loadtxt(s, converters=float)\n array([1.e+00, 2.7e+00, 1.e+05])\n\n This idea can be extended to automatically handle values specified in\n many different formats, such as hex values:\n\n >>> def conv(val):\n ... try:\n ... return float(val)\n ... except ValueError:\n ... 
return float.fromhex(val)\n >>> s = StringIO("1, 2.5, 3_000, 0b4, 0x1.4000000000000p+2")\n >>> np.loadtxt(s, delimiter=",", converters=conv)\n array([1.0e+00, 2.5e+00, 3.0e+03, 1.8e+02, 5.0e+00])\n\n Or a format where the ``-`` sign comes after the number:\n\n >>> s = StringIO("10.01 31.25-\n19.22 64.31\n17.57- 63.94")\n >>> conv = lambda x: -float(x[:-1]) if x.endswith("-") else float(x)\n >>> np.loadtxt(s, converters=conv)\n array([[ 10.01, -31.25],\n [ 19.22, 64.31],\n [-17.57, 63.94]])\n\n Support for quoted fields is enabled with the `quotechar` parameter.\n Comment and delimiter characters are ignored when they appear within a\n quoted item delineated by `quotechar`:\n\n >>> s = StringIO('"alpha, #42", 10.0\n"beta, #64", 2.0\n')\n >>> dtype = np.dtype([("label", "U12"), ("value", float)])\n >>> np.loadtxt(s, dtype=dtype, delimiter=",", quotechar='"')\n array([('alpha, #42', 10.), ('beta, #64', 2.)],\n dtype=[('label', '<U12'), ('value', '<f8')])\n\n Quoted fields can be separated by multiple whitespace characters:\n\n >>> s = StringIO('"alpha, #42" 10.0\n"beta, #64" 2.0\n')\n >>> dtype = np.dtype([("label", "U12"), ("value", float)])\n >>> np.loadtxt(s, dtype=dtype, delimiter=None, quotechar='"')\n array([('alpha, #42', 10.), ('beta, #64', 2.)],\n dtype=[('label', '<U12'), ('value', '<f8')])\n\n Two consecutive quote characters within a quoted field are treated as a\n single escaped character:\n\n >>> s = StringIO('"Hello, my name is ""Monty""!"')\n >>> np.loadtxt(s, dtype="U", delimiter=",", quotechar='"')\n array('Hello, my name is "Monty"!', dtype='<U26')\n\n Read subset of columns when all rows do not contain equal number of values:\n\n >>> d = StringIO("1 2\n2 4\n3 9 12\n4 16 20")\n >>> np.loadtxt(d, usecols=(0, 1))\n array([[ 1., 2.],\n [ 2., 4.],\n [ 3., 9.],\n [ 4., 16.]])\n\n """\n\n if like is not None:\n return _loadtxt_with_like(\n like, fname, dtype=dtype, comments=comments, delimiter=delimiter,\n converters=converters, skiprows=skiprows, 
usecols=usecols,\n unpack=unpack, ndmin=ndmin, encoding=encoding,\n max_rows=max_rows\n )\n\n if isinstance(delimiter, bytes):\n delimiter.decode("latin1")\n\n if dtype is None:\n dtype = np.float64\n\n comment = comments\n # Control character type conversions for Py3 convenience\n if comment is not None:\n if isinstance(comment, (str, bytes)):\n comment = [comment]\n comment = [\n x.decode('latin1') if isinstance(x, bytes) else x for x in comment]\n if isinstance(delimiter, bytes):\n delimiter = delimiter.decode('latin1')\n\n arr = _read(fname, dtype=dtype, comment=comment, delimiter=delimiter,\n converters=converters, skiplines=skiprows, usecols=usecols,\n unpack=unpack, ndmin=ndmin, encoding=encoding,\n max_rows=max_rows, quote=quotechar)\n\n return arr\n\n\n_loadtxt_with_like = array_function_dispatch()(loadtxt)\n\n\ndef _savetxt_dispatcher(fname, X, fmt=None, delimiter=None, newline=None,\n header=None, footer=None, comments=None,\n encoding=None):\n return (X,)\n\n\n@array_function_dispatch(_savetxt_dispatcher)\ndef savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',\n footer='', comments='# ', encoding=None):\n """\n Save an array to a text file.\n\n Parameters\n ----------\n fname : filename, file handle or pathlib.Path\n If the filename ends in ``.gz``, the file is automatically saved in\n compressed gzip format. `loadtxt` understands gzipped files\n transparently.\n X : 1D or 2D array_like\n Data to be saved to a text file.\n fmt : str or sequence of strs, optional\n A single format (%10.5f), a sequence of formats, or a\n multi-format string, e.g. 'Iteration %d -- %10.5f', in which\n case `delimiter` is ignored. 
For complex `X`, the legal options
        for `fmt` are:

        * a single specifier, ``fmt='%.4e'``, resulting in numbers formatted
          like ``' (%s+%sj)' % (fmt, fmt)``
        * a full string specifying every real and imaginary part, e.g.
          ``' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'`` for 3 columns
        * a list of specifiers, one per column - in this case, the real
          and imaginary part must have separate specifiers,
          e.g. ``['%.3e + %.3ej', '(%.15e%+.15ej)']`` for 2 columns
    delimiter : str, optional
        String or character separating columns.
    newline : str, optional
        String or character separating lines.
    header : str, optional
        String that will be written at the beginning of the file.
    footer : str, optional
        String that will be written at the end of the file.
    comments : str, optional
        String that will be prepended to the ``header`` and ``footer``
        strings, to mark them as comments. Default: '# ', as expected by
        e.g. ``numpy.loadtxt``.
    encoding : {None, str}, optional
        Encoding used to encode the outputfile. Does not apply to output
        streams. If the encoding is something other than 'bytes' or 'latin1'
        you will not be able to load the file in NumPy versions < 1.14.
        Default is 'latin1'.

    See Also
    --------
    save : Save an array to a binary file in NumPy ``.npy`` format
    savez : Save several arrays into an uncompressed ``.npz`` archive
    savez_compressed : Save several arrays into a compressed ``.npz`` archive

    Notes
    -----
    Further explanation of the `fmt` parameter
    (``%[flag]width[.precision]specifier``):

    flags:
        ``-`` : left justify

        ``+`` : Forces to precede result with + or -.

        ``0`` : Left pad the number with zeros instead of space (see width).

    width:
        Minimum number of characters to be printed. The value is not
        truncated if it has more characters.

    precision:
        - For integer specifiers (eg. ``d,i,o,x``), the minimum number of
          digits.
        - For ``e, E`` and ``f`` specifiers, the number of digits to print
          after the decimal point.
        - For ``g`` and ``G``, the maximum number of significant digits.
        - For ``s``, the maximum number of characters.

    specifiers:
        ``c`` : character

        ``d`` or ``i`` : signed decimal integer

        ``e`` or ``E`` : scientific notation with ``e`` or ``E``.

        ``f`` : decimal floating point

        ``g,G`` : use the shorter of ``e,E`` or ``f``

        ``o`` : signed octal

        ``s`` : string of characters

        ``u`` : unsigned decimal integer

        ``x,X`` : unsigned hexadecimal integer

    This explanation of ``fmt`` is not complete, for an exhaustive
    specification see [1]_.

    References
    ----------
    .. [1] `Format Specification Mini-Language
           <https://docs.python.org/library/string.html#format-specification-mini-language>`_,
           Python Documentation.

    Examples
    --------
    >>> import numpy as np
    >>> x = y = z = np.arange(0.0,5.0,1.0)
    >>> np.savetxt('test.out', x, delimiter=',')   # X is an array
    >>> np.savetxt('test.out', (x,y,z))   # x,y,z equal sized 1D arrays
    >>> np.savetxt('test.out', x, fmt='%1.4e')   # use exponential notation

    """

    class WriteWrap:
        """Wrapper deciding lazily (on first write) whether the underlying
        stream takes str or bytes, then sticking with that mode.
        """
        def __init__(self, fh, encoding):
            self.fh = fh
            self.encoding = encoding
            self.do_write = self.first_write

        def close(self):
            self.fh.close()

        def write(self, v):
            self.do_write(v)

        def write_bytes(self, v):
            if isinstance(v, bytes):
                self.fh.write(v)
            else:
                self.fh.write(v.encode(self.encoding))

        def write_normal(self, v):
            self.fh.write(asunicode(v))

        def first_write(self, v):
            try:
                self.write_normal(v)
                self.write = self.write_normal
            except TypeError:
                # input is probably a bytestream
                self.write_bytes(v)
                self.write = self.write_bytes

    own_fh = False
    if isinstance(fname, os.PathLike):
        fname = os.fspath(fname)
    if _is_string_like(fname):
        # datasource doesn't support creating a new file ...
        open(fname, 'wt').close()
        fh = np.lib._datasource.open(fname, 'wt', encoding=encoding)
        own_fh = True
    elif hasattr(fname, 'write'):
        # wrap to handle byte output streams
        fh = WriteWrap(fname, encoding or 'latin1')
    else:
        raise ValueError('fname must be a string or file handle')

    try:
        X = np.asarray(X)

        # Handle 1-dimensional arrays
        if X.ndim == 0 or X.ndim > 2:
            raise ValueError(
                "Expected 1D or 2D array, got %dD array instead" % X.ndim)
        elif X.ndim == 1:
            # Common case -- 1d array of numbers
            if X.dtype.names is None:
                X = np.atleast_2d(X).T
                ncol = 1

            # Complex dtype -- each field indicates a separate column
            else:
                ncol = len(X.dtype.names)
        else:
            ncol = X.shape[1]

        iscomplex_X = np.iscomplexobj(X)
        # `fmt` can be a string with multiple insertion points or a
        # list of formats.  E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
        if type(fmt) in (list, tuple):
            # NOTE(review): AttributeError looks unintended here (a
            # ValueError would fit better), but it is kept since callers
            # may already catch it — confirm before changing.
            if len(fmt) != ncol:
                raise AttributeError(f'fmt has wrong shape. {str(fmt)}')
            format = delimiter.join(fmt)
        elif isinstance(fmt, str):
            n_fmt_chars = fmt.count('%')
            error = ValueError(f'fmt has wrong number of % formats: {fmt}')
            if n_fmt_chars == 1:
                if iscomplex_X:
                    fmt = [f' ({fmt}+{fmt}j)', ] * ncol
                else:
                    fmt = [fmt, ] * ncol
                format = delimiter.join(fmt)
            elif iscomplex_X and n_fmt_chars != (2 * ncol):
                raise error
            elif ((not iscomplex_X) and n_fmt_chars != ncol):
                raise error
            else:
                format = fmt
        else:
            raise ValueError(f'invalid fmt: {fmt!r}')

        if len(header) > 0:
            header = header.replace('\n', '\n' + comments)
            fh.write(comments + header + newline)
        if iscomplex_X:
            for row in X:
                row2 = []
                for number in row:
                    row2.extend((number.real, number.imag))
                s = format % tuple(row2) + newline
                # '+-' arises from formatting a negative imaginary part
                fh.write(s.replace('+-', '-'))
        else:
            for row in X:
                try:
                    v = format % tuple(row) + newline
                except TypeError as e:
                    raise TypeError("Mismatch between array dtype ('%s') and "
                                    "format specifier ('%s')"
                                    % (str(X.dtype), format)) from e
                fh.write(v)

        if len(footer) > 0:
            footer = footer.replace('\n', '\n' + comments)
            fh.write(comments + footer + newline)
    finally:
        if own_fh:
            fh.close()


@set_module('numpy')
def fromregex(file, regexp, dtype, encoding=None):
    r"""
    Construct an array from a text file, using regular expression parsing.

    The returned array is always a structured array, and is constructed from
    all matches of the regular expression in the file. Groups in the regular
    expression are converted to fields of the structured array.

    Parameters
    ----------
    file : file, str, or pathlib.Path
        Filename or file object to read.

        .. 
versionchanged:: 1.22.0\n Now accepts `os.PathLike` implementations.\n\n regexp : str or regexp\n Regular expression used to parse the file.\n Groups in the regular expression correspond to fields in the dtype.\n dtype : dtype or list of dtypes\n Dtype for the structured array; must be a structured datatype.\n encoding : str, optional\n Encoding used to decode the inputfile. Does not apply to input streams.\n\n Returns\n -------\n output : ndarray\n The output array, containing the part of the content of `file` that\n was matched by `regexp`. `output` is always a structured array.\n\n Raises\n ------\n TypeError\n When `dtype` is not a valid dtype for a structured array.\n\n See Also\n --------\n fromstring, loadtxt\n\n Notes\n -----\n Dtypes for structured arrays can be specified in several forms, but all\n forms specify at least the data type and field name. For details see\n `basics.rec`.\n\n Examples\n --------\n >>> import numpy as np\n >>> from io import StringIO\n >>> text = StringIO("1312 foo\n1534 bar\n444 qux")\n\n >>> regexp = r"(\d+)\s+(...)" # match [digits, whitespace, anything]\n >>> output = np.fromregex(text, regexp,\n ... 
[('num', np.int64), ('key', 'S3')])\n >>> output\n array([(1312, b'foo'), (1534, b'bar'), ( 444, b'qux')],\n dtype=[('num', '<i8'), ('key', 'S3')])\n >>> output['num']\n array([1312, 1534, 444])\n\n """\n own_fh = False\n if not hasattr(file, "read"):\n file = os.fspath(file)\n file = np.lib._datasource.open(file, 'rt', encoding=encoding)\n own_fh = True\n\n try:\n if not isinstance(dtype, np.dtype):\n dtype = np.dtype(dtype)\n if dtype.names is None:\n raise TypeError('dtype must be a structured datatype.')\n\n content = file.read()\n if isinstance(content, bytes) and isinstance(regexp, str):\n regexp = asbytes(regexp)\n\n if not hasattr(regexp, 'match'):\n regexp = re.compile(regexp)\n seq = regexp.findall(content)\n if seq and not isinstance(seq[0], tuple):\n # Only one group is in the regexp.\n # Create the new array as a single data-type and then\n # re-interpret as a single-field structured array.\n newdtype = np.dtype(dtype[dtype.names[0]])\n output = np.array(seq, dtype=newdtype)\n output.dtype = dtype\n else:\n output = np.array(seq, dtype=dtype)\n\n return output\n finally:\n if own_fh:\n file.close()\n\n\n#####--------------------------------------------------------------------------\n#---- --- ASCII functions ---\n#####--------------------------------------------------------------------------\n\n\n@finalize_array_function_like\n@set_module('numpy')\ndef genfromtxt(fname, dtype=float, comments='#', delimiter=None,\n skip_header=0, skip_footer=0, converters=None,\n missing_values=None, filling_values=None, usecols=None,\n names=None, excludelist=None,\n deletechars=''.join(sorted(NameValidator.defaultdeletechars)), # noqa: B008\n replace_space='_', autostrip=False, case_sensitive=True,\n defaultfmt="f%i", unpack=None, usemask=False, loose=True,\n invalid_raise=True, max_rows=None, encoding=None,\n *, ndmin=0, like=None):\n """\n Load data from a text file, with missing values handled as specified.\n\n Each line past the first `skip_header` lines is split 
at the `delimiter`\n character, and characters following the `comments` character are discarded.\n\n Parameters\n ----------\n fname : file, str, pathlib.Path, list of str, generator\n File, filename, list, or generator to read. If the filename\n extension is ``.gz`` or ``.bz2``, the file is first decompressed. Note\n that generators must return bytes or strings. The strings\n in a list or produced by a generator are treated as lines.\n dtype : dtype, optional\n Data type of the resulting array.\n If None, the dtypes will be determined by the contents of each\n column, individually.\n comments : str, optional\n The character used to indicate the start of a comment.\n All the characters occurring on a line after a comment are discarded.\n delimiter : str, int, or sequence, optional\n The string used to separate values. By default, any consecutive\n whitespaces act as delimiter. An integer or sequence of integers\n can also be provided as width(s) of each field.\n skiprows : int, optional\n `skiprows` was removed in numpy 1.10. Please use `skip_header` instead.\n skip_header : int, optional\n The number of lines to skip at the beginning of the file.\n skip_footer : int, optional\n The number of lines to skip at the end of the file.\n converters : variable, optional\n The set of functions that convert the data of a column to a value.\n The converters can also be used to provide a default value\n for missing data: ``converters = {3: lambda s: float(s or 0)}``.\n missing : variable, optional\n `missing` was removed in numpy 1.10. Please use `missing_values`\n instead.\n missing_values : variable, optional\n The set of strings corresponding to missing data.\n filling_values : variable, optional\n The set of values to be used as default when the data are missing.\n usecols : sequence, optional\n Which columns to read, with 0 being the first. 
For example,\n ``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.\n names : {None, True, str, sequence}, optional\n If `names` is True, the field names are read from the first line after\n the first `skip_header` lines. This line can optionally be preceded\n by a comment delimiter. Any content before the comment delimiter is\n discarded. If `names` is a sequence or a single-string of\n comma-separated names, the names will be used to define the field\n names in a structured dtype. If `names` is None, the names of the\n dtype fields will be used, if any.\n excludelist : sequence, optional\n A list of names to exclude. This list is appended to the default list\n ['return','file','print']. Excluded names are appended with an\n underscore: for example, `file` would become `file_`.\n deletechars : str, optional\n A string combining invalid characters that must be deleted from the\n names.\n defaultfmt : str, optional\n A format used to define default field names, such as "f%i" or "f_%02i".\n autostrip : bool, optional\n Whether to automatically strip white spaces from the variables.\n replace_space : char, optional\n Character(s) used in replacement of white spaces in the variable\n names. By default, use a '_'.\n case_sensitive : {True, False, 'upper', 'lower'}, optional\n If True, field names are case sensitive.\n If False or 'upper', field names are converted to upper case.\n If 'lower', field names are converted to lower case.\n unpack : bool, optional\n If True, the returned array is transposed, so that arguments may be\n unpacked using ``x, y, z = genfromtxt(...)``. 
When used with a\n structured data-type, arrays are returned for each field.\n Default is False.\n usemask : bool, optional\n If True, return a masked array.\n If False, return a regular array.\n loose : bool, optional\n If True, do not raise errors for invalid values.\n invalid_raise : bool, optional\n If True, an exception is raised if an inconsistency is detected in the\n number of columns.\n If False, a warning is emitted and the offending lines are skipped.\n max_rows : int, optional\n The maximum number of rows to read. Must not be used with skip_footer\n at the same time. If given, the value must be at least 1. Default is\n to read the entire file.\n encoding : str, optional\n Encoding used to decode the inputfile. Does not apply when `fname`\n is a file object. The special value 'bytes' enables backward\n compatibility workarounds that ensure that you receive byte arrays\n when possible and passes latin1 encoded strings to converters.\n Override this value to receive unicode arrays and pass strings\n as input to converters. If set to None the system default is used.\n The default value is 'bytes'.\n\n .. versionchanged:: 2.0\n Before NumPy 2, the default was ``'bytes'`` for Python 2\n compatibility. The default is now ``None``.\n\n ndmin : int, optional\n Same parameter as `loadtxt`\n\n .. versionadded:: 1.23.0\n ${ARRAY_FUNCTION_LIKE}\n\n .. versionadded:: 1.20.0\n\n Returns\n -------\n out : ndarray\n Data read from the text file. 
If `usemask` is True, this is a\n masked array.\n\n See Also\n --------\n numpy.loadtxt : equivalent function when no data is missing.\n\n Notes\n -----\n * When spaces are used as delimiters, or when no delimiter has been given\n as input, there should not be any missing data between two fields.\n * When variables are named (either by a flexible dtype or with a `names`\n sequence), there must not be any header in the file (else a ValueError\n exception is raised).\n * Individual values are not stripped of spaces by default.\n When using a custom converter, make sure the function does remove spaces.\n * Custom converters may receive unexpected values due to dtype\n discovery.\n\n References\n ----------\n .. [1] NumPy User Guide, section `I/O with NumPy\n <https://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.\n\n Examples\n --------\n >>> from io import StringIO\n >>> import numpy as np\n\n Comma delimited file with mixed dtype\n\n >>> s = StringIO("1,1.3,abcde")\n >>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),\n ... ('mystring','S5')], delimiter=",")\n >>> data\n array((1, 1.3, b'abcde'),\n dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])\n\n Using dtype = None\n\n >>> _ = s.seek(0) # needed for StringIO example only\n >>> data = np.genfromtxt(s, dtype=None,\n ... names = ['myint','myfloat','mystring'], delimiter=",")\n >>> data\n array((1, 1.3, 'abcde'),\n dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '<U5')])\n\n Specifying dtype and names\n\n >>> _ = s.seek(0)\n >>> data = np.genfromtxt(s, dtype="i8,f8,S5",\n ... names=['myint','myfloat','mystring'], delimiter=",")\n >>> data\n array((1, 1.3, b'abcde'),\n dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])\n\n An example with fixed-width columns\n\n >>> s = StringIO("11.3abcde")\n >>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],\n ... 
delimiter=[1,3,5])\n >>> data\n array((1, 1.3, 'abcde'),\n dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '<U5')])\n\n An example to show comments\n\n >>> f = StringIO('''\n ... text,# of chars\n ... hello world,11\n ... numpy,5''')\n >>> np.genfromtxt(f, dtype='S12,S12', delimiter=',')\n array([(b'text', b''), (b'hello world', b'11'), (b'numpy', b'5')],\n dtype=[('f0', 'S12'), ('f1', 'S12')])\n\n """\n\n if like is not None:\n return _genfromtxt_with_like(\n like, fname, dtype=dtype, comments=comments, delimiter=delimiter,\n skip_header=skip_header, skip_footer=skip_footer,\n converters=converters, missing_values=missing_values,\n filling_values=filling_values, usecols=usecols, names=names,\n excludelist=excludelist, deletechars=deletechars,\n replace_space=replace_space, autostrip=autostrip,\n case_sensitive=case_sensitive, defaultfmt=defaultfmt,\n unpack=unpack, usemask=usemask, loose=loose,\n invalid_raise=invalid_raise, max_rows=max_rows, encoding=encoding,\n ndmin=ndmin,\n )\n\n _ensure_ndmin_ndarray_check_param(ndmin)\n\n if max_rows is not None:\n if skip_footer:\n raise ValueError(\n "The keywords 'skip_footer' and 'max_rows' can not be "\n "specified at the same time.")\n if max_rows < 1:\n raise ValueError("'max_rows' must be at least 1.")\n\n if usemask:\n from numpy.ma import MaskedArray, make_mask_descr\n # Check the input dictionary of converters\n user_converters = converters or {}\n if not isinstance(user_converters, dict):\n raise TypeError(\n "The input argument 'converter' should be a valid dictionary "\n "(got '%s' instead)" % type(user_converters))\n\n if encoding == 'bytes':\n encoding = None\n byte_converters = True\n else:\n byte_converters = False\n\n # Initialize the filehandle, the LineSplitter and the NameValidator\n if isinstance(fname, os.PathLike):\n fname = os.fspath(fname)\n if isinstance(fname, str):\n fid = np.lib._datasource.open(fname, 'rt', encoding=encoding)\n fid_ctx = contextlib.closing(fid)\n else:\n fid = fname\n 
fid_ctx = contextlib.nullcontext(fid)\n try:\n fhd = iter(fid)\n except TypeError as e:\n raise TypeError(\n "fname must be a string, a filehandle, a sequence of strings,\n"\n f"or an iterator of strings. Got {type(fname)} instead."\n ) from e\n with fid_ctx:\n split_line = LineSplitter(delimiter=delimiter, comments=comments,\n autostrip=autostrip, encoding=encoding)\n validate_names = NameValidator(excludelist=excludelist,\n deletechars=deletechars,\n case_sensitive=case_sensitive,\n replace_space=replace_space)\n\n # Skip the first `skip_header` rows\n try:\n for i in range(skip_header):\n next(fhd)\n\n # Keep on until we find the first valid values\n first_values = None\n\n while not first_values:\n first_line = _decode_line(next(fhd), encoding)\n if (names is True) and (comments is not None):\n if comments in first_line:\n first_line = (\n ''.join(first_line.split(comments)[1:]))\n first_values = split_line(first_line)\n except StopIteration:\n # return an empty array if the datafile is empty\n first_line = ''\n first_values = []\n warnings.warn(\n f'genfromtxt: Empty input file: "{fname}"', stacklevel=2\n )\n\n # Should we take the first values as names ?\n if names is True:\n fval = first_values[0].strip()\n if comments is not None:\n if fval in comments:\n del first_values[0]\n\n # Check the columns to use: make sure `usecols` is a list\n if usecols is not None:\n try:\n usecols = [_.strip() for _ in usecols.split(",")]\n except AttributeError:\n try:\n usecols = list(usecols)\n except TypeError:\n usecols = [usecols, ]\n nbcols = len(usecols or first_values)\n\n # Check the names and overwrite the dtype.names if needed\n if names is True:\n names = validate_names([str(_.strip()) for _ in first_values])\n first_line = ''\n elif _is_string_like(names):\n names = validate_names([_.strip() for _ in names.split(',')])\n elif names:\n names = validate_names(names)\n # Get the dtype\n if dtype is not None:\n dtype = easy_dtype(dtype, defaultfmt=defaultfmt, 
names=names,\n excludelist=excludelist,\n deletechars=deletechars,\n case_sensitive=case_sensitive,\n replace_space=replace_space)\n # Make sure the names is a list (for 2.5)\n if names is not None:\n names = list(names)\n\n if usecols:\n for (i, current) in enumerate(usecols):\n # if usecols is a list of names, convert to a list of indices\n if _is_string_like(current):\n usecols[i] = names.index(current)\n elif current < 0:\n usecols[i] = current + len(first_values)\n # If the dtype is not None, make sure we update it\n if (dtype is not None) and (len(dtype) > nbcols):\n descr = dtype.descr\n dtype = np.dtype([descr[_] for _ in usecols])\n names = list(dtype.names)\n # If `names` is not None, update the names\n elif (names is not None) and (len(names) > nbcols):\n names = [names[_] for _ in usecols]\n elif (names is not None) and (dtype is not None):\n names = list(dtype.names)\n\n # Process the missing values ...............................\n # Rename missing_values for convenience\n user_missing_values = missing_values or ()\n if isinstance(user_missing_values, bytes):\n user_missing_values = user_missing_values.decode('latin1')\n\n # Define the list of missing_values (one column: one list)\n missing_values = [[''] for _ in range(nbcols)]\n\n # We have a dictionary: process it field by field\n if isinstance(user_missing_values, dict):\n # Loop on the items\n for (key, val) in user_missing_values.items():\n # Is the key a string ?\n if _is_string_like(key):\n try:\n # Transform it into an integer\n key = names.index(key)\n except ValueError:\n # We couldn't find it: the name must have been dropped\n continue\n # Redefine the key as needed if it's a column number\n if usecols:\n try:\n key = usecols.index(key)\n except ValueError:\n pass\n # Transform the value as a list of string\n if isinstance(val, (list, tuple)):\n val = [str(_) for _ in val]\n else:\n val = [str(val), ]\n # Add the value(s) to the current list of missing\n if key is None:\n # None acts as 
default\n for miss in missing_values:\n miss.extend(val)\n else:\n missing_values[key].extend(val)\n # We have a sequence : each item matches a column\n elif isinstance(user_missing_values, (list, tuple)):\n for (value, entry) in zip(user_missing_values, missing_values):\n value = str(value)\n if value not in entry:\n entry.append(value)\n # We have a string : apply it to all entries\n elif isinstance(user_missing_values, str):\n user_value = user_missing_values.split(",")\n for entry in missing_values:\n entry.extend(user_value)\n # We have something else: apply it to all entries\n else:\n for entry in missing_values:\n entry.extend([str(user_missing_values)])\n\n # Process the filling_values ...............................\n # Rename the input for convenience\n user_filling_values = filling_values\n if user_filling_values is None:\n user_filling_values = []\n # Define the default\n filling_values = [None] * nbcols\n # We have a dictionary : update each entry individually\n if isinstance(user_filling_values, dict):\n for (key, val) in user_filling_values.items():\n if _is_string_like(key):\n try:\n # Transform it into an integer\n key = names.index(key)\n except ValueError:\n # We couldn't find it: the name must have been dropped\n continue\n # Redefine the key if it's a column number\n # and usecols is defined\n if usecols:\n try:\n key = usecols.index(key)\n except ValueError:\n pass\n # Add the value to the list\n filling_values[key] = val\n # We have a sequence : update on a one-to-one basis\n elif isinstance(user_filling_values, (list, tuple)):\n n = len(user_filling_values)\n if (n <= nbcols):\n filling_values[:n] = user_filling_values\n else:\n filling_values = user_filling_values[:nbcols]\n # We have something else : use it for all entries\n else:\n filling_values = [user_filling_values] * nbcols\n\n # Initialize the converters ................................\n if dtype is None:\n # Note: we can't use a [...]*nbcols, as we would have 3 times\n # the same 
converter, instead of 3 different converters.\n converters = [\n StringConverter(None, missing_values=miss, default=fill)\n for (miss, fill) in zip(missing_values, filling_values)\n ]\n else:\n dtype_flat = flatten_dtype(dtype, flatten_base=True)\n # Initialize the converters\n if len(dtype_flat) > 1:\n # Flexible type : get a converter from each dtype\n zipit = zip(dtype_flat, missing_values, filling_values)\n converters = [StringConverter(dt,\n locked=True,\n missing_values=miss,\n default=fill)\n for (dt, miss, fill) in zipit]\n else:\n # Set to a default converter (but w/ different missing values)\n zipit = zip(missing_values, filling_values)\n converters = [StringConverter(dtype,\n locked=True,\n missing_values=miss,\n default=fill)\n for (miss, fill) in zipit]\n # Update the converters to use the user-defined ones\n uc_update = []\n for (j, conv) in user_converters.items():\n # If the converter is specified by column names,\n # use the index instead\n if _is_string_like(j):\n try:\n j = names.index(j)\n i = j\n except ValueError:\n continue\n elif usecols:\n try:\n i = usecols.index(j)\n except ValueError:\n # Unused converter specified\n continue\n else:\n i = j\n # Find the value to test - first_line is not filtered by usecols:\n if len(first_line):\n testing_value = first_values[j]\n else:\n testing_value = None\n if conv is bytes:\n user_conv = asbytes\n elif byte_converters:\n # Converters may use decode to workaround numpy's old\n # behavior, so encode the string again before passing\n # to the user converter.\n def tobytes_first(x, conv):\n if type(x) is bytes:\n return conv(x)\n return conv(x.encode("latin1"))\n user_conv = functools.partial(tobytes_first, conv=conv)\n else:\n user_conv = conv\n converters[i].update(user_conv, locked=True,\n testing_value=testing_value,\n default=filling_values[i],\n missing_values=missing_values[i],)\n uc_update.append((i, user_conv))\n # Make sure we have the corrected keys in user_converters...\n 
user_converters.update(uc_update)\n\n # Fixme: possible error as following variable never used.\n # miss_chars = [_.missing_values for _ in converters]\n\n # Initialize the output lists ...\n # ... rows\n rows = []\n append_to_rows = rows.append\n # ... masks\n if usemask:\n masks = []\n append_to_masks = masks.append\n # ... invalid\n invalid = []\n append_to_invalid = invalid.append\n\n # Parse each line\n for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):\n values = split_line(line)\n nbvalues = len(values)\n # Skip an empty line\n if nbvalues == 0:\n continue\n if usecols:\n # Select only the columns we need\n try:\n values = [values[_] for _ in usecols]\n except IndexError:\n append_to_invalid((i + skip_header + 1, nbvalues))\n continue\n elif nbvalues != nbcols:\n append_to_invalid((i + skip_header + 1, nbvalues))\n continue\n # Store the values\n append_to_rows(tuple(values))\n if usemask:\n append_to_masks(tuple(v.strip() in m\n for (v, m) in zip(values,\n missing_values)))\n if len(rows) == max_rows:\n break\n\n # Upgrade the converters (if needed)\n if dtype is None:\n for (i, converter) in enumerate(converters):\n current_column = [itemgetter(i)(_m) for _m in rows]\n try:\n converter.iterupgrade(current_column)\n except ConverterLockError:\n errmsg = f"Converter #{i} is locked and cannot be upgraded: "\n current_column = map(itemgetter(i), rows)\n for (j, value) in enumerate(current_column):\n try:\n converter.upgrade(value)\n except (ConverterError, ValueError):\n line_number = j + 1 + skip_header\n errmsg += f"(occurred line #{line_number} for value '{value}')"\n raise ConverterError(errmsg)\n\n # Check that we don't have invalid values\n nbinvalid = len(invalid)\n if nbinvalid > 0:\n nbrows = len(rows) + nbinvalid - skip_footer\n # Construct the error message\n template = f" Line #%i (got %i columns instead of {nbcols})"\n if skip_footer > 0:\n nbinvalid_skipped = len([_ for _ in invalid\n if _[0] > nbrows + skip_header])\n invalid = 
invalid[:nbinvalid - nbinvalid_skipped]\n skip_footer -= nbinvalid_skipped\n#\n# nbrows -= skip_footer\n# errmsg = [template % (i, nb)\n# for (i, nb) in invalid if i < nbrows]\n# else:\n errmsg = [template % (i, nb)\n for (i, nb) in invalid]\n if len(errmsg):\n errmsg.insert(0, "Some errors were detected !")\n errmsg = "\n".join(errmsg)\n # Raise an exception ?\n if invalid_raise:\n raise ValueError(errmsg)\n # Issue a warning ?\n else:\n warnings.warn(errmsg, ConversionWarning, stacklevel=2)\n\n # Strip the last skip_footer data\n if skip_footer > 0:\n rows = rows[:-skip_footer]\n if usemask:\n masks = masks[:-skip_footer]\n\n # Convert each value according to the converter:\n # We want to modify the list in place to avoid creating a new one...\n if loose:\n rows = list(\n zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)]\n for (i, conv) in enumerate(converters)]))\n else:\n rows = list(\n zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)]\n for (i, conv) in enumerate(converters)]))\n\n # Reset the dtype\n data = rows\n if dtype is None:\n # Get the dtypes from the types of the converters\n column_types = [conv.type for conv in converters]\n # Find the columns with strings...\n strcolidx = [i for (i, v) in enumerate(column_types)\n if v == np.str_]\n\n if byte_converters and strcolidx:\n # convert strings back to bytes for backward compatibility\n warnings.warn(\n "Reading unicode strings without specifying the encoding "\n "argument is deprecated. 
Set the encoding, use None for the "\n "system default.",\n np.exceptions.VisibleDeprecationWarning, stacklevel=2)\n\n def encode_unicode_cols(row_tup):\n row = list(row_tup)\n for i in strcolidx:\n row[i] = row[i].encode('latin1')\n return tuple(row)\n\n try:\n data = [encode_unicode_cols(r) for r in data]\n except UnicodeEncodeError:\n pass\n else:\n for i in strcolidx:\n column_types[i] = np.bytes_\n\n # Update string types to be the right length\n sized_column_types = column_types.copy()\n for i, col_type in enumerate(column_types):\n if np.issubdtype(col_type, np.character):\n n_chars = max(len(row[i]) for row in data)\n sized_column_types[i] = (col_type, n_chars)\n\n if names is None:\n # If the dtype is uniform (before sizing strings)\n base = {\n c_type\n for c, c_type in zip(converters, column_types)\n if c._checked}\n if len(base) == 1:\n uniform_type, = base\n (ddtype, mdtype) = (uniform_type, bool)\n else:\n ddtype = [(defaultfmt % i, dt)\n for (i, dt) in enumerate(sized_column_types)]\n if usemask:\n mdtype = [(defaultfmt % i, bool)\n for (i, dt) in enumerate(sized_column_types)]\n else:\n ddtype = list(zip(names, sized_column_types))\n mdtype = list(zip(names, [bool] * len(sized_column_types)))\n output = np.array(data, dtype=ddtype)\n if usemask:\n outputmask = np.array(masks, dtype=mdtype)\n else:\n # Overwrite the initial dtype names if needed\n if names and dtype.names is not None:\n dtype.names = names\n # Case 1. 
We have a structured type\n if len(dtype_flat) > 1:\n # Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]\n # First, create the array using a flattened dtype:\n # [('a', int), ('b1', int), ('b2', float)]\n # Then, view the array using the specified dtype.\n if 'O' in (_.char for _ in dtype_flat):\n if has_nested_fields(dtype):\n raise NotImplementedError(\n "Nested fields involving objects are not supported...")\n else:\n output = np.array(data, dtype=dtype)\n else:\n rows = np.array(data, dtype=[('', _) for _ in dtype_flat])\n output = rows.view(dtype)\n # Now, process the rowmasks the same way\n if usemask:\n rowmasks = np.array(\n masks, dtype=np.dtype([('', bool) for t in dtype_flat]))\n # Construct the new dtype\n mdtype = make_mask_descr(dtype)\n outputmask = rowmasks.view(mdtype)\n # Case #2. We have a basic dtype\n else:\n # We used some user-defined converters\n if user_converters:\n ishomogeneous = True\n descr = []\n for i, ttype in enumerate([conv.type for conv in converters]):\n # Keep the dtype of the current converter\n if i in user_converters:\n ishomogeneous &= (ttype == dtype.type)\n if np.issubdtype(ttype, np.character):\n ttype = (ttype, max(len(row[i]) for row in data))\n descr.append(('', ttype))\n else:\n descr.append(('', dtype))\n # So we changed the dtype ?\n if not ishomogeneous:\n # We have more than one field\n if len(descr) > 1:\n dtype = np.dtype(descr)\n # We have only one field: drop the name if not needed.\n else:\n dtype = np.dtype(ttype)\n #\n output = np.array(data, dtype)\n if usemask:\n if dtype.names is not None:\n mdtype = [(_, bool) for _ in dtype.names]\n else:\n mdtype = bool\n outputmask = np.array(masks, dtype=mdtype)\n # Try to take care of the missing data we missed\n names = output.dtype.names\n if usemask and names:\n for (name, conv) in zip(names, converters):\n missing_values = [conv(_) for _ in conv.missing_values\n if _ != '']\n for mval in missing_values:\n outputmask[name] |= (output[name] == 
mval)\n # Construct the final array\n if usemask:\n output = output.view(MaskedArray)\n output._mask = outputmask\n\n output = _ensure_ndmin_ndarray(output, ndmin=ndmin)\n\n if unpack:\n if names is None:\n return output.T\n elif len(names) == 1:\n # squeeze single-name dtypes too\n return output[names[0]]\n else:\n # For structured arrays with multiple fields,\n # return an array for each field.\n return [output[field] for field in names]\n return output\n\n\n_genfromtxt_with_like = array_function_dispatch()(genfromtxt)\n\n\ndef recfromtxt(fname, **kwargs):\n """\n Load ASCII data from a file and return it in a record array.\n\n If ``usemask=False`` a standard `recarray` is returned,\n if ``usemask=True`` a MaskedRecords array is returned.\n\n .. deprecated:: 2.0\n Use `numpy.genfromtxt` instead.\n\n Parameters\n ----------\n fname, kwargs : For a description of input parameters, see `genfromtxt`.\n\n See Also\n --------\n numpy.genfromtxt : generic function\n\n Notes\n -----\n By default, `dtype` is None, which means that the data-type of the output\n array will be determined from the data.\n\n """\n\n # Deprecated in NumPy 2.0, 2023-07-11\n warnings.warn(\n "`recfromtxt` is deprecated, "\n "use `numpy.genfromtxt` instead."\n "(deprecated in NumPy 2.0)",\n DeprecationWarning,\n stacklevel=2\n )\n\n kwargs.setdefault("dtype", None)\n usemask = kwargs.get('usemask', False)\n output = genfromtxt(fname, **kwargs)\n if usemask:\n from numpy.ma.mrecords import MaskedRecords\n output = output.view(MaskedRecords)\n else:\n output = output.view(np.recarray)\n return output\n\n\ndef recfromcsv(fname, **kwargs):\n """\n Load ASCII data stored in a comma-separated file.\n\n The returned array is a record array (if ``usemask=False``, see\n `recarray`) or a masked record array (if ``usemask=True``,\n see `ma.mrecords.MaskedRecords`).\n\n .. 
deprecated:: 2.0\n Use `numpy.genfromtxt` with comma as `delimiter` instead.\n\n Parameters\n ----------\n fname, kwargs : For a description of input parameters, see `genfromtxt`.\n\n See Also\n --------\n numpy.genfromtxt : generic function to load ASCII data.\n\n Notes\n -----\n By default, `dtype` is None, which means that the data-type of the output\n array will be determined from the data.\n\n """\n\n # Deprecated in NumPy 2.0, 2023-07-11\n warnings.warn(\n "`recfromcsv` is deprecated, "\n "use `numpy.genfromtxt` with comma as `delimiter` instead. "\n "(deprecated in NumPy 2.0)",\n DeprecationWarning,\n stacklevel=2\n )\n\n # Set default kwargs for genfromtxt as relevant to csv import.\n kwargs.setdefault("case_sensitive", "lower")\n kwargs.setdefault("names", True)\n kwargs.setdefault("delimiter", ",")\n kwargs.setdefault("dtype", None)\n output = genfromtxt(fname, **kwargs)\n\n usemask = kwargs.get("usemask", False)\n if usemask:\n from numpy.ma.mrecords import MaskedRecords\n output = output.view(MaskedRecords)\n else:\n output = output.view(np.recarray)\n return output\n | .venv\Lib\site-packages\numpy\lib\_npyio_impl.py | _npyio_impl.py | Python | 101,876 | 0.75 | 0.164869 | 0.093054 | node-utils | 466 | 2023-09-11T15:45:59.806986 | MIT | false | 2147d506f7511927458dff4ab97e6c94 |
import types\nimport zipfile\nfrom collections.abc import Callable, Collection, Iterable, Iterator, Mapping, Sequence\nfrom re import Pattern\nfrom typing import (\n IO,\n Any,\n ClassVar,\n Generic,\n Protocol,\n Self,\n TypeAlias,\n overload,\n type_check_only,\n)\nfrom typing import Literal as L\n\nfrom _typeshed import (\n StrOrBytesPath,\n StrPath,\n SupportsKeysAndGetItem,\n SupportsRead,\n SupportsWrite,\n)\nfrom typing_extensions import TypeVar, deprecated, override\n\nimport numpy as np\nfrom numpy._core.multiarray import packbits, unpackbits\nfrom numpy._typing import ArrayLike, DTypeLike, NDArray, _DTypeLike, _SupportsArrayFunc\nfrom numpy.ma.mrecords import MaskedRecords\n\nfrom ._datasource import DataSource as DataSource\n\n__all__ = [\n "fromregex",\n "genfromtxt",\n "load",\n "loadtxt",\n "packbits",\n "save",\n "savetxt",\n "savez",\n "savez_compressed",\n "unpackbits",\n]\n\n_T_co = TypeVar("_T_co", covariant=True)\n_ScalarT = TypeVar("_ScalarT", bound=np.generic)\n_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, default=Any, covariant=True)\n\n_FName: TypeAlias = StrPath | Iterable[str] | Iterable[bytes]\n_FNameRead: TypeAlias = StrPath | SupportsRead[str] | SupportsRead[bytes]\n_FNameWriteBytes: TypeAlias = StrPath | SupportsWrite[bytes]\n_FNameWrite: TypeAlias = _FNameWriteBytes | SupportsWrite[str]\n\n@type_check_only\nclass _SupportsReadSeek(SupportsRead[_T_co], Protocol[_T_co]):\n def seek(self, offset: int, whence: int, /) -> object: ...\n\nclass BagObj(Generic[_T_co]):\n def __init__(self, /, obj: SupportsKeysAndGetItem[str, _T_co]) -> None: ...\n def __getattribute__(self, key: str, /) -> _T_co: ...\n def __dir__(self) -> list[str]: ...\n\nclass NpzFile(Mapping[str, NDArray[_ScalarT_co]]):\n _MAX_REPR_ARRAY_COUNT: ClassVar[int] = 5\n\n zip: zipfile.ZipFile\n fid: IO[str] | None\n files: list[str]\n allow_pickle: bool\n pickle_kwargs: Mapping[str, Any] | None\n f: BagObj[NpzFile[_ScalarT_co]]\n\n #\n def __init__(\n self,\n /,\n fid: 
IO[Any],\n own_fid: bool = False,\n allow_pickle: bool = False,\n pickle_kwargs: Mapping[str, object] | None = None,\n *,\n max_header_size: int = 10_000,\n ) -> None: ...\n def __del__(self) -> None: ...\n def __enter__(self) -> Self: ...\n def __exit__(self, cls: type[BaseException] | None, e: BaseException | None, tb: types.TracebackType | None, /) -> None: ...\n @override\n def __len__(self) -> int: ...\n @override\n def __iter__(self) -> Iterator[str]: ...\n @override\n def __getitem__(self, key: str, /) -> NDArray[_ScalarT_co]: ...\n def close(self) -> None: ...\n\n# NOTE: Returns a `NpzFile` if file is a zip file;\n# returns an `ndarray`/`memmap` otherwise\ndef load(\n file: StrOrBytesPath | _SupportsReadSeek[bytes],\n mmap_mode: L["r+", "r", "w+", "c"] | None = None,\n allow_pickle: bool = False,\n fix_imports: bool = True,\n encoding: L["ASCII", "latin1", "bytes"] = "ASCII",\n *,\n max_header_size: int = 10_000,\n) -> Any: ...\n\n@overload\ndef save(file: _FNameWriteBytes, arr: ArrayLike, allow_pickle: bool = True) -> None: ...\n@overload\n@deprecated("The 'fix_imports' flag is deprecated in NumPy 2.1.")\ndef save(file: _FNameWriteBytes, arr: ArrayLike, allow_pickle: bool, fix_imports: bool) -> None: ...\n@overload\n@deprecated("The 'fix_imports' flag is deprecated in NumPy 2.1.")\ndef save(file: _FNameWriteBytes, arr: ArrayLike, allow_pickle: bool = True, *, fix_imports: bool) -> None: ...\n\n#\ndef savez(file: _FNameWriteBytes, *args: ArrayLike, allow_pickle: bool = True, **kwds: ArrayLike) -> None: ...\n\n#\ndef savez_compressed(file: _FNameWriteBytes, *args: ArrayLike, allow_pickle: bool = True, **kwds: ArrayLike) -> None: ...\n\n# File-like objects only have to implement `__iter__` and,\n# optionally, `encoding`\n@overload\ndef loadtxt(\n fname: _FName,\n dtype: None = None,\n comments: str | Sequence[str] | None = "#",\n delimiter: str | None = None,\n converters: Mapping[int | str, Callable[[str], Any]] | Callable[[str], Any] | None = None,\n 
skiprows: int = 0,\n usecols: int | Sequence[int] | None = None,\n unpack: bool = False,\n ndmin: L[0, 1, 2] = 0,\n encoding: str | None = None,\n max_rows: int | None = None,\n *,\n quotechar: str | None = None,\n like: _SupportsArrayFunc | None = None,\n) -> NDArray[np.float64]: ...\n@overload\ndef loadtxt(\n fname: _FName,\n dtype: _DTypeLike[_ScalarT],\n comments: str | Sequence[str] | None = "#",\n delimiter: str | None = None,\n converters: Mapping[int | str, Callable[[str], Any]] | Callable[[str], Any] | None = None,\n skiprows: int = 0,\n usecols: int | Sequence[int] | None = None,\n unpack: bool = False,\n ndmin: L[0, 1, 2] = 0,\n encoding: str | None = None,\n max_rows: int | None = None,\n *,\n quotechar: str | None = None,\n like: _SupportsArrayFunc | None = None,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef loadtxt(\n fname: _FName,\n dtype: DTypeLike,\n comments: str | Sequence[str] | None = "#",\n delimiter: str | None = None,\n converters: Mapping[int | str, Callable[[str], Any]] | Callable[[str], Any] | None = None,\n skiprows: int = 0,\n usecols: int | Sequence[int] | None = None,\n unpack: bool = False,\n ndmin: L[0, 1, 2] = 0,\n encoding: str | None = None,\n max_rows: int | None = None,\n *,\n quotechar: str | None = None,\n like: _SupportsArrayFunc | None = None,\n) -> NDArray[Any]: ...\n\ndef savetxt(\n fname: _FNameWrite,\n X: ArrayLike,\n fmt: str | Sequence[str] = "%.18e",\n delimiter: str = " ",\n newline: str = "\n",\n header: str = "",\n footer: str = "",\n comments: str = "# ",\n encoding: str | None = None,\n) -> None: ...\n\n@overload\ndef fromregex(\n file: _FNameRead,\n regexp: str | bytes | Pattern[Any],\n dtype: _DTypeLike[_ScalarT],\n encoding: str | None = None,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef fromregex(\n file: _FNameRead,\n regexp: str | bytes | Pattern[Any],\n dtype: DTypeLike,\n encoding: str | None = None,\n) -> NDArray[Any]: ...\n\n@overload\ndef genfromtxt(\n fname: _FName,\n dtype: None = None,\n comments: 
str = ...,\n delimiter: str | int | Iterable[int] | None = ...,\n skip_header: int = ...,\n skip_footer: int = ...,\n converters: Mapping[int | str, Callable[[str], Any]] | None = ...,\n missing_values: Any = ...,\n filling_values: Any = ...,\n usecols: Sequence[int] | None = ...,\n names: L[True] | str | Collection[str] | None = ...,\n excludelist: Sequence[str] | None = ...,\n deletechars: str = ...,\n replace_space: str = ...,\n autostrip: bool = ...,\n case_sensitive: bool | L["upper", "lower"] = ...,\n defaultfmt: str = ...,\n unpack: bool | None = ...,\n usemask: bool = ...,\n loose: bool = ...,\n invalid_raise: bool = ...,\n max_rows: int | None = ...,\n encoding: str = ...,\n *,\n ndmin: L[0, 1, 2] = ...,\n like: _SupportsArrayFunc | None = ...,\n) -> NDArray[Any]: ...\n@overload\ndef genfromtxt(\n fname: _FName,\n dtype: _DTypeLike[_ScalarT],\n comments: str = ...,\n delimiter: str | int | Iterable[int] | None = ...,\n skip_header: int = ...,\n skip_footer: int = ...,\n converters: Mapping[int | str, Callable[[str], Any]] | None = ...,\n missing_values: Any = ...,\n filling_values: Any = ...,\n usecols: Sequence[int] | None = ...,\n names: L[True] | str | Collection[str] | None = ...,\n excludelist: Sequence[str] | None = ...,\n deletechars: str = ...,\n replace_space: str = ...,\n autostrip: bool = ...,\n case_sensitive: bool | L["upper", "lower"] = ...,\n defaultfmt: str = ...,\n unpack: bool | None = ...,\n usemask: bool = ...,\n loose: bool = ...,\n invalid_raise: bool = ...,\n max_rows: int | None = ...,\n encoding: str = ...,\n *,\n ndmin: L[0, 1, 2] = ...,\n like: _SupportsArrayFunc | None = ...,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef genfromtxt(\n fname: _FName,\n dtype: DTypeLike,\n comments: str = ...,\n delimiter: str | int | Iterable[int] | None = ...,\n skip_header: int = ...,\n skip_footer: int = ...,\n converters: Mapping[int | str, Callable[[str], Any]] | None = ...,\n missing_values: Any = ...,\n filling_values: Any = ...,\n usecols: 
Sequence[int] | None = ...,\n names: L[True] | str | Collection[str] | None = ...,\n excludelist: Sequence[str] | None = ...,\n deletechars: str = ...,\n replace_space: str = ...,\n autostrip: bool = ...,\n case_sensitive: bool | L["upper", "lower"] = ...,\n defaultfmt: str = ...,\n unpack: bool | None = ...,\n usemask: bool = ...,\n loose: bool = ...,\n invalid_raise: bool = ...,\n max_rows: int | None = ...,\n encoding: str = ...,\n *,\n ndmin: L[0, 1, 2] = ...,\n like: _SupportsArrayFunc | None = ...,\n) -> NDArray[Any]: ...\n\n@overload\ndef recfromtxt(fname: _FName, *, usemask: L[False] = False, **kwargs: object) -> np.recarray[Any, np.dtype[np.record]]: ...\n@overload\ndef recfromtxt(fname: _FName, *, usemask: L[True], **kwargs: object) -> MaskedRecords[Any, np.dtype[np.void]]: ...\n\n@overload\ndef recfromcsv(fname: _FName, *, usemask: L[False] = False, **kwargs: object) -> np.recarray[Any, np.dtype[np.record]]: ...\n@overload\ndef recfromcsv(fname: _FName, *, usemask: L[True], **kwargs: object) -> MaskedRecords[Any, np.dtype[np.void]]: ...\n | .venv\Lib\site-packages\numpy\lib\_npyio_impl.pyi | _npyio_impl.pyi | Other | 9,689 | 0.95 | 0.116279 | 0.053571 | vue-tools | 911 | 2024-07-18T01:03:12.946651 | Apache-2.0 | false | 72895b101df0244d9a8e24eb681cd213 |
"""\nFunctions to operate on polynomials.\n\n"""\n__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd',\n 'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d',\n 'polyfit']\n\nimport functools\nimport re\nimport warnings\n\nimport numpy._core.numeric as NX\nfrom numpy._core import (\n abs,\n array,\n atleast_1d,\n dot,\n finfo,\n hstack,\n isscalar,\n ones,\n overrides,\n)\nfrom numpy._utils import set_module\nfrom numpy.exceptions import RankWarning\nfrom numpy.lib._function_base_impl import trim_zeros\nfrom numpy.lib._twodim_base_impl import diag, vander\nfrom numpy.lib._type_check_impl import imag, iscomplex, mintypecode, real\nfrom numpy.linalg import eigvals, inv, lstsq\n\narray_function_dispatch = functools.partial(\n overrides.array_function_dispatch, module='numpy')\n\n\ndef _poly_dispatcher(seq_of_zeros):\n return seq_of_zeros\n\n\n@array_function_dispatch(_poly_dispatcher)\ndef poly(seq_of_zeros):\n """\n Find the coefficients of a polynomial with the given sequence of roots.\n\n .. note::\n This forms part of the old polynomial API. Since version 1.4, the\n new polynomial API defined in `numpy.polynomial` is preferred.\n A summary of the differences can be found in the\n :doc:`transition guide </reference/routines.polynomials>`.\n\n Returns the coefficients of the polynomial whose leading coefficient\n is one for the given sequence of zeros (multiple roots must be included\n in the sequence as many times as their multiplicity; see Examples).\n A square matrix (or array, which will be treated as a matrix) can also\n be given, in which case the coefficients of the characteristic polynomial\n of the matrix are returned.\n\n Parameters\n ----------\n seq_of_zeros : array_like, shape (N,) or (N, N)\n A sequence of polynomial roots, or a square array or matrix object.\n\n Returns\n -------\n c : ndarray\n 1D array of polynomial coefficients from highest to lowest degree:\n\n ``c[0] * x**(N) + c[1] * x**(N-1) + ... 
+ c[N-1] * x + c[N]``\n where c[0] always equals 1.\n\n Raises\n ------\n ValueError\n If input is the wrong shape (the input must be a 1-D or square\n 2-D array).\n\n See Also\n --------\n polyval : Compute polynomial values.\n roots : Return the roots of a polynomial.\n polyfit : Least squares polynomial fit.\n poly1d : A one-dimensional polynomial class.\n\n Notes\n -----\n Specifying the roots of a polynomial still leaves one degree of\n freedom, typically represented by an undetermined leading\n coefficient. [1]_ In the case of this function, that coefficient -\n the first one in the returned array - is always taken as one. (If\n for some reason you have one other point, the only automatic way\n presently to leverage that information is to use ``polyfit``.)\n\n The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n`\n matrix **A** is given by\n\n :math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`,\n\n where **I** is the `n`-by-`n` identity matrix. [2]_\n\n References\n ----------\n .. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trigonometry,\n Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996.\n\n .. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition,"\n Academic Press, pg. 182, 1980.\n\n Examples\n --------\n\n Given a sequence of a polynomial's zeros:\n\n >>> import numpy as np\n\n >>> np.poly((0, 0, 0)) # Multiple root example\n array([1., 0., 0., 0.])\n\n The line above represents z**3 + 0*z**2 + 0*z + 0.\n\n >>> np.poly((-1./2, 0, 1./2))\n array([ 1. , 0. , -0.25, 0. ])\n\n The line above represents z**3 - z/4\n\n >>> np.poly((np.random.random(1)[0], 0, np.random.random(1)[0]))\n array([ 1. , -0.77086955, 0.08618131, 0. ]) # random\n\n Given a square array object:\n\n >>> P = np.array([[0, 1./3], [-1./2, 0]])\n >>> np.poly(P)\n array([1. , 0. 
, 0.16666667])\n\n Note how in all cases the leading coefficient is always 1.\n\n """\n seq_of_zeros = atleast_1d(seq_of_zeros)\n sh = seq_of_zeros.shape\n\n if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0:\n seq_of_zeros = eigvals(seq_of_zeros)\n elif len(sh) == 1:\n dt = seq_of_zeros.dtype\n # Let object arrays slip through, e.g. for arbitrary precision\n if dt != object:\n seq_of_zeros = seq_of_zeros.astype(mintypecode(dt.char))\n else:\n raise ValueError("input must be 1d or non-empty square 2d array.")\n\n if len(seq_of_zeros) == 0:\n return 1.0\n dt = seq_of_zeros.dtype\n a = ones((1,), dtype=dt)\n for zero in seq_of_zeros:\n a = NX.convolve(a, array([1, -zero], dtype=dt), mode='full')\n\n if issubclass(a.dtype.type, NX.complexfloating):\n # if complex roots are all complex conjugates, the roots are real.\n roots = NX.asarray(seq_of_zeros, complex)\n if NX.all(NX.sort(roots) == NX.sort(roots.conjugate())):\n a = a.real.copy()\n\n return a\n\n\ndef _roots_dispatcher(p):\n return p\n\n\n@array_function_dispatch(_roots_dispatcher)\ndef roots(p):\n """\n Return the roots of a polynomial with coefficients given in p.\n\n .. note::\n This forms part of the old polynomial API. Since version 1.4, the\n new polynomial API defined in `numpy.polynomial` is preferred.\n A summary of the differences can be found in the\n :doc:`transition guide </reference/routines.polynomials>`.\n\n The values in the rank-1 array `p` are coefficients of a polynomial.\n If the length of `p` is n+1 then the polynomial is described by::\n\n p[0] * x**n + p[1] * x**(n-1) + ... 
+ p[n-1]*x + p[n]\n\n Parameters\n ----------\n p : array_like\n Rank-1 array of polynomial coefficients.\n\n Returns\n -------\n out : ndarray\n An array containing the roots of the polynomial.\n\n Raises\n ------\n ValueError\n When `p` cannot be converted to a rank-1 array.\n\n See also\n --------\n poly : Find the coefficients of a polynomial with a given sequence\n of roots.\n polyval : Compute polynomial values.\n polyfit : Least squares polynomial fit.\n poly1d : A one-dimensional polynomial class.\n\n Notes\n -----\n The algorithm relies on computing the eigenvalues of the\n companion matrix [1]_.\n\n References\n ----------\n .. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK:\n Cambridge University Press, 1999, pp. 146-7.\n\n Examples\n --------\n >>> import numpy as np\n >>> coeff = [3.2, 2, 1]\n >>> np.roots(coeff)\n array([-0.3125+0.46351241j, -0.3125-0.46351241j])\n\n """\n # If input is scalar, this makes it an array\n p = atleast_1d(p)\n if p.ndim != 1:\n raise ValueError("Input must be a rank-1 array.")\n\n # find non-zero array entries\n non_zero = NX.nonzero(NX.ravel(p))[0]\n\n # Return an empty array if polynomial is all zeros\n if len(non_zero) == 0:\n return NX.array([])\n\n # find the number of trailing zeros -- this is the number of roots at 0.\n trailing_zeros = len(p) - non_zero[-1] - 1\n\n # strip leading and trailing zeros\n p = p[int(non_zero[0]):int(non_zero[-1]) + 1]\n\n # casting: if incoming array isn't floating point, make it floating point.\n if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)):\n p = p.astype(float)\n\n N = len(p)\n if N > 1:\n # build companion matrix and find its eigenvalues (the roots)\n A = diag(NX.ones((N - 2,), p.dtype), -1)\n A[0, :] = -p[1:] / p[0]\n roots = eigvals(A)\n else:\n roots = NX.array([])\n\n # tack any zeros onto the back of the array\n roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype)))\n return roots\n\n\ndef _polyint_dispatcher(p, m=None, k=None):\n 
return (p,)\n\n\n@array_function_dispatch(_polyint_dispatcher)\ndef polyint(p, m=1, k=None):\n """\n Return an antiderivative (indefinite integral) of a polynomial.\n\n .. note::\n This forms part of the old polynomial API. Since version 1.4, the\n new polynomial API defined in `numpy.polynomial` is preferred.\n A summary of the differences can be found in the\n :doc:`transition guide </reference/routines.polynomials>`.\n\n The returned order `m` antiderivative `P` of polynomial `p` satisfies\n :math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1`\n integration constants `k`. The constants determine the low-order\n polynomial part\n\n .. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1}\n\n of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`.\n\n Parameters\n ----------\n p : array_like or poly1d\n Polynomial to integrate.\n A sequence is interpreted as polynomial coefficients, see `poly1d`.\n m : int, optional\n Order of the antiderivative. (Default: 1)\n k : list of `m` scalars or scalar, optional\n Integration constants. They are given in the order of integration:\n those corresponding to highest-order terms come first.\n\n If ``None`` (default), all constants are assumed to be zero.\n If `m = 1`, a single scalar can be given instead of a list.\n\n See Also\n --------\n polyder : derivative of a polynomial\n poly1d.integ : equivalent method\n\n Examples\n --------\n\n The defining property of the antiderivative:\n\n >>> import numpy as np\n\n >>> p = np.poly1d([1,1,1])\n >>> P = np.polyint(p)\n >>> P\n poly1d([ 0.33333333, 0.5 , 1. , 0. ]) # may vary\n >>> np.polyder(P) == p\n True\n\n The integration constants default to zero, but can be specified:\n\n >>> P = np.polyint(p, 3)\n >>> P(0)\n 0.0\n >>> np.polyder(P)(0)\n 0.0\n >>> np.polyder(P, 2)(0)\n 0.0\n >>> P = np.polyint(p, 3, k=[6,5,3])\n >>> P\n poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. 
]) # may vary\n\n Note that 3 = 6 / 2!, and that the constants are given in the order of\n integrations. Constant of the highest-order polynomial term comes first:\n\n >>> np.polyder(P, 2)(0)\n 6.0\n >>> np.polyder(P, 1)(0)\n 5.0\n >>> P(0)\n 3.0\n\n """\n m = int(m)\n if m < 0:\n raise ValueError("Order of integral must be positive (see polyder)")\n if k is None:\n k = NX.zeros(m, float)\n k = atleast_1d(k)\n if len(k) == 1 and m > 1:\n k = k[0] * NX.ones(m, float)\n if len(k) < m:\n raise ValueError(\n "k must be a scalar or a rank-1 array of length 1 or >m.")\n\n truepoly = isinstance(p, poly1d)\n p = NX.asarray(p)\n if m == 0:\n if truepoly:\n return poly1d(p)\n return p\n else:\n # Note: this must work also with object and integer arrays\n y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]]))\n val = polyint(y, m - 1, k=k[1:])\n if truepoly:\n return poly1d(val)\n return val\n\n\ndef _polyder_dispatcher(p, m=None):\n return (p,)\n\n\n@array_function_dispatch(_polyder_dispatcher)\ndef polyder(p, m=1):\n """\n Return the derivative of the specified order of a polynomial.\n\n .. note::\n This forms part of the old polynomial API. 
Since version 1.4, the\n new polynomial API defined in `numpy.polynomial` is preferred.\n A summary of the differences can be found in the\n :doc:`transition guide </reference/routines.polynomials>`.\n\n Parameters\n ----------\n p : poly1d or sequence\n Polynomial to differentiate.\n A sequence is interpreted as polynomial coefficients, see `poly1d`.\n m : int, optional\n Order of differentiation (default: 1)\n\n Returns\n -------\n der : poly1d\n A new polynomial representing the derivative.\n\n See Also\n --------\n polyint : Anti-derivative of a polynomial.\n poly1d : Class for one-dimensional polynomials.\n\n Examples\n --------\n\n The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is:\n\n >>> import numpy as np\n\n >>> p = np.poly1d([1,1,1,1])\n >>> p2 = np.polyder(p)\n >>> p2\n poly1d([3, 2, 1])\n\n which evaluates to:\n\n >>> p2(2.)\n 17.0\n\n We can verify this, approximating the derivative with\n ``(f(x + h) - f(x))/h``:\n\n >>> (p(2. + 0.001) - p(2.)) / 0.001\n 17.007000999997857\n\n The fourth-order derivative of a 3rd-order polynomial is zero:\n\n >>> np.polyder(p, 2)\n poly1d([6, 2])\n >>> np.polyder(p, 3)\n poly1d([6])\n >>> np.polyder(p, 4)\n poly1d([0])\n\n """\n m = int(m)\n if m < 0:\n raise ValueError("Order of derivative must be positive (see polyint)")\n\n truepoly = isinstance(p, poly1d)\n p = NX.asarray(p)\n n = len(p) - 1\n y = p[:-1] * NX.arange(n, 0, -1)\n if m == 0:\n val = p\n else:\n val = polyder(y, m - 1)\n if truepoly:\n val = poly1d(val)\n return val\n\n\ndef _polyfit_dispatcher(x, y, deg, rcond=None, full=None, w=None, cov=None):\n return (x, y, w)\n\n\n@array_function_dispatch(_polyfit_dispatcher)\ndef polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):\n """\n Least squares polynomial fit.\n\n .. note::\n This forms part of the old polynomial API. 
Since version 1.4, the\n new polynomial API defined in `numpy.polynomial` is preferred.\n A summary of the differences can be found in the\n :doc:`transition guide </reference/routines.polynomials>`.\n\n Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg`\n to points `(x, y)`. Returns a vector of coefficients `p` that minimises\n the squared error in the order `deg`, `deg-1`, ... `0`.\n\n The `Polynomial.fit <numpy.polynomial.polynomial.Polynomial.fit>` class\n method is recommended for new code as it is more stable numerically. See\n the documentation of the method for more information.\n\n Parameters\n ----------\n x : array_like, shape (M,)\n x-coordinates of the M sample points ``(x[i], y[i])``.\n y : array_like, shape (M,) or (M, K)\n y-coordinates of the sample points. Several data sets of sample\n points sharing the same x-coordinates can be fitted at once by\n passing in a 2D-array that contains one dataset per column.\n deg : int\n Degree of the fitting polynomial\n rcond : float, optional\n Relative condition number of the fit. Singular values smaller than\n this relative to the largest singular value will be ignored. The\n default value is len(x)*eps, where eps is the relative precision of\n the float type, about 2e-16 in most cases.\n full : bool, optional\n Switch determining nature of return value. When it is False (the\n default) just the coefficients are returned, when True diagnostic\n information from the singular value decomposition is also returned.\n w : array_like, shape (M,), optional\n Weights. If not None, the weight ``w[i]`` applies to the unsquared\n residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are\n chosen so that the errors of the products ``w[i]*y[i]`` all have the\n same variance. When using inverse-variance weighting, use\n ``w[i] = 1/sigma(y[i])``. The default value is None.\n cov : bool or str, optional\n If given and not `False`, return not just the estimate but also its\n covariance matrix. 
By default, the covariance are scaled by\n chi2/dof, where dof = M - (deg + 1), i.e., the weights are presumed\n to be unreliable except in a relative sense and everything is scaled\n such that the reduced chi2 is unity. This scaling is omitted if\n ``cov='unscaled'``, as is relevant for the case that the weights are\n w = 1/sigma, with sigma known to be a reliable estimate of the\n uncertainty.\n\n Returns\n -------\n p : ndarray, shape (deg + 1,) or (deg + 1, K)\n Polynomial coefficients, highest power first. If `y` was 2-D, the\n coefficients for `k`-th data set are in ``p[:,k]``.\n\n residuals, rank, singular_values, rcond\n These values are only returned if ``full == True``\n\n - residuals -- sum of squared residuals of the least squares fit\n - rank -- the effective rank of the scaled Vandermonde\n coefficient matrix\n - singular_values -- singular values of the scaled Vandermonde\n coefficient matrix\n - rcond -- value of `rcond`.\n\n For more details, see `numpy.linalg.lstsq`.\n\n V : ndarray, shape (deg + 1, deg + 1) or (deg + 1, deg + 1, K)\n Present only if ``full == False`` and ``cov == True``. The covariance\n matrix of the polynomial coefficient estimates. The diagonal of\n this matrix are the variance estimates for each coefficient. If y\n is a 2-D array, then the covariance matrix for the `k`-th data set\n are in ``V[:,:,k]``\n\n\n Warns\n -----\n RankWarning\n The rank of the coefficient matrix in the least-squares fit is\n deficient. The warning is only raised if ``full == False``.\n\n The warnings can be turned off by\n\n >>> import warnings\n >>> warnings.simplefilter('ignore', np.exceptions.RankWarning)\n\n See Also\n --------\n polyval : Compute polynomial values.\n linalg.lstsq : Computes a least-squares fit.\n scipy.interpolate.UnivariateSpline : Computes spline fits.\n\n Notes\n -----\n The solution minimizes the squared error\n\n .. math::\n E = \\sum_{j=0}^k |p(x_j) - y_j|^2\n\n in the equations::\n\n x[0]**n * p[0] + ... 
+ x[0] * p[n-1] + p[n] = y[0]\n x[1]**n * p[0] + ... + x[1] * p[n-1] + p[n] = y[1]\n ...\n x[k]**n * p[0] + ... + x[k] * p[n-1] + p[n] = y[k]\n\n The coefficient matrix of the coefficients `p` is a Vandermonde matrix.\n\n `polyfit` issues a `~exceptions.RankWarning` when the least-squares fit is\n badly conditioned. This implies that the best fit is not well-defined due\n to numerical error. The results may be improved by lowering the polynomial\n degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter\n can also be set to a value smaller than its default, but the resulting\n fit may be spurious: including contributions from the small singular\n values can add numerical noise to the result.\n\n Note that fitting polynomial coefficients is inherently badly conditioned\n when the degree of the polynomial is large or the interval of sample points\n is badly centered. The quality of the fit should always be checked in these\n cases. When polynomial fits are not satisfactory, splines may be a good\n alternative.\n\n References\n ----------\n .. [1] Wikipedia, "Curve fitting",\n https://en.wikipedia.org/wiki/Curve_fitting\n .. [2] Wikipedia, "Polynomial interpolation",\n https://en.wikipedia.org/wiki/Polynomial_interpolation\n\n Examples\n --------\n >>> import numpy as np\n >>> import warnings\n >>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])\n >>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])\n >>> z = np.polyfit(x, y, 3)\n >>> z\n array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254]) # may vary\n\n It is convenient to use `poly1d` objects for dealing with polynomials:\n\n >>> p = np.poly1d(z)\n >>> p(0.5)\n 0.6143849206349179 # may vary\n >>> p(3.5)\n -0.34732142857143039 # may vary\n >>> p(10)\n 22.579365079365115 # may vary\n\n High-order polynomials may oscillate wildly:\n\n >>> with warnings.catch_warnings():\n ... warnings.simplefilter('ignore', np.exceptions.RankWarning)\n ... 
p30 = np.poly1d(np.polyfit(x, y, 30))\n ...\n >>> p30(4)\n -0.80000000000000204 # may vary\n >>> p30(5)\n -0.99999999999999445 # may vary\n >>> p30(4.5)\n -0.10547061179440398 # may vary\n\n Illustration:\n\n >>> import matplotlib.pyplot as plt\n >>> xp = np.linspace(-2, 6, 100)\n >>> _ = plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--')\n >>> plt.ylim(-2,2)\n (-2, 2)\n >>> plt.show()\n\n """\n order = int(deg) + 1\n x = NX.asarray(x) + 0.0\n y = NX.asarray(y) + 0.0\n\n # check arguments.\n if deg < 0:\n raise ValueError("expected deg >= 0")\n if x.ndim != 1:\n raise TypeError("expected 1D vector for x")\n if x.size == 0:\n raise TypeError("expected non-empty vector for x")\n if y.ndim < 1 or y.ndim > 2:\n raise TypeError("expected 1D or 2D array for y")\n if x.shape[0] != y.shape[0]:\n raise TypeError("expected x and y to have same length")\n\n # set rcond\n if rcond is None:\n rcond = len(x) * finfo(x.dtype).eps\n\n # set up least squares equation for powers of x\n lhs = vander(x, order)\n rhs = y\n\n # apply weighting\n if w is not None:\n w = NX.asarray(w) + 0.0\n if w.ndim != 1:\n raise TypeError("expected a 1-d array for weights")\n if w.shape[0] != y.shape[0]:\n raise TypeError("expected w and y to have the same length")\n lhs *= w[:, NX.newaxis]\n if rhs.ndim == 2:\n rhs *= w[:, NX.newaxis]\n else:\n rhs *= w\n\n # scale lhs to improve condition number and solve\n scale = NX.sqrt((lhs * lhs).sum(axis=0))\n lhs /= scale\n c, resids, rank, s = lstsq(lhs, rhs, rcond)\n c = (c.T / scale).T # broadcast scale coefficients\n\n # warn on rank reduction, which indicates an ill conditioned matrix\n if rank != order and not full:\n msg = "Polyfit may be poorly conditioned"\n warnings.warn(msg, RankWarning, stacklevel=2)\n\n if full:\n return c, resids, rank, s, rcond\n elif cov:\n Vbase = inv(dot(lhs.T, lhs))\n Vbase /= NX.outer(scale, scale)\n if cov == "unscaled":\n fac = 1\n else:\n if len(x) <= order:\n raise ValueError("the number of data points must exceed 
order "\n "to scale the covariance matrix")\n # note, this used to be: fac = resids / (len(x) - order - 2.0)\n # it was decided that the "- 2" (originally justified by "Bayesian\n # uncertainty analysis") is not what the user expects\n # (see gh-11196 and gh-11197)\n fac = resids / (len(x) - order)\n if y.ndim == 1:\n return c, Vbase * fac\n else:\n return c, Vbase[:, :, NX.newaxis] * fac\n else:\n return c\n\n\ndef _polyval_dispatcher(p, x):\n return (p, x)\n\n\n@array_function_dispatch(_polyval_dispatcher)\ndef polyval(p, x):\n """\n Evaluate a polynomial at specific values.\n\n .. note::\n This forms part of the old polynomial API. Since version 1.4, the\n new polynomial API defined in `numpy.polynomial` is preferred.\n A summary of the differences can be found in the\n :doc:`transition guide </reference/routines.polynomials>`.\n\n If `p` is of length N, this function returns the value::\n\n p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]\n\n If `x` is a sequence, then ``p(x)`` is returned for each element of ``x``.\n If `x` is another polynomial then the composite polynomial ``p(x(t))``\n is returned.\n\n Parameters\n ----------\n p : array_like or poly1d object\n 1D array of polynomial coefficients (including coefficients equal\n to zero) from highest degree to the constant term, or an\n instance of poly1d.\n x : array_like or poly1d object\n A number, an array of numbers, or an instance of poly1d, at\n which to evaluate `p`.\n\n Returns\n -------\n values : ndarray or poly1d\n If `x` is a poly1d instance, the result is the composition of the two\n polynomials, i.e., `x` is "substituted" in `p` and the simplified\n result is returned. In addition, the type of `x` - array_like or\n poly1d - governs the type of the output: `x` array_like => `values`\n array_like, `x` a poly1d object => `values` is also.\n\n See Also\n --------\n poly1d: A polynomial class.\n\n Notes\n -----\n Horner's scheme [1]_ is used to evaluate the polynomial. 
Even so,\n for polynomials of high degree the values may be inaccurate due to\n rounding errors. Use carefully.\n\n If `x` is a subtype of `ndarray` the return value will be of the same type.\n\n References\n ----------\n .. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng.\n trans. Ed.), *Handbook of Mathematics*, New York, Van Nostrand\n Reinhold Co., 1985, pg. 720.\n\n Examples\n --------\n >>> import numpy as np\n >>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1\n 76\n >>> np.polyval([3,0,1], np.poly1d(5))\n poly1d([76])\n >>> np.polyval(np.poly1d([3,0,1]), 5)\n 76\n >>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5))\n poly1d([76])\n\n """\n p = NX.asarray(p)\n if isinstance(x, poly1d):\n y = 0\n else:\n x = NX.asanyarray(x)\n y = NX.zeros_like(x)\n for pv in p:\n y = y * x + pv\n return y\n\n\ndef _binary_op_dispatcher(a1, a2):\n return (a1, a2)\n\n\n@array_function_dispatch(_binary_op_dispatcher)\ndef polyadd(a1, a2):\n """\n Find the sum of two polynomials.\n\n .. note::\n This forms part of the old polynomial API. Since version 1.4, the\n new polynomial API defined in `numpy.polynomial` is preferred.\n A summary of the differences can be found in the\n :doc:`transition guide </reference/routines.polynomials>`.\n\n Returns the polynomial resulting from the sum of two input polynomials.\n Each input must be either a poly1d object or a 1D sequence of polynomial\n coefficients, from highest to lowest degree.\n\n Parameters\n ----------\n a1, a2 : array_like or poly1d object\n Input polynomials.\n\n Returns\n -------\n out : ndarray or poly1d object\n The sum of the inputs. If either input is a poly1d object, then the\n output is also a poly1d object. 
Otherwise, it is a 1D array of\n polynomial coefficients from highest to lowest degree.\n\n See Also\n --------\n poly1d : A one-dimensional polynomial class.\n poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval\n\n Examples\n --------\n >>> import numpy as np\n >>> np.polyadd([1, 2], [9, 5, 4])\n array([9, 6, 6])\n\n Using poly1d objects:\n\n >>> p1 = np.poly1d([1, 2])\n >>> p2 = np.poly1d([9, 5, 4])\n >>> print(p1)\n 1 x + 2\n >>> print(p2)\n 2\n 9 x + 5 x + 4\n >>> print(np.polyadd(p1, p2))\n 2\n 9 x + 6 x + 6\n\n """\n truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))\n a1 = atleast_1d(a1)\n a2 = atleast_1d(a2)\n diff = len(a2) - len(a1)\n if diff == 0:\n val = a1 + a2\n elif diff > 0:\n zr = NX.zeros(diff, a1.dtype)\n val = NX.concatenate((zr, a1)) + a2\n else:\n zr = NX.zeros(abs(diff), a2.dtype)\n val = a1 + NX.concatenate((zr, a2))\n if truepoly:\n val = poly1d(val)\n return val\n\n\n@array_function_dispatch(_binary_op_dispatcher)\ndef polysub(a1, a2):\n """\n Difference (subtraction) of two polynomials.\n\n .. note::\n This forms part of the old polynomial API. Since version 1.4, the\n new polynomial API defined in `numpy.polynomial` is preferred.\n A summary of the differences can be found in the\n :doc:`transition guide </reference/routines.polynomials>`.\n\n Given two polynomials `a1` and `a2`, returns ``a1 - a2``.\n `a1` and `a2` can be either array_like sequences of the polynomials'\n coefficients (including coefficients equal to zero), or `poly1d` objects.\n\n Parameters\n ----------\n a1, a2 : array_like or poly1d\n Minuend and subtrahend polynomials, respectively.\n\n Returns\n -------\n out : ndarray or poly1d\n Array or `poly1d` object of the difference polynomial's coefficients.\n\n See Also\n --------\n polyval, polydiv, polymul, polyadd\n\n Examples\n --------\n\n .. 
math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2)\n\n >>> import numpy as np\n\n >>> np.polysub([2, 10, -2], [3, 10, -4])\n array([-1, 0, 2])\n\n """\n truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))\n a1 = atleast_1d(a1)\n a2 = atleast_1d(a2)\n diff = len(a2) - len(a1)\n if diff == 0:\n val = a1 - a2\n elif diff > 0:\n zr = NX.zeros(diff, a1.dtype)\n val = NX.concatenate((zr, a1)) - a2\n else:\n zr = NX.zeros(abs(diff), a2.dtype)\n val = a1 - NX.concatenate((zr, a2))\n if truepoly:\n val = poly1d(val)\n return val\n\n\n@array_function_dispatch(_binary_op_dispatcher)\ndef polymul(a1, a2):\n """\n Find the product of two polynomials.\n\n .. note::\n This forms part of the old polynomial API. Since version 1.4, the\n new polynomial API defined in `numpy.polynomial` is preferred.\n A summary of the differences can be found in the\n :doc:`transition guide </reference/routines.polynomials>`.\n\n Finds the polynomial resulting from the multiplication of the two input\n polynomials. Each input must be either a poly1d object or a 1D sequence\n of polynomial coefficients, from highest to lowest degree.\n\n Parameters\n ----------\n a1, a2 : array_like or poly1d object\n Input polynomials.\n\n Returns\n -------\n out : ndarray or poly1d object\n The polynomial resulting from the multiplication of the inputs. If\n either inputs is a poly1d object, then the output is also a poly1d\n object. Otherwise, it is a 1D array of polynomial coefficients from\n highest to lowest degree.\n\n See Also\n --------\n poly1d : A one-dimensional polynomial class.\n poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval\n convolve : Array convolution. 
Same output as polymul, but has parameter\n for overlap mode.\n\n Examples\n --------\n >>> import numpy as np\n >>> np.polymul([1, 2, 3], [9, 5, 1])\n array([ 9, 23, 38, 17, 3])\n\n Using poly1d objects:\n\n >>> p1 = np.poly1d([1, 2, 3])\n >>> p2 = np.poly1d([9, 5, 1])\n >>> print(p1)\n 2\n 1 x + 2 x + 3\n >>> print(p2)\n 2\n 9 x + 5 x + 1\n >>> print(np.polymul(p1, p2))\n 4 3 2\n 9 x + 23 x + 38 x + 17 x + 3\n\n """\n truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))\n a1, a2 = poly1d(a1), poly1d(a2)\n val = NX.convolve(a1, a2)\n if truepoly:\n val = poly1d(val)\n return val\n\n\ndef _polydiv_dispatcher(u, v):\n return (u, v)\n\n\n@array_function_dispatch(_polydiv_dispatcher)\ndef polydiv(u, v):\n """\n Returns the quotient and remainder of polynomial division.\n\n .. note::\n This forms part of the old polynomial API. Since version 1.4, the\n new polynomial API defined in `numpy.polynomial` is preferred.\n A summary of the differences can be found in the\n :doc:`transition guide </reference/routines.polynomials>`.\n\n The input arrays are the coefficients (including any coefficients\n equal to zero) of the "numerator" (dividend) and "denominator"\n (divisor) polynomials, respectively.\n\n Parameters\n ----------\n u : array_like or poly1d\n Dividend polynomial's coefficients.\n\n v : array_like or poly1d\n Divisor polynomial's coefficients.\n\n Returns\n -------\n q : ndarray\n Coefficients, including those equal to zero, of the quotient.\n r : ndarray\n Coefficients, including those equal to zero, of the remainder.\n\n See Also\n --------\n poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub\n polyval\n\n Notes\n -----\n Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need\n not equal `v.ndim`. In other words, all four possible combinations -\n ``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``,\n ``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work.\n\n Examples\n --------\n\n .. 
math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25\n\n >>> import numpy as np\n\n >>> x = np.array([3.0, 5.0, 2.0])\n >>> y = np.array([2.0, 1.0])\n >>> np.polydiv(x, y)\n (array([1.5 , 1.75]), array([0.25]))\n\n """\n truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d))\n u = atleast_1d(u) + 0.0\n v = atleast_1d(v) + 0.0\n # w has the common type\n w = u[0] + v[0]\n m = len(u) - 1\n n = len(v) - 1\n scale = 1. / v[0]\n q = NX.zeros((max(m - n + 1, 1),), w.dtype)\n r = u.astype(w.dtype)\n for k in range(m - n + 1):\n d = scale * r[k]\n q[k] = d\n r[k:k + n + 1] -= d * v\n while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1):\n r = r[1:]\n if truepoly:\n return poly1d(q), poly1d(r)\n return q, r\n\n\n_poly_mat = re.compile(r"\*\*([0-9]*)")\ndef _raise_power(astr, wrap=70):\n n = 0\n line1 = ''\n line2 = ''\n output = ' '\n while True:\n mat = _poly_mat.search(astr, n)\n if mat is None:\n break\n span = mat.span()\n power = mat.groups()[0]\n partstr = astr[n:span[0]]\n n = span[1]\n toadd2 = partstr + ' ' * (len(power) - 1)\n toadd1 = ' ' * (len(partstr) - 1) + power\n if ((len(line2) + len(toadd2) > wrap) or\n (len(line1) + len(toadd1) > wrap)):\n output += line1 + "\n" + line2 + "\n "\n line1 = toadd1\n line2 = toadd2\n else:\n line2 += partstr + ' ' * (len(power) - 1)\n line1 += ' ' * (len(partstr) - 1) + power\n output += line1 + "\n" + line2\n return output + astr[n:]\n\n\n@set_module('numpy')\nclass poly1d:\n """\n A one-dimensional polynomial class.\n\n .. note::\n This forms part of the old polynomial API. 
Since version 1.4, the\n new polynomial API defined in `numpy.polynomial` is preferred.\n A summary of the differences can be found in the\n :doc:`transition guide </reference/routines.polynomials>`.\n\n A convenience class, used to encapsulate "natural" operations on\n polynomials so that said operations may take on their customary\n form in code (see Examples).\n\n Parameters\n ----------\n c_or_r : array_like\n The polynomial's coefficients, in decreasing powers, or if\n the value of the second parameter is True, the polynomial's\n roots (values where the polynomial evaluates to 0). For example,\n ``poly1d([1, 2, 3])`` returns an object that represents\n :math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns\n one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`.\n r : bool, optional\n If True, `c_or_r` specifies the polynomial's roots; the default\n is False.\n variable : str, optional\n Changes the variable used when printing `p` from `x` to `variable`\n (see Examples).\n\n Examples\n --------\n >>> import numpy as np\n\n Construct the polynomial :math:`x^2 + 2x + 3`:\n\n >>> import numpy as np\n\n >>> p = np.poly1d([1, 2, 3])\n >>> print(np.poly1d(p))\n 2\n 1 x + 2 x + 3\n\n Evaluate the polynomial at :math:`x = 0.5`:\n\n >>> p(0.5)\n 4.25\n\n Find the roots:\n\n >>> p.r\n array([-1.+1.41421356j, -1.-1.41421356j])\n >>> p(p.r)\n array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j]) # may vary\n\n These numbers in the previous line represent (0, 0) to machine precision\n\n Show the coefficients:\n\n >>> p.c\n array([1, 2, 3])\n\n Display the order (the leading zero-coefficients are removed):\n\n >>> p.order\n 2\n\n Show the coefficient of the k-th power in the polynomial\n (which is equivalent to ``p.c[-(i+1)]``):\n\n >>> p[1]\n 2\n\n Polynomials can be added, subtracted, multiplied, and divided\n (returns quotient and remainder):\n\n >>> p * p\n poly1d([ 1, 4, 10, 12, 9])\n\n >>> (p**3 + 4) / p\n (poly1d([ 1., 4., 10., 12., 9.]), 
poly1d([4.]))\n\n ``asarray(p)`` gives the coefficient array, so polynomials can be\n used in all functions that accept arrays:\n\n >>> p**2 # square of polynomial\n poly1d([ 1, 4, 10, 12, 9])\n\n >>> np.square(p) # square of individual coefficients\n array([1, 4, 9])\n\n The variable used in the string representation of `p` can be modified,\n using the `variable` parameter:\n\n >>> p = np.poly1d([1,2,3], variable='z')\n >>> print(p)\n 2\n 1 z + 2 z + 3\n\n Construct a polynomial from its roots:\n\n >>> np.poly1d([1, 2], True)\n poly1d([ 1., -3., 2.])\n\n This is the same polynomial as obtained by:\n\n >>> np.poly1d([1, -1]) * np.poly1d([1, -2])\n poly1d([ 1, -3, 2])\n\n """\n __hash__ = None\n\n @property\n def coeffs(self):\n """ The polynomial coefficients """\n return self._coeffs\n\n @coeffs.setter\n def coeffs(self, value):\n # allowing this makes p.coeffs *= 2 legal\n if value is not self._coeffs:\n raise AttributeError("Cannot set attribute")\n\n @property\n def variable(self):\n """ The name of the polynomial variable """\n return self._variable\n\n # calculated attributes\n @property\n def order(self):\n """ The order or degree of the polynomial """\n return len(self._coeffs) - 1\n\n @property\n def roots(self):\n """ The roots of the polynomial, where self(x) == 0 """\n return roots(self._coeffs)\n\n # our internal _coeffs property need to be backed by __dict__['coeffs'] for\n # scipy to work correctly.\n @property\n def _coeffs(self):\n return self.__dict__['coeffs']\n\n @_coeffs.setter\n def _coeffs(self, coeffs):\n self.__dict__['coeffs'] = coeffs\n\n # alias attributes\n r = roots\n c = coef = coefficients = coeffs\n o = order\n\n def __init__(self, c_or_r, r=False, variable=None):\n if isinstance(c_or_r, poly1d):\n self._variable = c_or_r._variable\n self._coeffs = c_or_r._coeffs\n\n if set(c_or_r.__dict__) - set(self.__dict__):\n msg = ("In the future extra properties will not be copied "\n "across when constructing one poly1d from another")\n 
warnings.warn(msg, FutureWarning, stacklevel=2)\n self.__dict__.update(c_or_r.__dict__)\n\n if variable is not None:\n self._variable = variable\n return\n if r:\n c_or_r = poly(c_or_r)\n c_or_r = atleast_1d(c_or_r)\n if c_or_r.ndim > 1:\n raise ValueError("Polynomial must be 1d only.")\n c_or_r = trim_zeros(c_or_r, trim='f')\n if len(c_or_r) == 0:\n c_or_r = NX.array([0], dtype=c_or_r.dtype)\n self._coeffs = c_or_r\n if variable is None:\n variable = 'x'\n self._variable = variable\n\n def __array__(self, t=None, copy=None):\n if t:\n return NX.asarray(self.coeffs, t, copy=copy)\n else:\n return NX.asarray(self.coeffs, copy=copy)\n\n def __repr__(self):\n vals = repr(self.coeffs)\n vals = vals[6:-1]\n return f"poly1d({vals})"\n\n def __len__(self):\n return self.order\n\n def __str__(self):\n thestr = "0"\n var = self.variable\n\n # Remove leading zeros\n coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)]\n N = len(coeffs) - 1\n\n def fmt_float(q):\n s = f'{q:.4g}'\n s = s.removesuffix('.0000')\n return s\n\n for k, coeff in enumerate(coeffs):\n if not iscomplex(coeff):\n coefstr = fmt_float(real(coeff))\n elif real(coeff) == 0:\n coefstr = f'{fmt_float(imag(coeff))}j'\n else:\n coefstr = f'({fmt_float(real(coeff))} + {fmt_float(imag(coeff))}j)'\n\n power = (N - k)\n if power == 0:\n if coefstr != '0':\n newstr = f'{coefstr}'\n elif k == 0:\n newstr = '0'\n else:\n newstr = ''\n elif power == 1:\n if coefstr == '0':\n newstr = ''\n elif coefstr == 'b':\n newstr = var\n else:\n newstr = f'{coefstr} {var}'\n elif coefstr == '0':\n newstr = ''\n elif coefstr == 'b':\n newstr = '%s**%d' % (var, power,)\n else:\n newstr = '%s %s**%d' % (coefstr, var, power)\n\n if k > 0:\n if newstr != '':\n if newstr.startswith('-'):\n thestr = f"{thestr} - {newstr[1:]}"\n else:\n thestr = f"{thestr} + {newstr}"\n else:\n thestr = newstr\n return _raise_power(thestr)\n\n def __call__(self, val):\n return polyval(self.coeffs, val)\n\n def __neg__(self):\n return 
poly1d(-self.coeffs)\n\n def __pos__(self):\n return self\n\n def __mul__(self, other):\n if isscalar(other):\n return poly1d(self.coeffs * other)\n else:\n other = poly1d(other)\n return poly1d(polymul(self.coeffs, other.coeffs))\n\n def __rmul__(self, other):\n if isscalar(other):\n return poly1d(other * self.coeffs)\n else:\n other = poly1d(other)\n return poly1d(polymul(self.coeffs, other.coeffs))\n\n def __add__(self, other):\n other = poly1d(other)\n return poly1d(polyadd(self.coeffs, other.coeffs))\n\n def __radd__(self, other):\n other = poly1d(other)\n return poly1d(polyadd(self.coeffs, other.coeffs))\n\n def __pow__(self, val):\n if not isscalar(val) or int(val) != val or val < 0:\n raise ValueError("Power to non-negative integers only.")\n res = [1]\n for _ in range(val):\n res = polymul(self.coeffs, res)\n return poly1d(res)\n\n def __sub__(self, other):\n other = poly1d(other)\n return poly1d(polysub(self.coeffs, other.coeffs))\n\n def __rsub__(self, other):\n other = poly1d(other)\n return poly1d(polysub(other.coeffs, self.coeffs))\n\n def __truediv__(self, other):\n if isscalar(other):\n return poly1d(self.coeffs / other)\n else:\n other = poly1d(other)\n return polydiv(self, other)\n\n def __rtruediv__(self, other):\n if isscalar(other):\n return poly1d(other / self.coeffs)\n else:\n other = poly1d(other)\n return polydiv(other, self)\n\n def __eq__(self, other):\n if not isinstance(other, poly1d):\n return NotImplemented\n if self.coeffs.shape != other.coeffs.shape:\n return False\n return (self.coeffs == other.coeffs).all()\n\n def __ne__(self, other):\n if not isinstance(other, poly1d):\n return NotImplemented\n return not self.__eq__(other)\n\n def __getitem__(self, val):\n ind = self.order - val\n if val > self.order:\n return self.coeffs.dtype.type(0)\n if val < 0:\n return self.coeffs.dtype.type(0)\n return self.coeffs[ind]\n\n def __setitem__(self, key, val):\n ind = self.order - key\n if key < 0:\n raise ValueError("Does not support 
negative powers.")\n if key > self.order:\n zr = NX.zeros(key - self.order, self.coeffs.dtype)\n self._coeffs = NX.concatenate((zr, self.coeffs))\n ind = 0\n self._coeffs[ind] = val\n\n def __iter__(self):\n return iter(self.coeffs)\n\n def integ(self, m=1, k=0):\n """\n Return an antiderivative (indefinite integral) of this polynomial.\n\n Refer to `polyint` for full documentation.\n\n See Also\n --------\n polyint : equivalent function\n\n """\n return poly1d(polyint(self.coeffs, m=m, k=k))\n\n def deriv(self, m=1):\n """\n Return a derivative of this polynomial.\n\n Refer to `polyder` for full documentation.\n\n See Also\n --------\n polyder : equivalent function\n\n """\n return poly1d(polyder(self.coeffs, m=m))\n\n# Stuff to do on module import\n\n\nwarnings.simplefilter('always', RankWarning)\n | .venv\Lib\site-packages\numpy\lib\_polynomial_impl.py | _polynomial_impl.py | Python | 45,599 | 0.95 | 0.117406 | 0.024723 | python-kit | 73 | 2024-06-23T22:19:29.584407 | Apache-2.0 | false | 0f0a7a78238b3a109b29d24b15ec9a6c |
from typing import (\n Any,\n NoReturn,\n SupportsIndex,\n SupportsInt,\n TypeAlias,\n TypeVar,\n overload,\n)\nfrom typing import (\n Literal as L,\n)\n\nimport numpy as np\nfrom numpy import (\n complex128,\n complexfloating,\n float64,\n floating,\n int32,\n int64,\n object_,\n poly1d,\n signedinteger,\n unsignedinteger,\n)\nfrom numpy._typing import (\n ArrayLike,\n NDArray,\n _ArrayLikeBool_co,\n _ArrayLikeComplex_co,\n _ArrayLikeFloat_co,\n _ArrayLikeInt_co,\n _ArrayLikeObject_co,\n _ArrayLikeUInt_co,\n)\n\n_T = TypeVar("_T")\n\n_2Tup: TypeAlias = tuple[_T, _T]\n_5Tup: TypeAlias = tuple[\n _T,\n NDArray[float64],\n NDArray[int32],\n NDArray[float64],\n NDArray[float64],\n]\n\n__all__ = [\n "poly",\n "roots",\n "polyint",\n "polyder",\n "polyadd",\n "polysub",\n "polymul",\n "polydiv",\n "polyval",\n "poly1d",\n "polyfit",\n]\n\ndef poly(seq_of_zeros: ArrayLike) -> NDArray[floating]: ...\n\n# Returns either a float or complex array depending on the input values.\n# See `np.linalg.eigvals`.\ndef roots(p: ArrayLike) -> NDArray[complexfloating] | NDArray[floating]: ...\n\n@overload\ndef polyint(\n p: poly1d,\n m: SupportsInt | SupportsIndex = ...,\n k: _ArrayLikeComplex_co | _ArrayLikeObject_co | None = ...,\n) -> poly1d: ...\n@overload\ndef polyint(\n p: _ArrayLikeFloat_co,\n m: SupportsInt | SupportsIndex = ...,\n k: _ArrayLikeFloat_co | None = ...,\n) -> NDArray[floating]: ...\n@overload\ndef polyint(\n p: _ArrayLikeComplex_co,\n m: SupportsInt | SupportsIndex = ...,\n k: _ArrayLikeComplex_co | None = ...,\n) -> NDArray[complexfloating]: ...\n@overload\ndef polyint(\n p: _ArrayLikeObject_co,\n m: SupportsInt | SupportsIndex = ...,\n k: _ArrayLikeObject_co | None = ...,\n) -> NDArray[object_]: ...\n\n@overload\ndef polyder(\n p: poly1d,\n m: SupportsInt | SupportsIndex = ...,\n) -> poly1d: ...\n@overload\ndef polyder(\n p: _ArrayLikeFloat_co,\n m: SupportsInt | SupportsIndex = ...,\n) -> NDArray[floating]: ...\n@overload\ndef polyder(\n p: 
_ArrayLikeComplex_co,\n m: SupportsInt | SupportsIndex = ...,\n) -> NDArray[complexfloating]: ...\n@overload\ndef polyder(\n p: _ArrayLikeObject_co,\n m: SupportsInt | SupportsIndex = ...,\n) -> NDArray[object_]: ...\n\n@overload\ndef polyfit(\n x: _ArrayLikeFloat_co,\n y: _ArrayLikeFloat_co,\n deg: SupportsIndex | SupportsInt,\n rcond: float | None = ...,\n full: L[False] = ...,\n w: _ArrayLikeFloat_co | None = ...,\n cov: L[False] = ...,\n) -> NDArray[float64]: ...\n@overload\ndef polyfit(\n x: _ArrayLikeComplex_co,\n y: _ArrayLikeComplex_co,\n deg: SupportsIndex | SupportsInt,\n rcond: float | None = ...,\n full: L[False] = ...,\n w: _ArrayLikeFloat_co | None = ...,\n cov: L[False] = ...,\n) -> NDArray[complex128]: ...\n@overload\ndef polyfit(\n x: _ArrayLikeFloat_co,\n y: _ArrayLikeFloat_co,\n deg: SupportsIndex | SupportsInt,\n rcond: float | None = ...,\n full: L[False] = ...,\n w: _ArrayLikeFloat_co | None = ...,\n cov: L[True, "unscaled"] = ...,\n) -> _2Tup[NDArray[float64]]: ...\n@overload\ndef polyfit(\n x: _ArrayLikeComplex_co,\n y: _ArrayLikeComplex_co,\n deg: SupportsIndex | SupportsInt,\n rcond: float | None = ...,\n full: L[False] = ...,\n w: _ArrayLikeFloat_co | None = ...,\n cov: L[True, "unscaled"] = ...,\n) -> _2Tup[NDArray[complex128]]: ...\n@overload\ndef polyfit(\n x: _ArrayLikeFloat_co,\n y: _ArrayLikeFloat_co,\n deg: SupportsIndex | SupportsInt,\n rcond: float | None = ...,\n full: L[True] = ...,\n w: _ArrayLikeFloat_co | None = ...,\n cov: bool | L["unscaled"] = ...,\n) -> _5Tup[NDArray[float64]]: ...\n@overload\ndef polyfit(\n x: _ArrayLikeComplex_co,\n y: _ArrayLikeComplex_co,\n deg: SupportsIndex | SupportsInt,\n rcond: float | None = ...,\n full: L[True] = ...,\n w: _ArrayLikeFloat_co | None = ...,\n cov: bool | L["unscaled"] = ...,\n) -> _5Tup[NDArray[complex128]]: ...\n\n@overload\ndef polyval(\n p: _ArrayLikeBool_co,\n x: _ArrayLikeBool_co,\n) -> NDArray[int64]: ...\n@overload\ndef polyval(\n p: _ArrayLikeUInt_co,\n x: 
_ArrayLikeUInt_co,\n) -> NDArray[unsignedinteger]: ...\n@overload\ndef polyval(\n p: _ArrayLikeInt_co,\n x: _ArrayLikeInt_co,\n) -> NDArray[signedinteger]: ...\n@overload\ndef polyval(\n p: _ArrayLikeFloat_co,\n x: _ArrayLikeFloat_co,\n) -> NDArray[floating]: ...\n@overload\ndef polyval(\n p: _ArrayLikeComplex_co,\n x: _ArrayLikeComplex_co,\n) -> NDArray[complexfloating]: ...\n@overload\ndef polyval(\n p: _ArrayLikeObject_co,\n x: _ArrayLikeObject_co,\n) -> NDArray[object_]: ...\n\n@overload\ndef polyadd(\n a1: poly1d,\n a2: _ArrayLikeComplex_co | _ArrayLikeObject_co,\n) -> poly1d: ...\n@overload\ndef polyadd(\n a1: _ArrayLikeComplex_co | _ArrayLikeObject_co,\n a2: poly1d,\n) -> poly1d: ...\n@overload\ndef polyadd(\n a1: _ArrayLikeBool_co,\n a2: _ArrayLikeBool_co,\n) -> NDArray[np.bool]: ...\n@overload\ndef polyadd(\n a1: _ArrayLikeUInt_co,\n a2: _ArrayLikeUInt_co,\n) -> NDArray[unsignedinteger]: ...\n@overload\ndef polyadd(\n a1: _ArrayLikeInt_co,\n a2: _ArrayLikeInt_co,\n) -> NDArray[signedinteger]: ...\n@overload\ndef polyadd(\n a1: _ArrayLikeFloat_co,\n a2: _ArrayLikeFloat_co,\n) -> NDArray[floating]: ...\n@overload\ndef polyadd(\n a1: _ArrayLikeComplex_co,\n a2: _ArrayLikeComplex_co,\n) -> NDArray[complexfloating]: ...\n@overload\ndef polyadd(\n a1: _ArrayLikeObject_co,\n a2: _ArrayLikeObject_co,\n) -> NDArray[object_]: ...\n\n@overload\ndef polysub(\n a1: poly1d,\n a2: _ArrayLikeComplex_co | _ArrayLikeObject_co,\n) -> poly1d: ...\n@overload\ndef polysub(\n a1: _ArrayLikeComplex_co | _ArrayLikeObject_co,\n a2: poly1d,\n) -> poly1d: ...\n@overload\ndef polysub(\n a1: _ArrayLikeBool_co,\n a2: _ArrayLikeBool_co,\n) -> NoReturn: ...\n@overload\ndef polysub(\n a1: _ArrayLikeUInt_co,\n a2: _ArrayLikeUInt_co,\n) -> NDArray[unsignedinteger]: ...\n@overload\ndef polysub(\n a1: _ArrayLikeInt_co,\n a2: _ArrayLikeInt_co,\n) -> NDArray[signedinteger]: ...\n@overload\ndef polysub(\n a1: _ArrayLikeFloat_co,\n a2: _ArrayLikeFloat_co,\n) -> NDArray[floating]: 
...\n@overload\ndef polysub(\n a1: _ArrayLikeComplex_co,\n a2: _ArrayLikeComplex_co,\n) -> NDArray[complexfloating]: ...\n@overload\ndef polysub(\n a1: _ArrayLikeObject_co,\n a2: _ArrayLikeObject_co,\n) -> NDArray[object_]: ...\n\n# NOTE: Not an alias, but they do have the same signature (that we can reuse)\npolymul = polyadd\n\n@overload\ndef polydiv(\n u: poly1d,\n v: _ArrayLikeComplex_co | _ArrayLikeObject_co,\n) -> _2Tup[poly1d]: ...\n@overload\ndef polydiv(\n u: _ArrayLikeComplex_co | _ArrayLikeObject_co,\n v: poly1d,\n) -> _2Tup[poly1d]: ...\n@overload\ndef polydiv(\n u: _ArrayLikeFloat_co,\n v: _ArrayLikeFloat_co,\n) -> _2Tup[NDArray[floating]]: ...\n@overload\ndef polydiv(\n u: _ArrayLikeComplex_co,\n v: _ArrayLikeComplex_co,\n) -> _2Tup[NDArray[complexfloating]]: ...\n@overload\ndef polydiv(\n u: _ArrayLikeObject_co,\n v: _ArrayLikeObject_co,\n) -> _2Tup[NDArray[Any]]: ...\n | .venv\Lib\site-packages\numpy\lib\_polynomial_impl.pyi | _polynomial_impl.pyi | Other | 7,315 | 0.95 | 0.136076 | 0.009934 | node-utils | 586 | 2023-08-27T12:49:32.603059 | Apache-2.0 | false | c4815b81822a2e63167cf0a9203aaada |
"""\nWrapper functions to more user-friendly calling of certain math functions\nwhose output data-type is different than the input data-type in certain\ndomains of the input.\n\nFor example, for functions like `log` with branch cuts, the versions in this\nmodule provide the mathematically valid answers in the complex plane::\n\n >>> import math\n >>> np.emath.log(-math.exp(1)) == (1+1j*math.pi)\n True\n\nSimilarly, `sqrt`, other base logarithms, `power` and trig functions are\ncorrectly handled. See their respective docstrings for specific examples.\n\n"""\nimport numpy._core.numeric as nx\nimport numpy._core.numerictypes as nt\nfrom numpy._core.numeric import any, asarray\nfrom numpy._core.overrides import array_function_dispatch, set_module\nfrom numpy.lib._type_check_impl import isreal\n\n__all__ = [\n 'sqrt', 'log', 'log2', 'logn', 'log10', 'power', 'arccos', 'arcsin',\n 'arctanh'\n ]\n\n\n_ln2 = nx.log(2.0)\n\n\ndef _tocomplex(arr):\n """Convert its input `arr` to a complex array.\n\n The input is returned as a complex array of the smallest type that will fit\n the original data: types like single, byte, short, etc. 
become csingle,\n while others become cdouble.\n\n A copy of the input is always made.\n\n Parameters\n ----------\n arr : array\n\n Returns\n -------\n array\n An array with the same input data as the input but in complex form.\n\n Examples\n --------\n >>> import numpy as np\n\n First, consider an input of type short:\n\n >>> a = np.array([1,2,3],np.short)\n\n >>> ac = np.lib.scimath._tocomplex(a); ac\n array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64)\n\n >>> ac.dtype\n dtype('complex64')\n\n If the input is of type double, the output is correspondingly of the\n complex double type as well:\n\n >>> b = np.array([1,2,3],np.double)\n\n >>> bc = np.lib.scimath._tocomplex(b); bc\n array([1.+0.j, 2.+0.j, 3.+0.j])\n\n >>> bc.dtype\n dtype('complex128')\n\n Note that even if the input was complex to begin with, a copy is still\n made, since the astype() method always copies:\n\n >>> c = np.array([1,2,3],np.csingle)\n\n >>> cc = np.lib.scimath._tocomplex(c); cc\n array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64)\n\n >>> c *= 2; c\n array([2.+0.j, 4.+0.j, 6.+0.j], dtype=complex64)\n\n >>> cc\n array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64)\n """\n if issubclass(arr.dtype.type, (nt.single, nt.byte, nt.short, nt.ubyte,\n nt.ushort, nt.csingle)):\n return arr.astype(nt.csingle)\n else:\n return arr.astype(nt.cdouble)\n\n\ndef _fix_real_lt_zero(x):\n """Convert `x` to complex if it has real, negative components.\n\n Otherwise, output is just the array version of the input (via asarray).\n\n Parameters\n ----------\n x : array_like\n\n Returns\n -------\n array\n\n Examples\n --------\n >>> import numpy as np\n >>> np.lib.scimath._fix_real_lt_zero([1,2])\n array([1, 2])\n\n >>> np.lib.scimath._fix_real_lt_zero([-1,2])\n array([-1.+0.j, 2.+0.j])\n\n """\n x = asarray(x)\n if any(isreal(x) & (x < 0)):\n x = _tocomplex(x)\n return x\n\n\ndef _fix_int_lt_zero(x):\n """Convert `x` to double if it has real, negative components.\n\n Otherwise, output is just the array version of the 
input (via asarray).\n\n Parameters\n ----------\n x : array_like\n\n Returns\n -------\n array\n\n Examples\n --------\n >>> import numpy as np\n >>> np.lib.scimath._fix_int_lt_zero([1,2])\n array([1, 2])\n\n >>> np.lib.scimath._fix_int_lt_zero([-1,2])\n array([-1., 2.])\n """\n x = asarray(x)\n if any(isreal(x) & (x < 0)):\n x = x * 1.0\n return x\n\n\ndef _fix_real_abs_gt_1(x):\n """Convert `x` to complex if it has real components x_i with abs(x_i)>1.\n\n Otherwise, output is just the array version of the input (via asarray).\n\n Parameters\n ----------\n x : array_like\n\n Returns\n -------\n array\n\n Examples\n --------\n >>> import numpy as np\n >>> np.lib.scimath._fix_real_abs_gt_1([0,1])\n array([0, 1])\n\n >>> np.lib.scimath._fix_real_abs_gt_1([0,2])\n array([0.+0.j, 2.+0.j])\n """\n x = asarray(x)\n if any(isreal(x) & (abs(x) > 1)):\n x = _tocomplex(x)\n return x\n\n\ndef _unary_dispatcher(x):\n return (x,)\n\n\n@set_module('numpy.lib.scimath')\n@array_function_dispatch(_unary_dispatcher)\ndef sqrt(x):\n """\n Compute the square root of x.\n\n For negative input elements, a complex value is returned\n (unlike `numpy.sqrt` which returns NaN).\n\n Parameters\n ----------\n x : array_like\n The input value(s).\n\n Returns\n -------\n out : ndarray or scalar\n The square root of `x`. 
If `x` was a scalar, so is `out`,\n otherwise an array is returned.\n\n See Also\n --------\n numpy.sqrt\n\n Examples\n --------\n For real, non-negative inputs this works just like `numpy.sqrt`:\n\n >>> import numpy as np\n\n >>> np.emath.sqrt(1)\n 1.0\n >>> np.emath.sqrt([1, 4])\n array([1., 2.])\n\n But it automatically handles negative inputs:\n\n >>> np.emath.sqrt(-1)\n 1j\n >>> np.emath.sqrt([-1,4])\n array([0.+1.j, 2.+0.j])\n\n Different results are expected because:\n floating point 0.0 and -0.0 are distinct.\n\n For more control, explicitly use complex() as follows:\n\n >>> np.emath.sqrt(complex(-4.0, 0.0))\n 2j\n >>> np.emath.sqrt(complex(-4.0, -0.0))\n -2j\n """\n x = _fix_real_lt_zero(x)\n return nx.sqrt(x)\n\n\n@set_module('numpy.lib.scimath')\n@array_function_dispatch(_unary_dispatcher)\ndef log(x):\n """\n Compute the natural logarithm of `x`.\n\n Return the "principal value" (for a description of this, see `numpy.log`)\n of :math:`log_e(x)`. For real `x > 0`, this is a real number (``log(0)``\n returns ``-inf`` and ``log(np.inf)`` returns ``inf``). Otherwise, the\n complex principle value is returned.\n\n Parameters\n ----------\n x : array_like\n The value(s) whose log is (are) required.\n\n Returns\n -------\n out : ndarray or scalar\n The log of the `x` value(s). 
If `x` was a scalar, so is `out`,\n otherwise an array is returned.\n\n See Also\n --------\n numpy.log\n\n Notes\n -----\n For a log() that returns ``NAN`` when real `x < 0`, use `numpy.log`\n (note, however, that otherwise `numpy.log` and this `log` are identical,\n i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`, and,\n notably, the complex principle value if ``x.imag != 0``).\n\n Examples\n --------\n >>> import numpy as np\n >>> np.emath.log(np.exp(1))\n 1.0\n\n Negative arguments are handled "correctly" (recall that\n ``exp(log(x)) == x`` does *not* hold for real ``x < 0``):\n\n >>> np.emath.log(-np.exp(1)) == (1 + np.pi * 1j)\n True\n\n """\n x = _fix_real_lt_zero(x)\n return nx.log(x)\n\n\n@set_module('numpy.lib.scimath')\n@array_function_dispatch(_unary_dispatcher)\ndef log10(x):\n """\n Compute the logarithm base 10 of `x`.\n\n Return the "principal value" (for a description of this, see\n `numpy.log10`) of :math:`log_{10}(x)`. For real `x > 0`, this\n is a real number (``log10(0)`` returns ``-inf`` and ``log10(np.inf)``\n returns ``inf``). Otherwise, the complex principle value is returned.\n\n Parameters\n ----------\n x : array_like or scalar\n The value(s) whose log base 10 is (are) required.\n\n Returns\n -------\n out : ndarray or scalar\n The log base 10 of the `x` value(s). 
If `x` was a scalar, so is `out`,\n otherwise an array object is returned.\n\n See Also\n --------\n numpy.log10\n\n Notes\n -----\n For a log10() that returns ``NAN`` when real `x < 0`, use `numpy.log10`\n (note, however, that otherwise `numpy.log10` and this `log10` are\n identical, i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`,\n and, notably, the complex principle value if ``x.imag != 0``).\n\n Examples\n --------\n >>> import numpy as np\n\n (We set the printing precision so the example can be auto-tested)\n\n >>> np.set_printoptions(precision=4)\n\n >>> np.emath.log10(10**1)\n 1.0\n\n >>> np.emath.log10([-10**1, -10**2, 10**2])\n array([1.+1.3644j, 2.+1.3644j, 2.+0.j ])\n\n """\n x = _fix_real_lt_zero(x)\n return nx.log10(x)\n\n\ndef _logn_dispatcher(n, x):\n return (n, x,)\n\n\n@set_module('numpy.lib.scimath')\n@array_function_dispatch(_logn_dispatcher)\ndef logn(n, x):\n """\n Take log base n of x.\n\n If `x` contains negative inputs, the answer is computed and returned in the\n complex domain.\n\n Parameters\n ----------\n n : array_like\n The integer base(s) in which the log is taken.\n x : array_like\n The value(s) whose log base `n` is (are) required.\n\n Returns\n -------\n out : ndarray or scalar\n The log base `n` of the `x` value(s). If `x` was a scalar, so is\n `out`, otherwise an array is returned.\n\n Examples\n --------\n >>> import numpy as np\n >>> np.set_printoptions(precision=4)\n\n >>> np.emath.logn(2, [4, 8])\n array([2., 3.])\n >>> np.emath.logn(2, [-4, -8, 8])\n array([2.+4.5324j, 3.+4.5324j, 3.+0.j ])\n\n """\n x = _fix_real_lt_zero(x)\n n = _fix_real_lt_zero(n)\n return nx.log(x) / nx.log(n)\n\n\n@set_module('numpy.lib.scimath')\n@array_function_dispatch(_unary_dispatcher)\ndef log2(x):\n """\n Compute the logarithm base 2 of `x`.\n\n Return the "principal value" (for a description of this, see\n `numpy.log2`) of :math:`log_2(x)`. 
For real `x > 0`, this is\n a real number (``log2(0)`` returns ``-inf`` and ``log2(np.inf)`` returns\n ``inf``). Otherwise, the complex principle value is returned.\n\n Parameters\n ----------\n x : array_like\n The value(s) whose log base 2 is (are) required.\n\n Returns\n -------\n out : ndarray or scalar\n The log base 2 of the `x` value(s). If `x` was a scalar, so is `out`,\n otherwise an array is returned.\n\n See Also\n --------\n numpy.log2\n\n Notes\n -----\n For a log2() that returns ``NAN`` when real `x < 0`, use `numpy.log2`\n (note, however, that otherwise `numpy.log2` and this `log2` are\n identical, i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`,\n and, notably, the complex principle value if ``x.imag != 0``).\n\n Examples\n --------\n\n We set the printing precision so the example can be auto-tested:\n\n >>> np.set_printoptions(precision=4)\n\n >>> np.emath.log2(8)\n 3.0\n >>> np.emath.log2([-4, -8, 8])\n array([2.+4.5324j, 3.+4.5324j, 3.+0.j ])\n\n """\n x = _fix_real_lt_zero(x)\n return nx.log2(x)\n\n\ndef _power_dispatcher(x, p):\n return (x, p)\n\n\n@set_module('numpy.lib.scimath')\n@array_function_dispatch(_power_dispatcher)\ndef power(x, p):\n """\n Return x to the power p, (x**p).\n\n If `x` contains negative values, the output is converted to the\n complex domain.\n\n Parameters\n ----------\n x : array_like\n The input value(s).\n p : array_like of ints\n The power(s) to which `x` is raised. If `x` contains multiple values,\n `p` has to either be a scalar, or contain the same number of values\n as `x`. In the latter case, the result is\n ``x[0]**p[0], x[1]**p[1], ...``.\n\n Returns\n -------\n out : ndarray or scalar\n The result of ``x**p``. 
If `x` and `p` are scalars, so is `out`,\n otherwise an array is returned.\n\n See Also\n --------\n numpy.power\n\n Examples\n --------\n >>> import numpy as np\n >>> np.set_printoptions(precision=4)\n\n >>> np.emath.power(2, 2)\n 4\n\n >>> np.emath.power([2, 4], 2)\n array([ 4, 16])\n\n >>> np.emath.power([2, 4], -2)\n array([0.25 , 0.0625])\n\n >>> np.emath.power([-2, 4], 2)\n array([ 4.-0.j, 16.+0.j])\n\n >>> np.emath.power([2, 4], [2, 4])\n array([ 4, 256])\n\n """\n x = _fix_real_lt_zero(x)\n p = _fix_int_lt_zero(p)\n return nx.power(x, p)\n\n\n@set_module('numpy.lib.scimath')\n@array_function_dispatch(_unary_dispatcher)\ndef arccos(x):\n """\n Compute the inverse cosine of x.\n\n Return the "principal value" (for a description of this, see\n `numpy.arccos`) of the inverse cosine of `x`. For real `x` such that\n `abs(x) <= 1`, this is a real number in the closed interval\n :math:`[0, \\pi]`. Otherwise, the complex principle value is returned.\n\n Parameters\n ----------\n x : array_like or scalar\n The value(s) whose arccos is (are) required.\n\n Returns\n -------\n out : ndarray or scalar\n The inverse cosine(s) of the `x` value(s). If `x` was a scalar, so\n is `out`, otherwise an array object is returned.\n\n See Also\n --------\n numpy.arccos\n\n Notes\n -----\n For an arccos() that returns ``NAN`` when real `x` is not in the\n interval ``[-1,1]``, use `numpy.arccos`.\n\n Examples\n --------\n >>> import numpy as np\n >>> np.set_printoptions(precision=4)\n\n >>> np.emath.arccos(1) # a scalar is returned\n 0.0\n\n >>> np.emath.arccos([1,2])\n array([0.-0.j , 0.-1.317j])\n\n """\n x = _fix_real_abs_gt_1(x)\n return nx.arccos(x)\n\n\n@set_module('numpy.lib.scimath')\n@array_function_dispatch(_unary_dispatcher)\ndef arcsin(x):\n """\n Compute the inverse sine of x.\n\n Return the "principal value" (for a description of this, see\n `numpy.arcsin`) of the inverse sine of `x`. 
For real `x` such that\n `abs(x) <= 1`, this is a real number in the closed interval\n :math:`[-\\pi/2, \\pi/2]`. Otherwise, the complex principle value is\n returned.\n\n Parameters\n ----------\n x : array_like or scalar\n The value(s) whose arcsin is (are) required.\n\n Returns\n -------\n out : ndarray or scalar\n The inverse sine(s) of the `x` value(s). If `x` was a scalar, so\n is `out`, otherwise an array object is returned.\n\n See Also\n --------\n numpy.arcsin\n\n Notes\n -----\n For an arcsin() that returns ``NAN`` when real `x` is not in the\n interval ``[-1,1]``, use `numpy.arcsin`.\n\n Examples\n --------\n >>> import numpy as np\n >>> np.set_printoptions(precision=4)\n\n >>> np.emath.arcsin(0)\n 0.0\n\n >>> np.emath.arcsin([0,1])\n array([0. , 1.5708])\n\n """\n x = _fix_real_abs_gt_1(x)\n return nx.arcsin(x)\n\n\n@set_module('numpy.lib.scimath')\n@array_function_dispatch(_unary_dispatcher)\ndef arctanh(x):\n """\n Compute the inverse hyperbolic tangent of `x`.\n\n Return the "principal value" (for a description of this, see\n `numpy.arctanh`) of ``arctanh(x)``. For real `x` such that\n ``abs(x) < 1``, this is a real number. If `abs(x) > 1`, or if `x` is\n complex, the result is complex. Finally, `x = 1` returns``inf`` and\n ``x=-1`` returns ``-inf``.\n\n Parameters\n ----------\n x : array_like\n The value(s) whose arctanh is (are) required.\n\n Returns\n -------\n out : ndarray or scalar\n The inverse hyperbolic tangent(s) of the `x` value(s). 
If `x` was\n a scalar so is `out`, otherwise an array is returned.\n\n\n See Also\n --------\n numpy.arctanh\n\n Notes\n -----\n For an arctanh() that returns ``NAN`` when real `x` is not in the\n interval ``(-1,1)``, use `numpy.arctanh` (this latter, however, does\n return +/-inf for ``x = +/-1``).\n\n Examples\n --------\n >>> import numpy as np\n >>> np.set_printoptions(precision=4)\n\n >>> np.emath.arctanh(0.5)\n 0.5493061443340549\n\n >>> from numpy.testing import suppress_warnings\n >>> with suppress_warnings() as sup:\n ... sup.filter(RuntimeWarning)\n ... np.emath.arctanh(np.eye(2))\n array([[inf, 0.],\n [ 0., inf]])\n >>> np.emath.arctanh([1j])\n array([0.+0.7854j])\n\n """\n x = _fix_real_abs_gt_1(x)\n return nx.arctanh(x)\n | .venv\Lib\site-packages\numpy\lib\_scimath_impl.py | _scimath_impl.py | Python | 16,334 | 0.95 | 0.070093 | 0 | node-utils | 820 | 2024-09-22T23:31:40.131777 | BSD-3-Clause | false | 9768ec30fec11d0476ddcc9453c29041 |
from typing import Any, overload\n\nfrom numpy import complexfloating\nfrom numpy._typing import (\n NDArray,\n _ArrayLikeComplex_co,\n _ArrayLikeFloat_co,\n _ComplexLike_co,\n _FloatLike_co,\n)\n\n__all__ = ["sqrt", "log", "log2", "logn", "log10", "power", "arccos", "arcsin", "arctanh"]\n\n@overload\ndef sqrt(x: _FloatLike_co) -> Any: ...\n@overload\ndef sqrt(x: _ComplexLike_co) -> complexfloating: ...\n@overload\ndef sqrt(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...\n@overload\ndef sqrt(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ...\n\n@overload\ndef log(x: _FloatLike_co) -> Any: ...\n@overload\ndef log(x: _ComplexLike_co) -> complexfloating: ...\n@overload\ndef log(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...\n@overload\ndef log(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ...\n\n@overload\ndef log10(x: _FloatLike_co) -> Any: ...\n@overload\ndef log10(x: _ComplexLike_co) -> complexfloating: ...\n@overload\ndef log10(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...\n@overload\ndef log10(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ...\n\n@overload\ndef log2(x: _FloatLike_co) -> Any: ...\n@overload\ndef log2(x: _ComplexLike_co) -> complexfloating: ...\n@overload\ndef log2(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...\n@overload\ndef log2(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ...\n\n@overload\ndef logn(n: _FloatLike_co, x: _FloatLike_co) -> Any: ...\n@overload\ndef logn(n: _ComplexLike_co, x: _ComplexLike_co) -> complexfloating: ...\n@overload\ndef logn(n: _ArrayLikeFloat_co, x: _ArrayLikeFloat_co) -> NDArray[Any]: ...\n@overload\ndef logn(n: _ArrayLikeComplex_co, x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ...\n\n@overload\ndef power(x: _FloatLike_co, p: _FloatLike_co) -> Any: ...\n@overload\ndef power(x: _ComplexLike_co, p: _ComplexLike_co) -> complexfloating: ...\n@overload\ndef power(x: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co) -> NDArray[Any]: ...\n@overload\ndef power(x: _ArrayLikeComplex_co, p: 
_ArrayLikeComplex_co) -> NDArray[complexfloating]: ...\n\n@overload\ndef arccos(x: _FloatLike_co) -> Any: ...\n@overload\ndef arccos(x: _ComplexLike_co) -> complexfloating: ...\n@overload\ndef arccos(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...\n@overload\ndef arccos(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ...\n\n@overload\ndef arcsin(x: _FloatLike_co) -> Any: ...\n@overload\ndef arcsin(x: _ComplexLike_co) -> complexfloating: ...\n@overload\ndef arcsin(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...\n@overload\ndef arcsin(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ...\n\n@overload\ndef arctanh(x: _FloatLike_co) -> Any: ...\n@overload\ndef arctanh(x: _ComplexLike_co) -> complexfloating: ...\n@overload\ndef arctanh(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...\n@overload\ndef arctanh(x: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ...\n | .venv\Lib\site-packages\numpy\lib\_scimath_impl.pyi | _scimath_impl.pyi | Other | 2,867 | 0.85 | 0.387097 | 0 | node-utils | 621 | 2023-09-14T21:56:07.275369 | MIT | false | 9c292822a7498826c0f807b460aa30a8 |
import functools\nimport warnings\n\nimport numpy._core.numeric as _nx\nfrom numpy._core import atleast_3d, overrides, vstack\nfrom numpy._core._multiarray_umath import _array_converter\nfrom numpy._core.fromnumeric import reshape, transpose\nfrom numpy._core.multiarray import normalize_axis_index\nfrom numpy._core.numeric import (\n array,\n asanyarray,\n asarray,\n normalize_axis_tuple,\n zeros,\n zeros_like,\n)\nfrom numpy._core.overrides import set_module\nfrom numpy._core.shape_base import _arrays_for_stack_dispatcher\nfrom numpy.lib._index_tricks_impl import ndindex\nfrom numpy.matrixlib.defmatrix import matrix # this raises all the right alarm bells\n\n__all__ = [\n 'column_stack', 'row_stack', 'dstack', 'array_split', 'split',\n 'hsplit', 'vsplit', 'dsplit', 'apply_over_axes', 'expand_dims',\n 'apply_along_axis', 'kron', 'tile', 'take_along_axis',\n 'put_along_axis'\n ]\n\n\narray_function_dispatch = functools.partial(\n overrides.array_function_dispatch, module='numpy')\n\n\ndef _make_along_axis_idx(arr_shape, indices, axis):\n # compute dimensions to iterate over\n if not _nx.issubdtype(indices.dtype, _nx.integer):\n raise IndexError('`indices` must be an integer array')\n if len(arr_shape) != indices.ndim:\n raise ValueError(\n "`indices` and `arr` must have the same number of dimensions")\n shape_ones = (1,) * indices.ndim\n dest_dims = list(range(axis)) + [None] + list(range(axis + 1, indices.ndim))\n\n # build a fancy index, consisting of orthogonal aranges, with the\n # requested index inserted at the right location\n fancy_index = []\n for dim, n in zip(dest_dims, arr_shape):\n if dim is None:\n fancy_index.append(indices)\n else:\n ind_shape = shape_ones[:dim] + (-1,) + shape_ones[dim + 1:]\n fancy_index.append(_nx.arange(n).reshape(ind_shape))\n\n return tuple(fancy_index)\n\n\ndef _take_along_axis_dispatcher(arr, indices, axis=None):\n return (arr, indices)\n\n\n@array_function_dispatch(_take_along_axis_dispatcher)\ndef take_along_axis(arr, 
indices, axis=-1):\n """\n Take values from the input array by matching 1d index and data slices.\n\n This iterates over matching 1d slices oriented along the specified axis in\n the index and data arrays, and uses the former to look up values in the\n latter. These slices can be different lengths.\n\n Functions returning an index along an axis, like `argsort` and\n `argpartition`, produce suitable indices for this function.\n\n Parameters\n ----------\n arr : ndarray (Ni..., M, Nk...)\n Source array\n indices : ndarray (Ni..., J, Nk...)\n Indices to take along each 1d slice of ``arr``. This must match the\n dimension of ``arr``, but dimensions Ni and Nj only need to broadcast\n against ``arr``.\n axis : int or None, optional\n The axis to take 1d slices along. If axis is None, the input array is\n treated as if it had first been flattened to 1d, for consistency with\n `sort` and `argsort`.\n\n .. versionchanged:: 2.3\n The default value is now ``-1``.\n\n Returns\n -------\n out: ndarray (Ni..., J, Nk...)\n The indexed result.\n\n Notes\n -----\n This is equivalent to (but faster than) the following use of `ndindex` and\n `s_`, which sets each of ``ii`` and ``kk`` to a tuple of indices::\n\n Ni, M, Nk = a.shape[:axis], a.shape[axis], a.shape[axis+1:]\n J = indices.shape[axis] # Need not equal M\n out = np.empty(Ni + (J,) + Nk)\n\n for ii in ndindex(Ni):\n for kk in ndindex(Nk):\n a_1d = a [ii + s_[:,] + kk]\n indices_1d = indices[ii + s_[:,] + kk]\n out_1d = out [ii + s_[:,] + kk]\n for j in range(J):\n out_1d[j] = a_1d[indices_1d[j]]\n\n Equivalently, eliminating the inner loop, the last two lines would be::\n\n out_1d[:] = a_1d[indices_1d]\n\n See Also\n --------\n take : Take along an axis, using the same indices for every 1d slice\n put_along_axis :\n Put values into the destination array by matching 1d index and data slices\n\n Examples\n --------\n >>> import numpy as np\n\n For this sample array\n\n >>> a = np.array([[10, 30, 20], [60, 40, 50]])\n\n We can 
sort either by using sort directly, or argsort and this function\n\n >>> np.sort(a, axis=1)\n array([[10, 20, 30],\n [40, 50, 60]])\n >>> ai = np.argsort(a, axis=1)\n >>> ai\n array([[0, 2, 1],\n [1, 2, 0]])\n >>> np.take_along_axis(a, ai, axis=1)\n array([[10, 20, 30],\n [40, 50, 60]])\n\n The same works for max and min, if you maintain the trivial dimension\n with ``keepdims``:\n\n >>> np.max(a, axis=1, keepdims=True)\n array([[30],\n [60]])\n >>> ai = np.argmax(a, axis=1, keepdims=True)\n >>> ai\n array([[1],\n [0]])\n >>> np.take_along_axis(a, ai, axis=1)\n array([[30],\n [60]])\n\n If we want to get the max and min at the same time, we can stack the\n indices first\n\n >>> ai_min = np.argmin(a, axis=1, keepdims=True)\n >>> ai_max = np.argmax(a, axis=1, keepdims=True)\n >>> ai = np.concatenate([ai_min, ai_max], axis=1)\n >>> ai\n array([[0, 1],\n [1, 0]])\n >>> np.take_along_axis(a, ai, axis=1)\n array([[10, 30],\n [40, 60]])\n """\n # normalize inputs\n if axis is None:\n if indices.ndim != 1:\n raise ValueError(\n 'when axis=None, `indices` must have a single dimension.')\n arr = arr.flat\n arr_shape = (len(arr),) # flatiter has no .shape\n axis = 0\n else:\n axis = normalize_axis_index(axis, arr.ndim)\n arr_shape = arr.shape\n\n # use the fancy index\n return arr[_make_along_axis_idx(arr_shape, indices, axis)]\n\n\ndef _put_along_axis_dispatcher(arr, indices, values, axis):\n return (arr, indices, values)\n\n\n@array_function_dispatch(_put_along_axis_dispatcher)\ndef put_along_axis(arr, indices, values, axis):\n """\n Put values into the destination array by matching 1d index and data slices.\n\n This iterates over matching 1d slices oriented along the specified axis in\n the index and data arrays, and uses the former to place values into the\n latter. 
These slices can be different lengths.\n\n Functions returning an index along an axis, like `argsort` and\n `argpartition`, produce suitable indices for this function.\n\n Parameters\n ----------\n arr : ndarray (Ni..., M, Nk...)\n Destination array.\n indices : ndarray (Ni..., J, Nk...)\n Indices to change along each 1d slice of `arr`. This must match the\n dimension of arr, but dimensions in Ni and Nj may be 1 to broadcast\n against `arr`.\n values : array_like (Ni..., J, Nk...)\n values to insert at those indices. Its shape and dimension are\n broadcast to match that of `indices`.\n axis : int\n The axis to take 1d slices along. If axis is None, the destination\n array is treated as if a flattened 1d view had been created of it.\n\n Notes\n -----\n This is equivalent to (but faster than) the following use of `ndindex` and\n `s_`, which sets each of ``ii`` and ``kk`` to a tuple of indices::\n\n Ni, M, Nk = a.shape[:axis], a.shape[axis], a.shape[axis+1:]\n J = indices.shape[axis] # Need not equal M\n\n for ii in ndindex(Ni):\n for kk in ndindex(Nk):\n a_1d = a [ii + s_[:,] + kk]\n indices_1d = indices[ii + s_[:,] + kk]\n values_1d = values [ii + s_[:,] + kk]\n for j in range(J):\n a_1d[indices_1d[j]] = values_1d[j]\n\n Equivalently, eliminating the inner loop, the last two lines would be::\n\n a_1d[indices_1d] = values_1d\n\n See Also\n --------\n take_along_axis :\n Take values from the input array by matching 1d index and data slices\n\n Examples\n --------\n >>> import numpy as np\n\n For this sample array\n\n >>> a = np.array([[10, 30, 20], [60, 40, 50]])\n\n We can replace the maximum values with:\n\n >>> ai = np.argmax(a, axis=1, keepdims=True)\n >>> ai\n array([[1],\n [0]])\n >>> np.put_along_axis(a, ai, 99, axis=1)\n >>> a\n array([[10, 99, 20],\n [99, 40, 50]])\n\n """\n # normalize inputs\n if axis is None:\n if indices.ndim != 1:\n raise ValueError(\n 'when axis=None, `indices` must have a single dimension.')\n arr = arr.flat\n axis = 0\n arr_shape = 
(len(arr),) # flatiter has no .shape\n else:\n axis = normalize_axis_index(axis, arr.ndim)\n arr_shape = arr.shape\n\n # use the fancy index\n arr[_make_along_axis_idx(arr_shape, indices, axis)] = values\n\n\ndef _apply_along_axis_dispatcher(func1d, axis, arr, *args, **kwargs):\n return (arr,)\n\n\n@array_function_dispatch(_apply_along_axis_dispatcher)\ndef apply_along_axis(func1d, axis, arr, *args, **kwargs):\n """\n Apply a function to 1-D slices along the given axis.\n\n Execute `func1d(a, *args, **kwargs)` where `func1d` operates on 1-D arrays\n and `a` is a 1-D slice of `arr` along `axis`.\n\n This is equivalent to (but faster than) the following use of `ndindex` and\n `s_`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of indices::\n\n Ni, Nk = a.shape[:axis], a.shape[axis+1:]\n for ii in ndindex(Ni):\n for kk in ndindex(Nk):\n f = func1d(arr[ii + s_[:,] + kk])\n Nj = f.shape\n for jj in ndindex(Nj):\n out[ii + jj + kk] = f[jj]\n\n Equivalently, eliminating the inner loop, this can be expressed as::\n\n Ni, Nk = a.shape[:axis], a.shape[axis+1:]\n for ii in ndindex(Ni):\n for kk in ndindex(Nk):\n out[ii + s_[...,] + kk] = func1d(arr[ii + s_[:,] + kk])\n\n Parameters\n ----------\n func1d : function (M,) -> (Nj...)\n This function should accept 1-D arrays. It is applied to 1-D\n slices of `arr` along the specified axis.\n axis : integer\n Axis along which `arr` is sliced.\n arr : ndarray (Ni..., M, Nk...)\n Input array.\n args : any\n Additional arguments to `func1d`.\n kwargs : any\n Additional named arguments to `func1d`.\n\n Returns\n -------\n out : ndarray (Ni..., Nj..., Nk...)\n The output array. The shape of `out` is identical to the shape of\n `arr`, except along the `axis` dimension. This axis is removed, and\n replaced with new dimensions equal to the shape of the return value\n of `func1d`. 
So if `func1d` returns a scalar `out` will have one\n fewer dimensions than `arr`.\n\n See Also\n --------\n apply_over_axes : Apply a function repeatedly over multiple axes.\n\n Examples\n --------\n >>> import numpy as np\n >>> def my_func(a):\n ... \"\"\"Average first and last element of a 1-D array\"\"\"\n ... return (a[0] + a[-1]) * 0.5\n >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]])\n >>> np.apply_along_axis(my_func, 0, b)\n array([4., 5., 6.])\n >>> np.apply_along_axis(my_func, 1, b)\n array([2., 5., 8.])\n\n For a function that returns a 1D array, the number of dimensions in\n `outarr` is the same as `arr`.\n\n >>> b = np.array([[8,1,7], [4,3,9], [5,2,6]])\n >>> np.apply_along_axis(sorted, 1, b)\n array([[1, 7, 8],\n [3, 4, 9],\n [2, 5, 6]])\n\n For a function that returns a higher dimensional array, those dimensions\n are inserted in place of the `axis` dimension.\n\n >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]])\n >>> np.apply_along_axis(np.diag, -1, b)\n array([[[1, 0, 0],\n [0, 2, 0],\n [0, 0, 3]],\n [[4, 0, 0],\n [0, 5, 0],\n [0, 0, 6]],\n [[7, 0, 0],\n [0, 8, 0],\n [0, 0, 9]]])\n """\n # handle negative axes\n conv = _array_converter(arr)\n arr = conv[0]\n\n nd = arr.ndim\n axis = normalize_axis_index(axis, nd)\n\n # arr, with the iteration axis at the end\n in_dims = list(range(nd))\n inarr_view = transpose(arr, in_dims[:axis] + in_dims[axis + 1:] + [axis])\n\n # compute indices for the iteration axes, and append a trailing ellipsis to\n # prevent 0d arrays decaying to scalars, which fixes gh-8642\n inds = ndindex(inarr_view.shape[:-1])\n inds = (ind + (Ellipsis,) for ind in inds)\n\n # invoke the function on the first item\n try:\n ind0 = next(inds)\n except StopIteration:\n raise ValueError(\n 'Cannot apply_along_axis when any iteration dimensions are 0'\n ) from None\n res = asanyarray(func1d(inarr_view[ind0], *args, **kwargs))\n\n # build a buffer for storing evaluations of func1d.\n # remove the requested axis, and add the new ones on the end.\n 
# laid out so that each write is contiguous.\n # for a tuple index inds, buff[inds] = func1d(inarr_view[inds])\n if not isinstance(res, matrix):\n buff = zeros_like(res, shape=inarr_view.shape[:-1] + res.shape)\n else:\n # Matrices are nasty with reshaping, so do not preserve them here.\n buff = zeros(inarr_view.shape[:-1] + res.shape, dtype=res.dtype)\n\n # permutation of axes such that out = buff.transpose(buff_permute)\n buff_dims = list(range(buff.ndim))\n buff_permute = (\n buff_dims[0 : axis] +\n buff_dims[buff.ndim - res.ndim : buff.ndim] +\n buff_dims[axis : buff.ndim - res.ndim]\n )\n\n # save the first result, then compute and save all remaining results\n buff[ind0] = res\n for ind in inds:\n buff[ind] = asanyarray(func1d(inarr_view[ind], *args, **kwargs))\n\n res = transpose(buff, buff_permute)\n return conv.wrap(res)\n\n\ndef _apply_over_axes_dispatcher(func, a, axes):\n return (a,)\n\n\n@array_function_dispatch(_apply_over_axes_dispatcher)\ndef apply_over_axes(func, a, axes):\n """\n Apply a function repeatedly over multiple axes.\n\n `func` is called as `res = func(a, axis)`, where `axis` is the first\n element of `axes`. The result `res` of the function call must have\n either the same dimensions as `a` or one less dimension. If `res`\n has one less dimension than `a`, a dimension is inserted before\n `axis`. The call to `func` is then repeated for each axis in `axes`,\n with `res` as the first argument.\n\n Parameters\n ----------\n func : function\n This function must take two arguments, `func(a, axis)`.\n a : array_like\n Input array.\n axes : array_like\n Axes over which `func` is applied; the elements must be integers.\n\n Returns\n -------\n apply_over_axis : ndarray\n The output array. The number of dimensions is the same as `a`,\n but the shape can be different. 
This depends on whether `func`\n changes the shape of its output with respect to its input.\n\n See Also\n --------\n apply_along_axis :\n Apply a function to 1-D slices of an array along the given axis.\n\n Notes\n -----\n This function is equivalent to tuple axis arguments to reorderable ufuncs\n with keepdims=True. Tuple axis arguments to ufuncs have been available since\n version 1.7.0.\n\n Examples\n --------\n >>> import numpy as np\n >>> a = np.arange(24).reshape(2,3,4)\n >>> a\n array([[[ 0, 1, 2, 3],\n [ 4, 5, 6, 7],\n [ 8, 9, 10, 11]],\n [[12, 13, 14, 15],\n [16, 17, 18, 19],\n [20, 21, 22, 23]]])\n\n Sum over axes 0 and 2. The result has same number of dimensions\n as the original array:\n\n >>> np.apply_over_axes(np.sum, a, [0,2])\n array([[[ 60],\n [ 92],\n [124]]])\n\n Tuple axis arguments to ufuncs are equivalent:\n\n >>> np.sum(a, axis=(0,2), keepdims=True)\n array([[[ 60],\n [ 92],\n [124]]])\n\n """\n val = asarray(a)\n N = a.ndim\n if array(axes).ndim == 0:\n axes = (axes,)\n for axis in axes:\n if axis < 0:\n axis = N + axis\n args = (val, axis)\n res = func(*args)\n if res.ndim == val.ndim:\n val = res\n else:\n res = expand_dims(res, axis)\n if res.ndim == val.ndim:\n val = res\n else:\n raise ValueError("function is not returning "\n "an array of the correct shape")\n return val\n\n\ndef _expand_dims_dispatcher(a, axis):\n return (a,)\n\n\n@array_function_dispatch(_expand_dims_dispatcher)\ndef expand_dims(a, axis):\n """\n Expand the shape of an array.\n\n Insert a new axis that will appear at the `axis` position in the expanded\n array shape.\n\n Parameters\n ----------\n a : array_like\n Input array.\n axis : int or tuple of ints\n Position in the expanded axes where the new axis (or axes) is placed.\n\n .. deprecated:: 1.13.0\n Passing an axis where ``axis > a.ndim`` will be treated as\n ``axis == a.ndim``, and passing ``axis < -a.ndim - 1`` will\n be treated as ``axis == 0``. 
This behavior is deprecated.\n\n Returns\n -------\n result : ndarray\n View of `a` with the number of dimensions increased.\n\n See Also\n --------\n squeeze : The inverse operation, removing singleton dimensions\n reshape : Insert, remove, and combine dimensions, and resize existing ones\n atleast_1d, atleast_2d, atleast_3d\n\n Examples\n --------\n >>> import numpy as np\n >>> x = np.array([1, 2])\n >>> x.shape\n (2,)\n\n The following is equivalent to ``x[np.newaxis, :]`` or ``x[np.newaxis]``:\n\n >>> y = np.expand_dims(x, axis=0)\n >>> y\n array([[1, 2]])\n >>> y.shape\n (1, 2)\n\n The following is equivalent to ``x[:, np.newaxis]``:\n\n >>> y = np.expand_dims(x, axis=1)\n >>> y\n array([[1],\n [2]])\n >>> y.shape\n (2, 1)\n\n ``axis`` may also be a tuple:\n\n >>> y = np.expand_dims(x, axis=(0, 1))\n >>> y\n array([[[1, 2]]])\n\n >>> y = np.expand_dims(x, axis=(2, 0))\n >>> y\n array([[[1],\n [2]]])\n\n Note that some examples may use ``None`` instead of ``np.newaxis``. These\n are the same objects:\n\n >>> np.newaxis is None\n True\n\n """\n if isinstance(a, matrix):\n a = asarray(a)\n else:\n a = asanyarray(a)\n\n if not isinstance(axis, (tuple, list)):\n axis = (axis,)\n\n out_ndim = len(axis) + a.ndim\n axis = normalize_axis_tuple(axis, out_ndim)\n\n shape_it = iter(a.shape)\n shape = [1 if ax in axis else next(shape_it) for ax in range(out_ndim)]\n\n return a.reshape(shape)\n\n\n# NOTE: Remove once deprecation period passes\n@set_module("numpy")\ndef row_stack(tup, *, dtype=None, casting="same_kind"):\n # Deprecated in NumPy 2.0, 2023-08-18\n warnings.warn(\n "`row_stack` alias is deprecated. 
"\n "Use `np.vstack` directly.",\n DeprecationWarning,\n stacklevel=2\n )\n return vstack(tup, dtype=dtype, casting=casting)\n\n\nrow_stack.__doc__ = vstack.__doc__\n\n\ndef _column_stack_dispatcher(tup):\n return _arrays_for_stack_dispatcher(tup)\n\n\n@array_function_dispatch(_column_stack_dispatcher)\ndef column_stack(tup):\n """\n Stack 1-D arrays as columns into a 2-D array.\n\n Take a sequence of 1-D arrays and stack them as columns\n to make a single 2-D array. 2-D arrays are stacked as-is,\n just like with `hstack`. 1-D arrays are turned into 2-D columns\n first.\n\n Parameters\n ----------\n tup : sequence of 1-D or 2-D arrays.\n Arrays to stack. All of them must have the same first dimension.\n\n Returns\n -------\n stacked : 2-D array\n The array formed by stacking the given arrays.\n\n See Also\n --------\n stack, hstack, vstack, concatenate\n\n Examples\n --------\n >>> import numpy as np\n >>> a = np.array((1,2,3))\n >>> b = np.array((2,3,4))\n >>> np.column_stack((a,b))\n array([[1, 2],\n [2, 3],\n [3, 4]])\n\n """\n arrays = []\n for v in tup:\n arr = asanyarray(v)\n if arr.ndim < 2:\n arr = array(arr, copy=None, subok=True, ndmin=2).T\n arrays.append(arr)\n return _nx.concatenate(arrays, 1)\n\n\ndef _dstack_dispatcher(tup):\n return _arrays_for_stack_dispatcher(tup)\n\n\n@array_function_dispatch(_dstack_dispatcher)\ndef dstack(tup):\n """\n Stack arrays in sequence depth wise (along third axis).\n\n This is equivalent to concatenation along the third axis after 2-D arrays\n of shape `(M,N)` have been reshaped to `(M,N,1)` and 1-D arrays of shape\n `(N,)` have been reshaped to `(1,N,1)`. Rebuilds arrays divided by\n `dsplit`.\n\n This function makes most sense for arrays with up to 3 dimensions. For\n instance, for pixel-data with a height (first axis), width (second axis),\n and r/g/b channels (third axis). 
The functions `concatenate`, `stack` and\n `block` provide more general stacking and concatenation operations.\n\n Parameters\n ----------\n tup : sequence of arrays\n The arrays must have the same shape along all but the third axis.\n 1-D or 2-D arrays must have the same shape.\n\n Returns\n -------\n stacked : ndarray\n The array formed by stacking the given arrays, will be at least 3-D.\n\n See Also\n --------\n concatenate : Join a sequence of arrays along an existing axis.\n stack : Join a sequence of arrays along a new axis.\n block : Assemble an nd-array from nested lists of blocks.\n vstack : Stack arrays in sequence vertically (row wise).\n hstack : Stack arrays in sequence horizontally (column wise).\n column_stack : Stack 1-D arrays as columns into a 2-D array.\n dsplit : Split array along third axis.\n\n Examples\n --------\n >>> import numpy as np\n >>> a = np.array((1,2,3))\n >>> b = np.array((2,3,4))\n >>> np.dstack((a,b))\n array([[[1, 2],\n [2, 3],\n [3, 4]]])\n\n >>> a = np.array([[1],[2],[3]])\n >>> b = np.array([[2],[3],[4]])\n >>> np.dstack((a,b))\n array([[[1, 2]],\n [[2, 3]],\n [[3, 4]]])\n\n """\n arrs = atleast_3d(*tup)\n if not isinstance(arrs, tuple):\n arrs = (arrs,)\n return _nx.concatenate(arrs, 2)\n\n\ndef _replace_zero_by_x_arrays(sub_arys):\n for i in range(len(sub_arys)):\n if _nx.ndim(sub_arys[i]) == 0:\n sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype)\n elif _nx.sometrue(_nx.equal(_nx.shape(sub_arys[i]), 0)):\n sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype)\n return sub_arys\n\n\ndef _array_split_dispatcher(ary, indices_or_sections, axis=None):\n return (ary, indices_or_sections)\n\n\n@array_function_dispatch(_array_split_dispatcher)\ndef array_split(ary, indices_or_sections, axis=0):\n """\n Split an array into multiple sub-arrays.\n\n Please refer to the ``split`` documentation. 
The only difference\n between these functions is that ``array_split`` allows\n `indices_or_sections` to be an integer that does *not* equally\n divide the axis. For an array of length l that should be split\n into n sections, it returns l % n sub-arrays of size l//n + 1\n and the rest of size l//n.\n\n See Also\n --------\n split : Split array into multiple sub-arrays of equal size.\n\n Examples\n --------\n >>> import numpy as np\n >>> x = np.arange(8.0)\n >>> np.array_split(x, 3)\n [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7.])]\n\n >>> x = np.arange(9)\n >>> np.array_split(x, 4)\n [array([0, 1, 2]), array([3, 4]), array([5, 6]), array([7, 8])]\n\n """\n try:\n Ntotal = ary.shape[axis]\n except AttributeError:\n Ntotal = len(ary)\n try:\n # handle array case.\n Nsections = len(indices_or_sections) + 1\n div_points = [0] + list(indices_or_sections) + [Ntotal]\n except TypeError:\n # indices_or_sections is a scalar, not an array.\n Nsections = int(indices_or_sections)\n if Nsections <= 0:\n raise ValueError('number sections must be larger than 0.') from None\n Neach_section, extras = divmod(Ntotal, Nsections)\n section_sizes = ([0] +\n extras * [Neach_section + 1] +\n (Nsections - extras) * [Neach_section])\n div_points = _nx.array(section_sizes, dtype=_nx.intp).cumsum()\n\n sub_arys = []\n sary = _nx.swapaxes(ary, axis, 0)\n for i in range(Nsections):\n st = div_points[i]\n end = div_points[i + 1]\n sub_arys.append(_nx.swapaxes(sary[st:end], axis, 0))\n\n return sub_arys\n\n\ndef _split_dispatcher(ary, indices_or_sections, axis=None):\n return (ary, indices_or_sections)\n\n\n@array_function_dispatch(_split_dispatcher)\ndef split(ary, indices_or_sections, axis=0):\n """\n Split an array into multiple sub-arrays as views into `ary`.\n\n Parameters\n ----------\n ary : ndarray\n Array to be divided into sub-arrays.\n indices_or_sections : int or 1-D array\n If `indices_or_sections` is an integer, N, the array will be divided\n into N equal arrays along 
`axis`. If such a split is not possible,\n an error is raised.\n\n If `indices_or_sections` is a 1-D array of sorted integers, the entries\n indicate where along `axis` the array is split. For example,\n ``[2, 3]`` would, for ``axis=0``, result in\n\n - ary[:2]\n - ary[2:3]\n - ary[3:]\n\n If an index exceeds the dimension of the array along `axis`,\n an empty sub-array is returned correspondingly.\n axis : int, optional\n The axis along which to split, default is 0.\n\n Returns\n -------\n sub-arrays : list of ndarrays\n A list of sub-arrays as views into `ary`.\n\n Raises\n ------\n ValueError\n If `indices_or_sections` is given as an integer, but\n a split does not result in equal division.\n\n See Also\n --------\n array_split : Split an array into multiple sub-arrays of equal or\n near-equal size. Does not raise an exception if\n an equal division cannot be made.\n hsplit : Split array into multiple sub-arrays horizontally (column-wise).\n vsplit : Split array into multiple sub-arrays vertically (row wise).\n dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).\n concatenate : Join a sequence of arrays along an existing axis.\n stack : Join a sequence of arrays along a new axis.\n hstack : Stack arrays in sequence horizontally (column wise).\n vstack : Stack arrays in sequence vertically (row wise).\n dstack : Stack arrays in sequence depth wise (along third dimension).\n\n Examples\n --------\n >>> import numpy as np\n >>> x = np.arange(9.0)\n >>> np.split(x, 3)\n [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7., 8.])]\n\n >>> x = np.arange(8.0)\n >>> np.split(x, [3, 5, 6, 10])\n [array([0., 1., 2.]),\n array([3., 4.]),\n array([5.]),\n array([6., 7.]),\n array([], dtype=float64)]\n\n """\n try:\n len(indices_or_sections)\n except TypeError:\n sections = indices_or_sections\n N = ary.shape[axis]\n if N % sections:\n raise ValueError(\n 'array split does not result in an equal division') from None\n return array_split(ary, 
indices_or_sections, axis)\n\n\ndef _hvdsplit_dispatcher(ary, indices_or_sections):\n return (ary, indices_or_sections)\n\n\n@array_function_dispatch(_hvdsplit_dispatcher)\ndef hsplit(ary, indices_or_sections):\n """\n Split an array into multiple sub-arrays horizontally (column-wise).\n\n Please refer to the `split` documentation. `hsplit` is equivalent\n to `split` with ``axis=1``, the array is always split along the second\n axis except for 1-D arrays, where it is split at ``axis=0``.\n\n See Also\n --------\n split : Split an array into multiple sub-arrays of equal size.\n\n Examples\n --------\n >>> import numpy as np\n >>> x = np.arange(16.0).reshape(4, 4)\n >>> x\n array([[ 0., 1., 2., 3.],\n [ 4., 5., 6., 7.],\n [ 8., 9., 10., 11.],\n [12., 13., 14., 15.]])\n >>> np.hsplit(x, 2)\n [array([[ 0., 1.],\n [ 4., 5.],\n [ 8., 9.],\n [12., 13.]]),\n array([[ 2., 3.],\n [ 6., 7.],\n [10., 11.],\n [14., 15.]])]\n >>> np.hsplit(x, np.array([3, 6]))\n [array([[ 0., 1., 2.],\n [ 4., 5., 6.],\n [ 8., 9., 10.],\n [12., 13., 14.]]),\n array([[ 3.],\n [ 7.],\n [11.],\n [15.]]),\n array([], shape=(4, 0), dtype=float64)]\n\n With a higher dimensional array the split is still along the second axis.\n\n >>> x = np.arange(8.0).reshape(2, 2, 2)\n >>> x\n array([[[0., 1.],\n [2., 3.]],\n [[4., 5.],\n [6., 7.]]])\n >>> np.hsplit(x, 2)\n [array([[[0., 1.]],\n [[4., 5.]]]),\n array([[[2., 3.]],\n [[6., 7.]]])]\n\n With a 1-D array, the split is along axis 0.\n\n >>> x = np.array([0, 1, 2, 3, 4, 5])\n >>> np.hsplit(x, 2)\n [array([0, 1, 2]), array([3, 4, 5])]\n\n """\n if _nx.ndim(ary) == 0:\n raise ValueError('hsplit only works on arrays of 1 or more dimensions')\n if ary.ndim > 1:\n return split(ary, indices_or_sections, 1)\n else:\n return split(ary, indices_or_sections, 0)\n\n\n@array_function_dispatch(_hvdsplit_dispatcher)\ndef vsplit(ary, indices_or_sections):\n """\n Split an array into multiple sub-arrays vertically (row-wise).\n\n Please refer to the ``split`` documentation. 
``vsplit`` is equivalent\n to ``split`` with `axis=0` (default), the array is always split along the\n first axis regardless of the array dimension.\n\n See Also\n --------\n split : Split an array into multiple sub-arrays of equal size.\n\n Examples\n --------\n >>> import numpy as np\n >>> x = np.arange(16.0).reshape(4, 4)\n >>> x\n array([[ 0., 1., 2., 3.],\n [ 4., 5., 6., 7.],\n [ 8., 9., 10., 11.],\n [12., 13., 14., 15.]])\n >>> np.vsplit(x, 2)\n [array([[0., 1., 2., 3.],\n [4., 5., 6., 7.]]),\n array([[ 8., 9., 10., 11.],\n [12., 13., 14., 15.]])]\n >>> np.vsplit(x, np.array([3, 6]))\n [array([[ 0., 1., 2., 3.],\n [ 4., 5., 6., 7.],\n [ 8., 9., 10., 11.]]),\n array([[12., 13., 14., 15.]]),\n array([], shape=(0, 4), dtype=float64)]\n\n With a higher dimensional array the split is still along the first axis.\n\n >>> x = np.arange(8.0).reshape(2, 2, 2)\n >>> x\n array([[[0., 1.],\n [2., 3.]],\n [[4., 5.],\n [6., 7.]]])\n >>> np.vsplit(x, 2)\n [array([[[0., 1.],\n [2., 3.]]]),\n array([[[4., 5.],\n [6., 7.]]])]\n\n """\n if _nx.ndim(ary) < 2:\n raise ValueError('vsplit only works on arrays of 2 or more dimensions')\n return split(ary, indices_or_sections, 0)\n\n\n@array_function_dispatch(_hvdsplit_dispatcher)\ndef dsplit(ary, indices_or_sections):\n """\n Split array into multiple sub-arrays along the 3rd axis (depth).\n\n Please refer to the `split` documentation. 
`dsplit` is equivalent\n to `split` with ``axis=2``, the array is always split along the third\n axis provided the array dimension is greater than or equal to 3.\n\n See Also\n --------\n split : Split an array into multiple sub-arrays of equal size.\n\n Examples\n --------\n >>> import numpy as np\n >>> x = np.arange(16.0).reshape(2, 2, 4)\n >>> x\n array([[[ 0., 1., 2., 3.],\n [ 4., 5., 6., 7.]],\n [[ 8., 9., 10., 11.],\n [12., 13., 14., 15.]]])\n >>> np.dsplit(x, 2)\n [array([[[ 0., 1.],\n [ 4., 5.]],\n [[ 8., 9.],\n [12., 13.]]]), array([[[ 2., 3.],\n [ 6., 7.]],\n [[10., 11.],\n [14., 15.]]])]\n >>> np.dsplit(x, np.array([3, 6]))\n [array([[[ 0., 1., 2.],\n [ 4., 5., 6.]],\n [[ 8., 9., 10.],\n [12., 13., 14.]]]),\n array([[[ 3.],\n [ 7.]],\n [[11.],\n [15.]]]),\n array([], shape=(2, 2, 0), dtype=float64)]\n """\n if _nx.ndim(ary) < 3:\n raise ValueError('dsplit only works on arrays of 3 or more dimensions')\n return split(ary, indices_or_sections, 2)\n\n\ndef get_array_wrap(*args):\n """Find the wrapper for the array with the highest priority.\n\n In case of ties, leftmost wins. If no wrapper is found, return None.\n\n .. deprecated:: 2.0\n """\n\n # Deprecated in NumPy 2.0, 2023-07-11\n warnings.warn(\n "`get_array_wrap` is deprecated. 
"\n "(deprecated in NumPy 2.0)",\n DeprecationWarning,\n stacklevel=2\n )\n\n wrappers = sorted((getattr(x, '__array_priority__', 0), -i,\n x.__array_wrap__) for i, x in enumerate(args)\n if hasattr(x, '__array_wrap__'))\n if wrappers:\n return wrappers[-1][-1]\n return None\n\n\ndef _kron_dispatcher(a, b):\n return (a, b)\n\n\n@array_function_dispatch(_kron_dispatcher)\ndef kron(a, b):\n """\n Kronecker product of two arrays.\n\n Computes the Kronecker product, a composite array made of blocks of the\n second array scaled by the first.\n\n Parameters\n ----------\n a, b : array_like\n\n Returns\n -------\n out : ndarray\n\n See Also\n --------\n outer : The outer product\n\n Notes\n -----\n The function assumes that the number of dimensions of `a` and `b`\n are the same, if necessary prepending the smallest with ones.\n If ``a.shape = (r0,r1,..,rN)`` and ``b.shape = (s0,s1,...,sN)``,\n the Kronecker product has shape ``(r0*s0, r1*s1, ..., rN*SN)``.\n The elements are products of elements from `a` and `b`, organized\n explicitly by::\n\n kron(a,b)[k0,k1,...,kN] = a[i0,i1,...,iN] * b[j0,j1,...,jN]\n\n where::\n\n kt = it * st + jt, t = 0,...,N\n\n In the common 2-D case (N=1), the block structure can be visualized::\n\n [[ a[0,0]*b, a[0,1]*b, ... , a[0,-1]*b ],\n [ ... ... ],\n [ a[-1,0]*b, a[-1,1]*b, ... 
, a[-1,-1]*b ]]\n\n\n Examples\n --------\n >>> import numpy as np\n >>> np.kron([1,10,100], [5,6,7])\n array([ 5, 6, 7, ..., 500, 600, 700])\n >>> np.kron([5,6,7], [1,10,100])\n array([ 5, 50, 500, ..., 7, 70, 700])\n\n >>> np.kron(np.eye(2), np.ones((2,2)))\n array([[1., 1., 0., 0.],\n [1., 1., 0., 0.],\n [0., 0., 1., 1.],\n [0., 0., 1., 1.]])\n\n >>> a = np.arange(100).reshape((2,5,2,5))\n >>> b = np.arange(24).reshape((2,3,4))\n >>> c = np.kron(a,b)\n >>> c.shape\n (2, 10, 6, 20)\n >>> I = (1,3,0,2)\n >>> J = (0,2,1)\n >>> J1 = (0,) + J # extend to ndim=4\n >>> S1 = (1,) + b.shape\n >>> K = tuple(np.array(I) * np.array(S1) + np.array(J1))\n >>> c[K] == a[I]*b[J]\n True\n\n """\n # Working:\n # 1. Equalise the shapes by prepending smaller array with 1s\n # 2. Expand shapes of both the arrays by adding new axes at\n # odd positions for 1st array and even positions for 2nd\n # 3. Compute the product of the modified array\n # 4. The inner most array elements now contain the rows of\n # the Kronecker product\n # 5. 
Reshape the result to kron's shape, which is same as\n # product of shapes of the two arrays.\n b = asanyarray(b)\n a = array(a, copy=None, subok=True, ndmin=b.ndim)\n is_any_mat = isinstance(a, matrix) or isinstance(b, matrix)\n ndb, nda = b.ndim, a.ndim\n nd = max(ndb, nda)\n\n if (nda == 0 or ndb == 0):\n return _nx.multiply(a, b)\n\n as_ = a.shape\n bs = b.shape\n if not a.flags.contiguous:\n a = reshape(a, as_)\n if not b.flags.contiguous:\n b = reshape(b, bs)\n\n # Equalise the shapes by prepending smaller one with 1s\n as_ = (1,) * max(0, ndb - nda) + as_\n bs = (1,) * max(0, nda - ndb) + bs\n\n # Insert empty dimensions\n a_arr = expand_dims(a, axis=tuple(range(ndb - nda)))\n b_arr = expand_dims(b, axis=tuple(range(nda - ndb)))\n\n # Compute the product\n a_arr = expand_dims(a_arr, axis=tuple(range(1, nd * 2, 2)))\n b_arr = expand_dims(b_arr, axis=tuple(range(0, nd * 2, 2)))\n # In case of `mat`, convert result to `array`\n result = _nx.multiply(a_arr, b_arr, subok=(not is_any_mat))\n\n # Reshape back\n result = result.reshape(_nx.multiply(as_, bs))\n\n return result if not is_any_mat else matrix(result, copy=False)\n\n\ndef _tile_dispatcher(A, reps):\n return (A, reps)\n\n\n@array_function_dispatch(_tile_dispatcher)\ndef tile(A, reps):\n """\n Construct an array by repeating A the number of times given by reps.\n\n If `reps` has length ``d``, the result will have dimension of\n ``max(d, A.ndim)``.\n\n If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending new\n axes. So a shape (3,) array is promoted to (1, 3) for 2-D replication,\n or shape (1, 1, 3) for 3-D replication. 
If this is not the desired\n behavior, promote `A` to d-dimensions manually before calling this\n function.\n\n If ``A.ndim > d``, `reps` is promoted to `A`.ndim by prepending 1's to it.\n Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as\n (1, 1, 2, 2).\n\n Note : Although tile may be used for broadcasting, it is strongly\n recommended to use numpy's broadcasting operations and functions.\n\n Parameters\n ----------\n A : array_like\n The input array.\n reps : array_like\n The number of repetitions of `A` along each axis.\n\n Returns\n -------\n c : ndarray\n The tiled output array.\n\n See Also\n --------\n repeat : Repeat elements of an array.\n broadcast_to : Broadcast an array to a new shape\n\n Examples\n --------\n >>> import numpy as np\n >>> a = np.array([0, 1, 2])\n >>> np.tile(a, 2)\n array([0, 1, 2, 0, 1, 2])\n >>> np.tile(a, (2, 2))\n array([[0, 1, 2, 0, 1, 2],\n [0, 1, 2, 0, 1, 2]])\n >>> np.tile(a, (2, 1, 2))\n array([[[0, 1, 2, 0, 1, 2]],\n [[0, 1, 2, 0, 1, 2]]])\n\n >>> b = np.array([[1, 2], [3, 4]])\n >>> np.tile(b, 2)\n array([[1, 2, 1, 2],\n [3, 4, 3, 4]])\n >>> np.tile(b, (2, 1))\n array([[1, 2],\n [3, 4],\n [1, 2],\n [3, 4]])\n\n >>> c = np.array([1,2,3,4])\n >>> np.tile(c,(4,1))\n array([[1, 2, 3, 4],\n [1, 2, 3, 4],\n [1, 2, 3, 4],\n [1, 2, 3, 4]])\n """\n try:\n tup = tuple(reps)\n except TypeError:\n tup = (reps,)\n d = len(tup)\n if all(x == 1 for x in tup) and isinstance(A, _nx.ndarray):\n # Fixes the problem that the function does not make a copy if A is a\n # numpy array and the repetitions are 1 in all dimensions\n return _nx.array(A, copy=True, subok=True, ndmin=d)\n else:\n # Note that no copy of zero-sized arrays is made. 
However since they\n # have no data there is no risk of an inadvertent overwrite.\n c = _nx.array(A, copy=None, subok=True, ndmin=d)\n if (d < c.ndim):\n tup = (1,) * (c.ndim - d) + tup\n shape_out = tuple(s * t for s, t in zip(c.shape, tup))\n n = c.size\n if n > 0:\n for dim_in, nrep in zip(c.shape, tup):\n if nrep != 1:\n c = c.reshape(-1, n).repeat(nrep, 0)\n n //= dim_in\n return c.reshape(shape_out)\n | .venv\Lib\site-packages\numpy\lib\_shape_base_impl.py | _shape_base_impl.py | Python | 40,780 | 0.95 | 0.108378 | 0.039623 | vue-tools | 42 | 2024-02-09T21:33:21.702694 | MIT | false | c005279982129430b0c783efc0b379fc |
from collections.abc import Callable, Sequence\nfrom typing import (\n Any,\n Concatenate,\n ParamSpec,\n Protocol,\n SupportsIndex,\n TypeVar,\n overload,\n type_check_only,\n)\n\nfrom typing_extensions import deprecated\n\nimport numpy as np\nfrom numpy import (\n _CastingKind,\n complexfloating,\n floating,\n generic,\n integer,\n object_,\n signedinteger,\n ufunc,\n unsignedinteger,\n)\nfrom numpy._typing import (\n ArrayLike,\n DTypeLike,\n NDArray,\n _ArrayLike,\n _ArrayLikeBool_co,\n _ArrayLikeComplex_co,\n _ArrayLikeFloat_co,\n _ArrayLikeInt_co,\n _ArrayLikeObject_co,\n _ArrayLikeUInt_co,\n _ShapeLike,\n)\n\n__all__ = [\n "column_stack",\n "row_stack",\n "dstack",\n "array_split",\n "split",\n "hsplit",\n "vsplit",\n "dsplit",\n "apply_over_axes",\n "expand_dims",\n "apply_along_axis",\n "kron",\n "tile",\n "take_along_axis",\n "put_along_axis",\n]\n\n_P = ParamSpec("_P")\n_ScalarT = TypeVar("_ScalarT", bound=generic)\n\n# Signature of `__array_wrap__`\n@type_check_only\nclass _ArrayWrap(Protocol):\n def __call__(\n self,\n array: NDArray[Any],\n context: tuple[ufunc, tuple[Any, ...], int] | None = ...,\n return_scalar: bool = ...,\n /,\n ) -> Any: ...\n\n@type_check_only\nclass _SupportsArrayWrap(Protocol):\n @property\n def __array_wrap__(self) -> _ArrayWrap: ...\n\n###\n\ndef take_along_axis(\n arr: _ScalarT | NDArray[_ScalarT],\n indices: NDArray[integer],\n axis: int | None = ...,\n) -> NDArray[_ScalarT]: ...\n\ndef put_along_axis(\n arr: NDArray[_ScalarT],\n indices: NDArray[integer],\n values: ArrayLike,\n axis: int | None,\n) -> None: ...\n\n@overload\ndef apply_along_axis(\n func1d: Callable[Concatenate[NDArray[Any], _P], _ArrayLike[_ScalarT]],\n axis: SupportsIndex,\n arr: ArrayLike,\n *args: _P.args,\n **kwargs: _P.kwargs,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef apply_along_axis(\n func1d: Callable[Concatenate[NDArray[Any], _P], Any],\n axis: SupportsIndex,\n arr: ArrayLike,\n *args: _P.args,\n **kwargs: _P.kwargs,\n) -> NDArray[Any]: 
...\n\ndef apply_over_axes(\n func: Callable[[NDArray[Any], int], NDArray[_ScalarT]],\n a: ArrayLike,\n axes: int | Sequence[int],\n) -> NDArray[_ScalarT]: ...\n\n@overload\ndef expand_dims(\n a: _ArrayLike[_ScalarT],\n axis: _ShapeLike,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef expand_dims(\n a: ArrayLike,\n axis: _ShapeLike,\n) -> NDArray[Any]: ...\n\n# Deprecated in NumPy 2.0, 2023-08-18\n@deprecated("`row_stack` alias is deprecated. Use `np.vstack` directly.")\ndef row_stack(\n tup: Sequence[ArrayLike],\n *,\n dtype: DTypeLike | None = None,\n casting: _CastingKind = "same_kind",\n) -> NDArray[Any]: ...\n\n#\n@overload\ndef column_stack(tup: Sequence[_ArrayLike[_ScalarT]]) -> NDArray[_ScalarT]: ...\n@overload\ndef column_stack(tup: Sequence[ArrayLike]) -> NDArray[Any]: ...\n\n@overload\ndef dstack(tup: Sequence[_ArrayLike[_ScalarT]]) -> NDArray[_ScalarT]: ...\n@overload\ndef dstack(tup: Sequence[ArrayLike]) -> NDArray[Any]: ...\n\n@overload\ndef array_split(\n ary: _ArrayLike[_ScalarT],\n indices_or_sections: _ShapeLike,\n axis: SupportsIndex = ...,\n) -> list[NDArray[_ScalarT]]: ...\n@overload\ndef array_split(\n ary: ArrayLike,\n indices_or_sections: _ShapeLike,\n axis: SupportsIndex = ...,\n) -> list[NDArray[Any]]: ...\n\n@overload\ndef split(\n ary: _ArrayLike[_ScalarT],\n indices_or_sections: _ShapeLike,\n axis: SupportsIndex = ...,\n) -> list[NDArray[_ScalarT]]: ...\n@overload\ndef split(\n ary: ArrayLike,\n indices_or_sections: _ShapeLike,\n axis: SupportsIndex = ...,\n) -> list[NDArray[Any]]: ...\n\n@overload\ndef hsplit(\n ary: _ArrayLike[_ScalarT],\n indices_or_sections: _ShapeLike,\n) -> list[NDArray[_ScalarT]]: ...\n@overload\ndef hsplit(\n ary: ArrayLike,\n indices_or_sections: _ShapeLike,\n) -> list[NDArray[Any]]: ...\n\n@overload\ndef vsplit(\n ary: _ArrayLike[_ScalarT],\n indices_or_sections: _ShapeLike,\n) -> list[NDArray[_ScalarT]]: ...\n@overload\ndef vsplit(\n ary: ArrayLike,\n indices_or_sections: _ShapeLike,\n) -> list[NDArray[Any]]: 
...\n\n@overload\ndef dsplit(\n ary: _ArrayLike[_ScalarT],\n indices_or_sections: _ShapeLike,\n) -> list[NDArray[_ScalarT]]: ...\n@overload\ndef dsplit(\n ary: ArrayLike,\n indices_or_sections: _ShapeLike,\n) -> list[NDArray[Any]]: ...\n\n@overload\ndef get_array_wrap(*args: _SupportsArrayWrap) -> _ArrayWrap: ...\n@overload\ndef get_array_wrap(*args: object) -> _ArrayWrap | None: ...\n\n@overload\ndef kron(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co) -> NDArray[np.bool]: ... # type: ignore[misc]\n@overload\ndef kron(a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co) -> NDArray[unsignedinteger]: ... # type: ignore[misc]\n@overload\ndef kron(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co) -> NDArray[signedinteger]: ... # type: ignore[misc]\n@overload\ndef kron(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co) -> NDArray[floating]: ... # type: ignore[misc]\n@overload\ndef kron(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co) -> NDArray[complexfloating]: ...\n@overload\ndef kron(a: _ArrayLikeObject_co, b: Any) -> NDArray[object_]: ...\n@overload\ndef kron(a: Any, b: _ArrayLikeObject_co) -> NDArray[object_]: ...\n\n@overload\ndef tile(\n A: _ArrayLike[_ScalarT],\n reps: int | Sequence[int],\n) -> NDArray[_ScalarT]: ...\n@overload\ndef tile(\n A: ArrayLike,\n reps: int | Sequence[int],\n) -> NDArray[Any]: ...\n | .venv\Lib\site-packages\numpy\lib\_shape_base_impl.pyi | _shape_base_impl.pyi | Other | 5,647 | 0.95 | 0.157447 | 0.042453 | python-kit | 466 | 2024-10-12T00:59:12.041590 | BSD-3-Clause | false | fcfd7d197bd962777752f8b7cfa6018c |
"""\nUtilities that manipulate strides to achieve desirable effects.\n\nAn explanation of strides can be found in the :ref:`arrays.ndarray`.\n\n"""\nimport numpy as np\nfrom numpy._core.numeric import normalize_axis_tuple\nfrom numpy._core.overrides import array_function_dispatch, set_module\n\n__all__ = ['broadcast_to', 'broadcast_arrays', 'broadcast_shapes']\n\n\nclass DummyArray:\n """Dummy object that just exists to hang __array_interface__ dictionaries\n and possibly keep alive a reference to a base array.\n """\n\n def __init__(self, interface, base=None):\n self.__array_interface__ = interface\n self.base = base\n\n\ndef _maybe_view_as_subclass(original_array, new_array):\n if type(original_array) is not type(new_array):\n # if input was an ndarray subclass and subclasses were OK,\n # then view the result as that subclass.\n new_array = new_array.view(type=type(original_array))\n # Since we have done something akin to a view from original_array, we\n # should let the subclass finalize (if it has it implemented, i.e., is\n # not None).\n if new_array.__array_finalize__:\n new_array.__array_finalize__(original_array)\n return new_array\n\n\n@set_module("numpy.lib.stride_tricks")\ndef as_strided(x, shape=None, strides=None, subok=False, writeable=True):\n """\n Create a view into the array with the given shape and strides.\n\n .. warning:: This function has to be used with extreme care, see notes.\n\n Parameters\n ----------\n x : ndarray\n Array to create a new.\n shape : sequence of int, optional\n The shape of the new array. Defaults to ``x.shape``.\n strides : sequence of int, optional\n The strides of the new array. Defaults to ``x.strides``.\n subok : bool, optional\n If True, subclasses are preserved.\n writeable : bool, optional\n If set to False, the returned array will always be readonly.\n Otherwise it will be writable if the original array was. 
It\n is advisable to set this to False if possible (see Notes).\n\n Returns\n -------\n view : ndarray\n\n See also\n --------\n broadcast_to : broadcast an array to a given shape.\n reshape : reshape an array.\n lib.stride_tricks.sliding_window_view :\n userfriendly and safe function for a creation of sliding window views.\n\n Notes\n -----\n ``as_strided`` creates a view into the array given the exact strides\n and shape. This means it manipulates the internal data structure of\n ndarray and, if done incorrectly, the array elements can point to\n invalid memory and can corrupt results or crash your program.\n It is advisable to always use the original ``x.strides`` when\n calculating new strides to avoid reliance on a contiguous memory\n layout.\n\n Furthermore, arrays created with this function often contain self\n overlapping memory, so that two elements are identical.\n Vectorized write operations on such arrays will typically be\n unpredictable. They may even give different results for small, large,\n or transposed arrays.\n\n Since writing to these arrays has to be tested and done with great\n care, you may want to use ``writeable=False`` to avoid accidental write\n operations.\n\n For these reasons it is advisable to avoid ``as_strided`` when\n possible.\n """\n # first convert input to array, possibly keeping subclass\n x = np.array(x, copy=None, subok=subok)\n interface = dict(x.__array_interface__)\n if shape is not None:\n interface['shape'] = tuple(shape)\n if strides is not None:\n interface['strides'] = tuple(strides)\n\n array = np.asarray(DummyArray(interface, base=x))\n # The route via `__interface__` does not preserve structured\n # dtypes. 
Since dtype should remain unchanged, we set it explicitly.\n array.dtype = x.dtype\n\n view = _maybe_view_as_subclass(x, array)\n\n if view.flags.writeable and not writeable:\n view.flags.writeable = False\n\n return view\n\n\ndef _sliding_window_view_dispatcher(x, window_shape, axis=None, *,\n subok=None, writeable=None):\n return (x,)\n\n\n@array_function_dispatch(\n _sliding_window_view_dispatcher, module="numpy.lib.stride_tricks"\n)\ndef sliding_window_view(x, window_shape, axis=None, *,\n subok=False, writeable=False):\n """\n Create a sliding window view into the array with the given window shape.\n\n Also known as rolling or moving window, the window slides across all\n dimensions of the array and extracts subsets of the array at all window\n positions.\n\n .. versionadded:: 1.20.0\n\n Parameters\n ----------\n x : array_like\n Array to create the sliding window view from.\n window_shape : int or tuple of int\n Size of window over each axis that takes part in the sliding window.\n If `axis` is not present, must have same length as the number of input\n array dimensions. Single integers `i` are treated as if they were the\n tuple `(i,)`.\n axis : int or tuple of int, optional\n Axis or axes along which the sliding window is applied.\n By default, the sliding window is applied to all axes and\n `window_shape[i]` will refer to axis `i` of `x`.\n If `axis` is given as a `tuple of int`, `window_shape[i]` will refer to\n the axis `axis[i]` of `x`.\n Single integers `i` are treated as if they were the tuple `(i,)`.\n subok : bool, optional\n If True, sub-classes will be passed-through, otherwise the returned\n array will be forced to be a base-class array (default).\n writeable : bool, optional\n When true, allow writing to the returned view. 
The default is false,\n as this should be used with caution: the returned view contains the\n same memory location multiple times, so writing to one location will\n cause others to change.\n\n Returns\n -------\n view : ndarray\n Sliding window view of the array. The sliding window dimensions are\n inserted at the end, and the original dimensions are trimmed as\n required by the size of the sliding window.\n That is, ``view.shape = x_shape_trimmed + window_shape``, where\n ``x_shape_trimmed`` is ``x.shape`` with every entry reduced by one less\n than the corresponding window size.\n\n See Also\n --------\n lib.stride_tricks.as_strided: A lower-level and less safe routine for\n creating arbitrary views from custom shape and strides.\n broadcast_to: broadcast an array to a given shape.\n\n Notes\n -----\n For many applications using a sliding window view can be convenient, but\n potentially very slow. Often specialized solutions exist, for example:\n\n - `scipy.signal.fftconvolve`\n\n - filtering functions in `scipy.ndimage`\n\n - moving window functions provided by\n `bottleneck <https://github.com/pydata/bottleneck>`_.\n\n As a rough estimate, a sliding window approach with an input size of `N`\n and a window size of `W` will scale as `O(N*W)` where frequently a special\n algorithm can achieve `O(N)`. 
That means that the sliding window variant\n for a window size of 100 can be a 100 times slower than a more specialized\n version.\n\n Nevertheless, for small window sizes, when no custom algorithm exists, or\n as a prototyping and developing tool, this function can be a good solution.\n\n Examples\n --------\n >>> import numpy as np\n >>> from numpy.lib.stride_tricks import sliding_window_view\n >>> x = np.arange(6)\n >>> x.shape\n (6,)\n >>> v = sliding_window_view(x, 3)\n >>> v.shape\n (4, 3)\n >>> v\n array([[0, 1, 2],\n [1, 2, 3],\n [2, 3, 4],\n [3, 4, 5]])\n\n This also works in more dimensions, e.g.\n\n >>> i, j = np.ogrid[:3, :4]\n >>> x = 10*i + j\n >>> x.shape\n (3, 4)\n >>> x\n array([[ 0, 1, 2, 3],\n [10, 11, 12, 13],\n [20, 21, 22, 23]])\n >>> shape = (2,2)\n >>> v = sliding_window_view(x, shape)\n >>> v.shape\n (2, 3, 2, 2)\n >>> v\n array([[[[ 0, 1],\n [10, 11]],\n [[ 1, 2],\n [11, 12]],\n [[ 2, 3],\n [12, 13]]],\n [[[10, 11],\n [20, 21]],\n [[11, 12],\n [21, 22]],\n [[12, 13],\n [22, 23]]]])\n\n The axis can be specified explicitly:\n\n >>> v = sliding_window_view(x, 3, 0)\n >>> v.shape\n (1, 4, 3)\n >>> v\n array([[[ 0, 10, 20],\n [ 1, 11, 21],\n [ 2, 12, 22],\n [ 3, 13, 23]]])\n\n The same axis can be used several times. 
In that case, every use reduces\n the corresponding original dimension:\n\n >>> v = sliding_window_view(x, (2, 3), (1, 1))\n >>> v.shape\n (3, 1, 2, 3)\n >>> v\n array([[[[ 0, 1, 2],\n [ 1, 2, 3]]],\n [[[10, 11, 12],\n [11, 12, 13]]],\n [[[20, 21, 22],\n [21, 22, 23]]]])\n\n Combining with stepped slicing (`::step`), this can be used to take sliding\n views which skip elements:\n\n >>> x = np.arange(7)\n >>> sliding_window_view(x, 5)[:, ::2]\n array([[0, 2, 4],\n [1, 3, 5],\n [2, 4, 6]])\n\n or views which move by multiple elements\n\n >>> x = np.arange(7)\n >>> sliding_window_view(x, 3)[::2, :]\n array([[0, 1, 2],\n [2, 3, 4],\n [4, 5, 6]])\n\n A common application of `sliding_window_view` is the calculation of running\n statistics. The simplest example is the\n `moving average <https://en.wikipedia.org/wiki/Moving_average>`_:\n\n >>> x = np.arange(6)\n >>> x.shape\n (6,)\n >>> v = sliding_window_view(x, 3)\n >>> v.shape\n (4, 3)\n >>> v\n array([[0, 1, 2],\n [1, 2, 3],\n [2, 3, 4],\n [3, 4, 5]])\n >>> moving_average = v.mean(axis=-1)\n >>> moving_average\n array([1., 2., 3., 4.])\n\n Note that a sliding window approach is often **not** optimal (see Notes).\n """\n window_shape = (tuple(window_shape)\n if np.iterable(window_shape)\n else (window_shape,))\n # first convert input to array, possibly keeping subclass\n x = np.array(x, copy=None, subok=subok)\n\n window_shape_array = np.array(window_shape)\n if np.any(window_shape_array < 0):\n raise ValueError('`window_shape` cannot contain negative values')\n\n if axis is None:\n axis = tuple(range(x.ndim))\n if len(window_shape) != len(axis):\n raise ValueError(f'Since axis is `None`, must provide '\n f'window_shape for all dimensions of `x`; '\n f'got {len(window_shape)} window_shape elements '\n f'and `x.ndim` is {x.ndim}.')\n else:\n axis = normalize_axis_tuple(axis, x.ndim, allow_duplicate=True)\n if len(window_shape) != len(axis):\n raise ValueError(f'Must provide matching length window_shape and '\n f'axis; 
got {len(window_shape)} window_shape '\n f'elements and {len(axis)} axes elements.')\n\n out_strides = x.strides + tuple(x.strides[ax] for ax in axis)\n\n # note: same axis can be windowed repeatedly\n x_shape_trimmed = list(x.shape)\n for ax, dim in zip(axis, window_shape):\n if x_shape_trimmed[ax] < dim:\n raise ValueError(\n 'window shape cannot be larger than input array shape')\n x_shape_trimmed[ax] -= dim - 1\n out_shape = tuple(x_shape_trimmed) + window_shape\n return as_strided(x, strides=out_strides, shape=out_shape,\n subok=subok, writeable=writeable)\n\n\ndef _broadcast_to(array, shape, subok, readonly):\n shape = tuple(shape) if np.iterable(shape) else (shape,)\n array = np.array(array, copy=None, subok=subok)\n if not shape and array.shape:\n raise ValueError('cannot broadcast a non-scalar to a scalar array')\n if any(size < 0 for size in shape):\n raise ValueError('all elements of broadcast shape must be non-'\n 'negative')\n extras = []\n it = np.nditer(\n (array,), flags=['multi_index', 'refs_ok', 'zerosize_ok'] + extras,\n op_flags=['readonly'], itershape=shape, order='C')\n with it:\n # never really has writebackifcopy semantics\n broadcast = it.itviews[0]\n result = _maybe_view_as_subclass(array, broadcast)\n # In a future version this will go away\n if not readonly and array.flags._writeable_no_warn:\n result.flags.writeable = True\n result.flags._warn_on_write = True\n return result\n\n\ndef _broadcast_to_dispatcher(array, shape, subok=None):\n return (array,)\n\n\n@array_function_dispatch(_broadcast_to_dispatcher, module='numpy')\ndef broadcast_to(array, shape, subok=False):\n """Broadcast an array to a new shape.\n\n Parameters\n ----------\n array : array_like\n The array to broadcast.\n shape : tuple or int\n The shape of the desired array. 
A single integer ``i`` is interpreted\n as ``(i,)``.\n subok : bool, optional\n If True, then sub-classes will be passed-through, otherwise\n the returned array will be forced to be a base-class array (default).\n\n Returns\n -------\n broadcast : array\n A readonly view on the original array with the given shape. It is\n typically not contiguous. Furthermore, more than one element of a\n broadcasted array may refer to a single memory location.\n\n Raises\n ------\n ValueError\n If the array is not compatible with the new shape according to NumPy's\n broadcasting rules.\n\n See Also\n --------\n broadcast\n broadcast_arrays\n broadcast_shapes\n\n Examples\n --------\n >>> import numpy as np\n >>> x = np.array([1, 2, 3])\n >>> np.broadcast_to(x, (3, 3))\n array([[1, 2, 3],\n [1, 2, 3],\n [1, 2, 3]])\n """\n return _broadcast_to(array, shape, subok=subok, readonly=True)\n\n\ndef _broadcast_shape(*args):\n """Returns the shape of the arrays that would result from broadcasting the\n supplied arrays against each other.\n """\n # use the old-iterator because np.nditer does not handle size 0 arrays\n # consistently\n b = np.broadcast(*args[:32])\n # unfortunately, it cannot handle 32 or more arguments directly\n for pos in range(32, len(args), 31):\n # ironically, np.broadcast does not properly handle np.broadcast\n # objects (it treats them as scalars)\n # use broadcasting to avoid allocating the full array\n b = broadcast_to(0, b.shape)\n b = np.broadcast(b, *args[pos:(pos + 31)])\n return b.shape\n\n\n_size0_dtype = np.dtype([])\n\n\n@set_module('numpy')\ndef broadcast_shapes(*args):\n """\n Broadcast the input shapes into a single shape.\n\n :ref:`Learn more about broadcasting here <basics.broadcasting>`.\n\n .. 
versionadded:: 1.20.0\n\n Parameters\n ----------\n *args : tuples of ints, or ints\n The shapes to be broadcast against each other.\n\n Returns\n -------\n tuple\n Broadcasted shape.\n\n Raises\n ------\n ValueError\n If the shapes are not compatible and cannot be broadcast according\n to NumPy's broadcasting rules.\n\n See Also\n --------\n broadcast\n broadcast_arrays\n broadcast_to\n\n Examples\n --------\n >>> import numpy as np\n >>> np.broadcast_shapes((1, 2), (3, 1), (3, 2))\n (3, 2)\n\n >>> np.broadcast_shapes((6, 7), (5, 6, 1), (7,), (5, 1, 7))\n (5, 6, 7)\n """\n arrays = [np.empty(x, dtype=_size0_dtype) for x in args]\n return _broadcast_shape(*arrays)\n\n\ndef _broadcast_arrays_dispatcher(*args, subok=None):\n return args\n\n\n@array_function_dispatch(_broadcast_arrays_dispatcher, module='numpy')\ndef broadcast_arrays(*args, subok=False):\n """\n Broadcast any number of arrays against each other.\n\n Parameters\n ----------\n *args : array_likes\n The arrays to broadcast.\n\n subok : bool, optional\n If True, then sub-classes will be passed-through, otherwise\n the returned arrays will be forced to be a base-class array (default).\n\n Returns\n -------\n broadcasted : tuple of arrays\n These arrays are views on the original arrays. They are typically\n not contiguous. Furthermore, more than one element of a\n broadcasted array may refer to a single memory location. If you need\n to write to the arrays, make copies first. While you can set the\n ``writable`` flag True, writing to a single output value may end up\n changing more than one location in the output array.\n\n .. deprecated:: 1.17\n The output is currently marked so that if written to, a deprecation\n warning will be emitted. 
A future version will set the\n ``writable`` flag False so writing to it will raise an error.\n\n See Also\n --------\n broadcast\n broadcast_to\n broadcast_shapes\n\n Examples\n --------\n >>> import numpy as np\n >>> x = np.array([[1,2,3]])\n >>> y = np.array([[4],[5]])\n >>> np.broadcast_arrays(x, y)\n (array([[1, 2, 3],\n [1, 2, 3]]),\n array([[4, 4, 4],\n [5, 5, 5]]))\n\n Here is a useful idiom for getting contiguous copies instead of\n non-contiguous views.\n\n >>> [np.array(a) for a in np.broadcast_arrays(x, y)]\n [array([[1, 2, 3],\n [1, 2, 3]]),\n array([[4, 4, 4],\n [5, 5, 5]])]\n\n """\n # nditer is not used here to avoid the limit of 32 arrays.\n # Otherwise, something like the following one-liner would suffice:\n # return np.nditer(args, flags=['multi_index', 'zerosize_ok'],\n # order='C').itviews\n\n args = [np.array(_m, copy=None, subok=subok) for _m in args]\n\n shape = _broadcast_shape(*args)\n\n result = [array if array.shape == shape\n else _broadcast_to(array, shape, subok=subok, readonly=False)\n for array in args]\n return tuple(result)\n | .venv\Lib\site-packages\numpy\lib\_stride_tricks_impl.py | _stride_tricks_impl.py | Python | 18,574 | 0.95 | 0.10929 | 0.05298 | awesome-app | 701 | 2024-09-28T05:49:56.047156 | Apache-2.0 | false | 12f4813761e8c5b71aea2c4a8fff1932 |
from collections.abc import Iterable\nfrom typing import Any, SupportsIndex, TypeVar, overload\n\nfrom numpy import generic\nfrom numpy._typing import ArrayLike, NDArray, _AnyShape, _ArrayLike, _ShapeLike\n\n__all__ = ["broadcast_to", "broadcast_arrays", "broadcast_shapes"]\n\n_ScalarT = TypeVar("_ScalarT", bound=generic)\n\nclass DummyArray:\n __array_interface__: dict[str, Any]\n base: NDArray[Any] | None\n def __init__(\n self,\n interface: dict[str, Any],\n base: NDArray[Any] | None = ...,\n ) -> None: ...\n\n@overload\ndef as_strided(\n x: _ArrayLike[_ScalarT],\n shape: Iterable[int] | None = ...,\n strides: Iterable[int] | None = ...,\n subok: bool = ...,\n writeable: bool = ...,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef as_strided(\n x: ArrayLike,\n shape: Iterable[int] | None = ...,\n strides: Iterable[int] | None = ...,\n subok: bool = ...,\n writeable: bool = ...,\n) -> NDArray[Any]: ...\n\n@overload\ndef sliding_window_view(\n x: _ArrayLike[_ScalarT],\n window_shape: int | Iterable[int],\n axis: SupportsIndex | None = ...,\n *,\n subok: bool = ...,\n writeable: bool = ...,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef sliding_window_view(\n x: ArrayLike,\n window_shape: int | Iterable[int],\n axis: SupportsIndex | None = ...,\n *,\n subok: bool = ...,\n writeable: bool = ...,\n) -> NDArray[Any]: ...\n\n@overload\ndef broadcast_to(\n array: _ArrayLike[_ScalarT],\n shape: int | Iterable[int],\n subok: bool = ...,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef broadcast_to(\n array: ArrayLike,\n shape: int | Iterable[int],\n subok: bool = ...,\n) -> NDArray[Any]: ...\n\ndef broadcast_shapes(*args: _ShapeLike) -> _AnyShape: ...\n\ndef broadcast_arrays(\n *args: ArrayLike,\n subok: bool = ...,\n) -> tuple[NDArray[Any], ...]: ...\n | .venv\Lib\site-packages\numpy\lib\_stride_tricks_impl.pyi | _stride_tricks_impl.pyi | Other | 1,889 | 0.85 | 0.135135 | 0.046154 | node-utils | 294 | 2023-11-06T02:30:25.999186 | BSD-3-Clause | false | 
1bd8861c68d87b9e139702bbab62d466 |
""" Basic functions for manipulating 2d arrays\n\n"""\nimport functools\nimport operator\n\nfrom numpy._core import iinfo, overrides\nfrom numpy._core._multiarray_umath import _array_converter\nfrom numpy._core.numeric import (\n arange,\n asanyarray,\n asarray,\n diagonal,\n empty,\n greater_equal,\n indices,\n int8,\n int16,\n int32,\n int64,\n intp,\n multiply,\n nonzero,\n ones,\n promote_types,\n where,\n zeros,\n)\nfrom numpy._core.overrides import finalize_array_function_like, set_module\nfrom numpy.lib._stride_tricks_impl import broadcast_to\n\n__all__ = [\n 'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'tri', 'triu',\n 'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',\n 'tril_indices_from', 'triu_indices', 'triu_indices_from', ]\n\n\narray_function_dispatch = functools.partial(\n overrides.array_function_dispatch, module='numpy')\n\n\ni1 = iinfo(int8)\ni2 = iinfo(int16)\ni4 = iinfo(int32)\n\n\ndef _min_int(low, high):\n """ get small int that fits the range """\n if high <= i1.max and low >= i1.min:\n return int8\n if high <= i2.max and low >= i2.min:\n return int16\n if high <= i4.max and low >= i4.min:\n return int32\n return int64\n\n\ndef _flip_dispatcher(m):\n return (m,)\n\n\n@array_function_dispatch(_flip_dispatcher)\ndef fliplr(m):\n """\n Reverse the order of elements along axis 1 (left/right).\n\n For a 2-D array, this flips the entries in each row in the left/right\n direction. Columns are preserved, but appear in a different order than\n before.\n\n Parameters\n ----------\n m : array_like\n Input array, must be at least 2-D.\n\n Returns\n -------\n f : ndarray\n A view of `m` with the columns reversed. 
Since a view\n is returned, this operation is :math:`\\mathcal O(1)`.\n\n See Also\n --------\n flipud : Flip array in the up/down direction.\n flip : Flip array in one or more dimensions.\n rot90 : Rotate array counterclockwise.\n\n Notes\n -----\n Equivalent to ``m[:,::-1]`` or ``np.flip(m, axis=1)``.\n Requires the array to be at least 2-D.\n\n Examples\n --------\n >>> import numpy as np\n >>> A = np.diag([1.,2.,3.])\n >>> A\n array([[1., 0., 0.],\n [0., 2., 0.],\n [0., 0., 3.]])\n >>> np.fliplr(A)\n array([[0., 0., 1.],\n [0., 2., 0.],\n [3., 0., 0.]])\n\n >>> rng = np.random.default_rng()\n >>> A = rng.normal(size=(2,3,5))\n >>> np.all(np.fliplr(A) == A[:,::-1,...])\n True\n\n """\n m = asanyarray(m)\n if m.ndim < 2:\n raise ValueError("Input must be >= 2-d.")\n return m[:, ::-1]\n\n\n@array_function_dispatch(_flip_dispatcher)\ndef flipud(m):\n """\n Reverse the order of elements along axis 0 (up/down).\n\n For a 2-D array, this flips the entries in each column in the up/down\n direction. Rows are preserved, but appear in a different order than before.\n\n Parameters\n ----------\n m : array_like\n Input array.\n\n Returns\n -------\n out : array_like\n A view of `m` with the rows reversed. 
Since a view is\n returned, this operation is :math:`\\mathcal O(1)`.\n\n See Also\n --------\n fliplr : Flip array in the left/right direction.\n flip : Flip array in one or more dimensions.\n rot90 : Rotate array counterclockwise.\n\n Notes\n -----\n Equivalent to ``m[::-1, ...]`` or ``np.flip(m, axis=0)``.\n Requires the array to be at least 1-D.\n\n Examples\n --------\n >>> import numpy as np\n >>> A = np.diag([1.0, 2, 3])\n >>> A\n array([[1., 0., 0.],\n [0., 2., 0.],\n [0., 0., 3.]])\n >>> np.flipud(A)\n array([[0., 0., 3.],\n [0., 2., 0.],\n [1., 0., 0.]])\n\n >>> rng = np.random.default_rng()\n >>> A = rng.normal(size=(2,3,5))\n >>> np.all(np.flipud(A) == A[::-1,...])\n True\n\n >>> np.flipud([1,2])\n array([2, 1])\n\n """\n m = asanyarray(m)\n if m.ndim < 1:\n raise ValueError("Input must be >= 1-d.")\n return m[::-1, ...]\n\n\n@finalize_array_function_like\n@set_module('numpy')\ndef eye(N, M=None, k=0, dtype=float, order='C', *, device=None, like=None):\n """\n Return a 2-D array with ones on the diagonal and zeros elsewhere.\n\n Parameters\n ----------\n N : int\n Number of rows in the output.\n M : int, optional\n Number of columns in the output. If None, defaults to `N`.\n k : int, optional\n Index of the diagonal: 0 (the default) refers to the main diagonal,\n a positive value refers to an upper diagonal, and a negative value\n to a lower diagonal.\n dtype : data-type, optional\n Data-type of the returned array.\n order : {'C', 'F'}, optional\n Whether the output should be stored in row-major (C-style) or\n column-major (Fortran-style) order in memory.\n device : str, optional\n The device on which to place the created array. Default: None.\n For Array-API interoperability only, so must be ``"cpu"`` if passed.\n\n .. versionadded:: 2.0.0\n ${ARRAY_FUNCTION_LIKE}\n\n .. 
versionadded:: 1.20.0\n\n Returns\n -------\n I : ndarray of shape (N,M)\n An array where all elements are equal to zero, except for the `k`-th\n diagonal, whose values are equal to one.\n\n See Also\n --------\n identity : (almost) equivalent function\n diag : diagonal 2-D array from a 1-D array specified by the user.\n\n Examples\n --------\n >>> import numpy as np\n >>> np.eye(2, dtype=int)\n array([[1, 0],\n [0, 1]])\n >>> np.eye(3, k=1)\n array([[0., 1., 0.],\n [0., 0., 1.],\n [0., 0., 0.]])\n\n """\n if like is not None:\n return _eye_with_like(\n like, N, M=M, k=k, dtype=dtype, order=order, device=device\n )\n if M is None:\n M = N\n m = zeros((N, M), dtype=dtype, order=order, device=device)\n if k >= M:\n return m\n # Ensure M and k are integers, so we don't get any surprise casting\n # results in the expressions `M-k` and `M+1` used below. This avoids\n # a problem with inputs with type (for example) np.uint64.\n M = operator.index(M)\n k = operator.index(k)\n if k >= 0:\n i = k\n else:\n i = (-k) * M\n m[:M - k].flat[i::M + 1] = 1\n return m\n\n\n_eye_with_like = array_function_dispatch()(eye)\n\n\ndef _diag_dispatcher(v, k=None):\n return (v,)\n\n\n@array_function_dispatch(_diag_dispatcher)\ndef diag(v, k=0):\n """\n Extract a diagonal or construct a diagonal array.\n\n See the more detailed documentation for ``numpy.diagonal`` if you use this\n function to extract a diagonal and wish to write to the resulting array;\n whether it returns a copy or a view depends on what version of numpy you\n are using.\n\n Parameters\n ----------\n v : array_like\n If `v` is a 2-D array, return a copy of its `k`-th diagonal.\n If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th\n diagonal.\n k : int, optional\n Diagonal in question. The default is 0. 
Use `k>0` for diagonals\n above the main diagonal, and `k<0` for diagonals below the main\n diagonal.\n\n Returns\n -------\n out : ndarray\n The extracted diagonal or constructed diagonal array.\n\n See Also\n --------\n diagonal : Return specified diagonals.\n diagflat : Create a 2-D array with the flattened input as a diagonal.\n trace : Sum along diagonals.\n triu : Upper triangle of an array.\n tril : Lower triangle of an array.\n\n Examples\n --------\n >>> import numpy as np\n >>> x = np.arange(9).reshape((3,3))\n >>> x\n array([[0, 1, 2],\n [3, 4, 5],\n [6, 7, 8]])\n\n >>> np.diag(x)\n array([0, 4, 8])\n >>> np.diag(x, k=1)\n array([1, 5])\n >>> np.diag(x, k=-1)\n array([3, 7])\n\n >>> np.diag(np.diag(x))\n array([[0, 0, 0],\n [0, 4, 0],\n [0, 0, 8]])\n\n """\n v = asanyarray(v)\n s = v.shape\n if len(s) == 1:\n n = s[0] + abs(k)\n res = zeros((n, n), v.dtype)\n if k >= 0:\n i = k\n else:\n i = (-k) * n\n res[:n - k].flat[i::n + 1] = v\n return res\n elif len(s) == 2:\n return diagonal(v, k)\n else:\n raise ValueError("Input must be 1- or 2-d.")\n\n\n@array_function_dispatch(_diag_dispatcher)\ndef diagflat(v, k=0):\n """\n Create a two-dimensional array with the flattened input as a diagonal.\n\n Parameters\n ----------\n v : array_like\n Input data, which is flattened and set as the `k`-th\n diagonal of the output.\n k : int, optional\n Diagonal to set; 0, the default, corresponds to the "main" diagonal,\n a positive (negative) `k` giving the number of the diagonal above\n (below) the main.\n\n Returns\n -------\n out : ndarray\n The 2-D output array.\n\n See Also\n --------\n diag : MATLAB work-alike for 1-D and 2-D arrays.\n diagonal : Return specified diagonals.\n trace : Sum along diagonals.\n\n Examples\n --------\n >>> import numpy as np\n >>> np.diagflat([[1,2], [3,4]])\n array([[1, 0, 0, 0],\n [0, 2, 0, 0],\n [0, 0, 3, 0],\n [0, 0, 0, 4]])\n\n >>> np.diagflat([1,2], 1)\n array([[0, 1, 0],\n [0, 0, 2],\n [0, 0, 0]])\n\n """\n conv = 
_array_converter(v)\n v, = conv.as_arrays(subok=False)\n v = v.ravel()\n s = len(v)\n n = s + abs(k)\n res = zeros((n, n), v.dtype)\n if (k >= 0):\n i = arange(0, n - k, dtype=intp)\n fi = i + k + i * n\n else:\n i = arange(0, n + k, dtype=intp)\n fi = i + (i - k) * n\n res.flat[fi] = v\n\n return conv.wrap(res)\n\n\n@finalize_array_function_like\n@set_module('numpy')\ndef tri(N, M=None, k=0, dtype=float, *, like=None):\n """\n An array with ones at and below the given diagonal and zeros elsewhere.\n\n Parameters\n ----------\n N : int\n Number of rows in the array.\n M : int, optional\n Number of columns in the array.\n By default, `M` is taken equal to `N`.\n k : int, optional\n The sub-diagonal at and below which the array is filled.\n `k` = 0 is the main diagonal, while `k` < 0 is below it,\n and `k` > 0 is above. The default is 0.\n dtype : dtype, optional\n Data type of the returned array. The default is float.\n ${ARRAY_FUNCTION_LIKE}\n\n .. versionadded:: 1.20.0\n\n Returns\n -------\n tri : ndarray of shape (N, M)\n Array with its lower triangle filled with ones and zero elsewhere;\n in other words ``T[i,j] == 1`` for ``j <= i + k``, 0 otherwise.\n\n Examples\n --------\n >>> import numpy as np\n >>> np.tri(3, 5, 2, dtype=int)\n array([[1, 1, 1, 0, 0],\n [1, 1, 1, 1, 0],\n [1, 1, 1, 1, 1]])\n\n >>> np.tri(3, 5, -1)\n array([[0., 0., 0., 0., 0.],\n [1., 0., 0., 0., 0.],\n [1., 1., 0., 0., 0.]])\n\n """\n if like is not None:\n return _tri_with_like(like, N, M=M, k=k, dtype=dtype)\n\n if M is None:\n M = N\n\n m = greater_equal.outer(arange(N, dtype=_min_int(0, N)),\n arange(-k, M - k, dtype=_min_int(-k, M - k)))\n\n # Avoid making a copy if the requested type is already bool\n m = m.astype(dtype, copy=False)\n\n return m\n\n\n_tri_with_like = array_function_dispatch()(tri)\n\n\ndef _trilu_dispatcher(m, k=None):\n return (m,)\n\n\n@array_function_dispatch(_trilu_dispatcher)\ndef tril(m, k=0):\n """\n Lower triangle of an array.\n\n Return a copy of an array 
with elements above the `k`-th diagonal zeroed.\n For arrays with ``ndim`` exceeding 2, `tril` will apply to the final two\n axes.\n\n Parameters\n ----------\n m : array_like, shape (..., M, N)\n Input array.\n k : int, optional\n Diagonal above which to zero elements. `k = 0` (the default) is the\n main diagonal, `k < 0` is below it and `k > 0` is above.\n\n Returns\n -------\n tril : ndarray, shape (..., M, N)\n Lower triangle of `m`, of same shape and data-type as `m`.\n\n See Also\n --------\n triu : same thing, only for the upper triangle\n\n Examples\n --------\n >>> import numpy as np\n >>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)\n array([[ 0, 0, 0],\n [ 4, 0, 0],\n [ 7, 8, 0],\n [10, 11, 12]])\n\n >>> np.tril(np.arange(3*4*5).reshape(3, 4, 5))\n array([[[ 0, 0, 0, 0, 0],\n [ 5, 6, 0, 0, 0],\n [10, 11, 12, 0, 0],\n [15, 16, 17, 18, 0]],\n [[20, 0, 0, 0, 0],\n [25, 26, 0, 0, 0],\n [30, 31, 32, 0, 0],\n [35, 36, 37, 38, 0]],\n [[40, 0, 0, 0, 0],\n [45, 46, 0, 0, 0],\n [50, 51, 52, 0, 0],\n [55, 56, 57, 58, 0]]])\n\n """\n m = asanyarray(m)\n mask = tri(*m.shape[-2:], k=k, dtype=bool)\n\n return where(mask, m, zeros(1, m.dtype))\n\n\n@array_function_dispatch(_trilu_dispatcher)\ndef triu(m, k=0):\n """\n Upper triangle of an array.\n\n Return a copy of an array with the elements below the `k`-th diagonal\n zeroed. 
For arrays with ``ndim`` exceeding 2, `triu` will apply to the\n final two axes.\n\n Please refer to the documentation for `tril` for further details.\n\n See Also\n --------\n tril : lower triangle of an array\n\n Examples\n --------\n >>> import numpy as np\n >>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)\n array([[ 1, 2, 3],\n [ 4, 5, 6],\n [ 0, 8, 9],\n [ 0, 0, 12]])\n\n >>> np.triu(np.arange(3*4*5).reshape(3, 4, 5))\n array([[[ 0, 1, 2, 3, 4],\n [ 0, 6, 7, 8, 9],\n [ 0, 0, 12, 13, 14],\n [ 0, 0, 0, 18, 19]],\n [[20, 21, 22, 23, 24],\n [ 0, 26, 27, 28, 29],\n [ 0, 0, 32, 33, 34],\n [ 0, 0, 0, 38, 39]],\n [[40, 41, 42, 43, 44],\n [ 0, 46, 47, 48, 49],\n [ 0, 0, 52, 53, 54],\n [ 0, 0, 0, 58, 59]]])\n\n """\n m = asanyarray(m)\n mask = tri(*m.shape[-2:], k=k - 1, dtype=bool)\n\n return where(mask, zeros(1, m.dtype), m)\n\n\ndef _vander_dispatcher(x, N=None, increasing=None):\n return (x,)\n\n\n# Originally borrowed from John Hunter and matplotlib\n@array_function_dispatch(_vander_dispatcher)\ndef vander(x, N=None, increasing=False):\n """\n Generate a Vandermonde matrix.\n\n The columns of the output matrix are powers of the input vector. The\n order of the powers is determined by the `increasing` boolean argument.\n Specifically, when `increasing` is False, the `i`-th output column is\n the input vector raised element-wise to the power of ``N - i - 1``. Such\n a matrix with a geometric progression in each row is named for Alexandre-\n Theophile Vandermonde.\n\n Parameters\n ----------\n x : array_like\n 1-D input array.\n N : int, optional\n Number of columns in the output. If `N` is not specified, a square\n array is returned (``N = len(x)``).\n increasing : bool, optional\n Order of the powers of the columns. If True, the powers increase\n from left to right, if False (the default) they are reversed.\n\n Returns\n -------\n out : ndarray\n Vandermonde matrix. 
If `increasing` is False, the first column is\n ``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is\n True, the columns are ``x^0, x^1, ..., x^(N-1)``.\n\n See Also\n --------\n polynomial.polynomial.polyvander\n\n Examples\n --------\n >>> import numpy as np\n >>> x = np.array([1, 2, 3, 5])\n >>> N = 3\n >>> np.vander(x, N)\n array([[ 1, 1, 1],\n [ 4, 2, 1],\n [ 9, 3, 1],\n [25, 5, 1]])\n\n >>> np.column_stack([x**(N-1-i) for i in range(N)])\n array([[ 1, 1, 1],\n [ 4, 2, 1],\n [ 9, 3, 1],\n [25, 5, 1]])\n\n >>> x = np.array([1, 2, 3, 5])\n >>> np.vander(x)\n array([[ 1, 1, 1, 1],\n [ 8, 4, 2, 1],\n [ 27, 9, 3, 1],\n [125, 25, 5, 1]])\n >>> np.vander(x, increasing=True)\n array([[ 1, 1, 1, 1],\n [ 1, 2, 4, 8],\n [ 1, 3, 9, 27],\n [ 1, 5, 25, 125]])\n\n The determinant of a square Vandermonde matrix is the product\n of the differences between the values of the input vector:\n\n >>> np.linalg.det(np.vander(x))\n 48.000000000000043 # may vary\n >>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)\n 48\n\n """\n x = asarray(x)\n if x.ndim != 1:\n raise ValueError("x must be a one-dimensional array or sequence.")\n if N is None:\n N = len(x)\n\n v = empty((len(x), N), dtype=promote_types(x.dtype, int))\n tmp = v[:, ::-1] if not increasing else v\n\n if N > 0:\n tmp[:, 0] = 1\n if N > 1:\n tmp[:, 1:] = x[:, None]\n multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1)\n\n return v\n\n\ndef _histogram2d_dispatcher(x, y, bins=None, range=None, density=None,\n weights=None):\n yield x\n yield y\n\n # This terrible logic is adapted from the checks in histogram2d\n try:\n N = len(bins)\n except TypeError:\n N = 1\n if N == 2:\n yield from bins # bins=[x, y]\n else:\n yield bins\n\n yield weights\n\n\n@array_function_dispatch(_histogram2d_dispatcher)\ndef histogram2d(x, y, bins=10, range=None, density=None, weights=None):\n """\n Compute the bi-dimensional histogram of two data samples.\n\n Parameters\n ----------\n x : array_like, shape (N,)\n An array containing the x 
coordinates of the points to be\n histogrammed.\n y : array_like, shape (N,)\n An array containing the y coordinates of the points to be\n histogrammed.\n bins : int or array_like or [int, int] or [array, array], optional\n The bin specification:\n\n * If int, the number of bins for the two dimensions (nx=ny=bins).\n * If array_like, the bin edges for the two dimensions\n (x_edges=y_edges=bins).\n * If [int, int], the number of bins in each dimension\n (nx, ny = bins).\n * If [array, array], the bin edges in each dimension\n (x_edges, y_edges = bins).\n * A combination [int, array] or [array, int], where int\n is the number of bins and array is the bin edges.\n\n range : array_like, shape(2,2), optional\n The leftmost and rightmost edges of the bins along each dimension\n (if not specified explicitly in the `bins` parameters):\n ``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range\n will be considered outliers and not tallied in the histogram.\n density : bool, optional\n If False, the default, returns the number of samples in each bin.\n If True, returns the probability *density* function at the bin,\n ``bin_count / sample_count / bin_area``.\n weights : array_like, shape(N,), optional\n An array of values ``w_i`` weighing each sample ``(x_i, y_i)``.\n Weights are normalized to 1 if `density` is True. If `density` is\n False, the values of the returned histogram are equal to the sum of\n the weights belonging to the samples falling into each bin.\n\n Returns\n -------\n H : ndarray, shape(nx, ny)\n The bi-dimensional histogram of samples `x` and `y`. 
Values in `x`\n are histogrammed along the first dimension and values in `y` are\n histogrammed along the second dimension.\n xedges : ndarray, shape(nx+1,)\n The bin edges along the first dimension.\n yedges : ndarray, shape(ny+1,)\n The bin edges along the second dimension.\n\n See Also\n --------\n histogram : 1D histogram\n histogramdd : Multidimensional histogram\n\n Notes\n -----\n When `density` is True, then the returned histogram is the sample\n density, defined such that the sum over bins of the product\n ``bin_value * bin_area`` is 1.\n\n Please note that the histogram does not follow the Cartesian convention\n where `x` values are on the abscissa and `y` values on the ordinate\n axis. Rather, `x` is histogrammed along the first dimension of the\n array (vertical), and `y` along the second dimension of the array\n (horizontal). This ensures compatibility with `histogramdd`.\n\n Examples\n --------\n >>> import numpy as np\n >>> from matplotlib.image import NonUniformImage\n >>> import matplotlib.pyplot as plt\n\n Construct a 2-D histogram with variable bin width. First define the bin\n edges:\n\n >>> xedges = [0, 1, 3, 5]\n >>> yedges = [0, 2, 3, 4, 6]\n\n Next we create a histogram H with random bin content:\n\n >>> x = np.random.normal(2, 1, 100)\n >>> y = np.random.normal(1, 1, 100)\n >>> H, xedges, yedges = np.histogram2d(x, y, bins=(xedges, yedges))\n >>> # Histogram does not follow Cartesian convention (see Notes),\n >>> # therefore transpose H for visualization purposes.\n >>> H = H.T\n\n :func:`imshow <matplotlib.pyplot.imshow>` can only display square bins:\n\n >>> fig = plt.figure(figsize=(7, 3))\n >>> ax = fig.add_subplot(131, title='imshow: square bins')\n >>> plt.imshow(H, interpolation='nearest', origin='lower',\n ... 
extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])\n <matplotlib.image.AxesImage object at 0x...>\n\n :func:`pcolormesh <matplotlib.pyplot.pcolormesh>` can display actual edges:\n\n >>> ax = fig.add_subplot(132, title='pcolormesh: actual edges',\n ... aspect='equal')\n >>> X, Y = np.meshgrid(xedges, yedges)\n >>> ax.pcolormesh(X, Y, H)\n <matplotlib.collections.QuadMesh object at 0x...>\n\n :class:`NonUniformImage <matplotlib.image.NonUniformImage>` can be used to\n display actual bin edges with interpolation:\n\n >>> ax = fig.add_subplot(133, title='NonUniformImage: interpolated',\n ... aspect='equal', xlim=xedges[[0, -1]], ylim=yedges[[0, -1]])\n >>> im = NonUniformImage(ax, interpolation='bilinear')\n >>> xcenters = (xedges[:-1] + xedges[1:]) / 2\n >>> ycenters = (yedges[:-1] + yedges[1:]) / 2\n >>> im.set_data(xcenters, ycenters, H)\n >>> ax.add_image(im)\n >>> plt.show()\n\n It is also possible to construct a 2-D histogram without specifying bin\n edges:\n\n >>> # Generate non-symmetric test data\n >>> n = 10000\n >>> x = np.linspace(1, 100, n)\n >>> y = 2*np.log(x) + np.random.rand(n) - 0.5\n >>> # Compute 2d histogram. 
Note the order of x/y and xedges/yedges\n >>> H, yedges, xedges = np.histogram2d(y, x, bins=20)\n\n Now we can plot the histogram using\n :func:`pcolormesh <matplotlib.pyplot.pcolormesh>`, and a\n :func:`hexbin <matplotlib.pyplot.hexbin>` for comparison.\n\n >>> # Plot histogram using pcolormesh\n >>> fig, (ax1, ax2) = plt.subplots(ncols=2, sharey=True)\n >>> ax1.pcolormesh(xedges, yedges, H, cmap='rainbow')\n >>> ax1.plot(x, 2*np.log(x), 'k-')\n >>> ax1.set_xlim(x.min(), x.max())\n >>> ax1.set_ylim(y.min(), y.max())\n >>> ax1.set_xlabel('x')\n >>> ax1.set_ylabel('y')\n >>> ax1.set_title('histogram2d')\n >>> ax1.grid()\n\n >>> # Create hexbin plot for comparison\n >>> ax2.hexbin(x, y, gridsize=20, cmap='rainbow')\n >>> ax2.plot(x, 2*np.log(x), 'k-')\n >>> ax2.set_title('hexbin')\n >>> ax2.set_xlim(x.min(), x.max())\n >>> ax2.set_xlabel('x')\n >>> ax2.grid()\n\n >>> plt.show()\n """\n from numpy import histogramdd\n\n if len(x) != len(y):\n raise ValueError('x and y must have the same length.')\n\n try:\n N = len(bins)\n except TypeError:\n N = 1\n\n if N not in {1, 2}:\n xedges = yedges = asarray(bins)\n bins = [xedges, yedges]\n hist, edges = histogramdd([x, y], bins, range, density, weights)\n return hist, edges[0], edges[1]\n\n\n@set_module('numpy')\ndef mask_indices(n, mask_func, k=0):\n """\n Return the indices to access (n, n) arrays, given a masking function.\n\n Assume `mask_func` is a function that, for a square array a of size\n ``(n, n)`` with a possible offset argument `k`, when called as\n ``mask_func(a, k)`` returns a new array with zeros in certain locations\n (functions like `triu` or `tril` do precisely this). 
Then this function\n returns the indices where the non-zero values would be located.\n\n Parameters\n ----------\n n : int\n The returned indices will be valid to access arrays of shape (n, n).\n mask_func : callable\n A function whose call signature is similar to that of `triu`, `tril`.\n That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.\n `k` is an optional argument to the function.\n k : scalar\n An optional argument which is passed through to `mask_func`. Functions\n like `triu`, `tril` take a second argument that is interpreted as an\n offset.\n\n Returns\n -------\n indices : tuple of arrays.\n The `n` arrays of indices corresponding to the locations where\n ``mask_func(np.ones((n, n)), k)`` is True.\n\n See Also\n --------\n triu, tril, triu_indices, tril_indices\n\n Examples\n --------\n >>> import numpy as np\n\n These are the indices that would allow you to access the upper triangular\n part of any 3x3 array:\n\n >>> iu = np.mask_indices(3, np.triu)\n\n For example, if `a` is a 3x3 array:\n\n >>> a = np.arange(9).reshape(3, 3)\n >>> a\n array([[0, 1, 2],\n [3, 4, 5],\n [6, 7, 8]])\n >>> a[iu]\n array([0, 1, 2, 4, 5, 8])\n\n An offset can be passed also to the masking function. 
This gets us the\n indices starting on the first diagonal right of the main one:\n\n >>> iu1 = np.mask_indices(3, np.triu, 1)\n\n with which we now extract only three elements:\n\n >>> a[iu1]\n array([1, 2, 5])\n\n """\n m = ones((n, n), int)\n a = mask_func(m, k)\n return nonzero(a != 0)\n\n\n@set_module('numpy')\ndef tril_indices(n, k=0, m=None):\n """\n Return the indices for the lower-triangle of an (n, m) array.\n\n Parameters\n ----------\n n : int\n The row dimension of the arrays for which the returned\n indices will be valid.\n k : int, optional\n Diagonal offset (see `tril` for details).\n m : int, optional\n The column dimension of the arrays for which the returned\n arrays will be valid.\n By default `m` is taken equal to `n`.\n\n\n Returns\n -------\n inds : tuple of arrays\n The row and column indices, respectively. The row indices are sorted\n in non-decreasing order, and the correspdonding column indices are\n strictly increasing for each row.\n\n See also\n --------\n triu_indices : similar function, for upper-triangular.\n mask_indices : generic function accepting an arbitrary mask function.\n tril, triu\n\n Examples\n --------\n >>> import numpy as np\n\n Compute two different sets of indices to access 4x4 arrays, one for the\n lower triangular part starting at the main diagonal, and one starting two\n diagonals further right:\n\n >>> il1 = np.tril_indices(4)\n >>> il1\n (array([0, 1, 1, 2, 2, 2, 3, 3, 3, 3]), array([0, 0, 1, 0, 1, 2, 0, 1, 2, 3]))\n\n Note that row indices (first array) are non-decreasing, and the corresponding\n column indices (second array) are strictly increasing for each row.\n Here is how they can be used with a sample array:\n\n >>> a = np.arange(16).reshape(4, 4)\n >>> a\n array([[ 0, 1, 2, 3],\n [ 4, 5, 6, 7],\n [ 8, 9, 10, 11],\n [12, 13, 14, 15]])\n\n Both for indexing:\n\n >>> a[il1]\n array([ 0, 4, 5, ..., 13, 14, 15])\n\n And for assigning values:\n\n >>> a[il1] = -1\n >>> a\n array([[-1, 1, 2, 3],\n [-1, -1, 6, 
7],\n [-1, -1, -1, 11],\n [-1, -1, -1, -1]])\n\n These cover almost the whole array (two diagonals right of the main one):\n\n >>> il2 = np.tril_indices(4, 2)\n >>> a[il2] = -10\n >>> a\n array([[-10, -10, -10, 3],\n [-10, -10, -10, -10],\n [-10, -10, -10, -10],\n [-10, -10, -10, -10]])\n\n """\n tri_ = tri(n, m, k=k, dtype=bool)\n\n return tuple(broadcast_to(inds, tri_.shape)[tri_]\n for inds in indices(tri_.shape, sparse=True))\n\n\ndef _trilu_indices_form_dispatcher(arr, k=None):\n return (arr,)\n\n\n@array_function_dispatch(_trilu_indices_form_dispatcher)\ndef tril_indices_from(arr, k=0):\n """\n Return the indices for the lower-triangle of arr.\n\n See `tril_indices` for full details.\n\n Parameters\n ----------\n arr : array_like\n The indices will be valid for square arrays whose dimensions are\n the same as arr.\n k : int, optional\n Diagonal offset (see `tril` for details).\n\n Examples\n --------\n >>> import numpy as np\n\n Create a 4 by 4 array\n\n >>> a = np.arange(16).reshape(4, 4)\n >>> a\n array([[ 0, 1, 2, 3],\n [ 4, 5, 6, 7],\n [ 8, 9, 10, 11],\n [12, 13, 14, 15]])\n\n Pass the array to get the indices of the lower triangular elements.\n\n >>> trili = np.tril_indices_from(a)\n >>> trili\n (array([0, 1, 1, 2, 2, 2, 3, 3, 3, 3]), array([0, 0, 1, 0, 1, 2, 0, 1, 2, 3]))\n\n >>> a[trili]\n array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])\n\n This is syntactic sugar for tril_indices().\n\n >>> np.tril_indices(a.shape[0])\n (array([0, 1, 1, 2, 2, 2, 3, 3, 3, 3]), array([0, 0, 1, 0, 1, 2, 0, 1, 2, 3]))\n\n Use the `k` parameter to return the indices for the lower triangular array\n up to the k-th diagonal.\n\n >>> trili1 = np.tril_indices_from(a, k=1)\n >>> a[trili1]\n array([ 0, 1, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15])\n\n See Also\n --------\n tril_indices, tril, triu_indices_from\n """\n if arr.ndim != 2:\n raise ValueError("input array must be 2-d")\n return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])\n\n\n@set_module('numpy')\ndef triu_indices(n, 
k=0, m=None):\n """\n Return the indices for the upper-triangle of an (n, m) array.\n\n Parameters\n ----------\n n : int\n The size of the arrays for which the returned indices will\n be valid.\n k : int, optional\n Diagonal offset (see `triu` for details).\n m : int, optional\n The column dimension of the arrays for which the returned\n arrays will be valid.\n By default `m` is taken equal to `n`.\n\n\n Returns\n -------\n inds : tuple, shape(2) of ndarrays, shape(`n`)\n The row and column indices, respectively. The row indices are sorted\n in non-decreasing order, and the correspdonding column indices are\n strictly increasing for each row.\n\n See also\n --------\n tril_indices : similar function, for lower-triangular.\n mask_indices : generic function accepting an arbitrary mask function.\n triu, tril\n\n Examples\n --------\n >>> import numpy as np\n\n Compute two different sets of indices to access 4x4 arrays, one for the\n upper triangular part starting at the main diagonal, and one starting two\n diagonals further right:\n\n >>> iu1 = np.triu_indices(4)\n >>> iu1\n (array([0, 0, 0, 0, 1, 1, 1, 2, 2, 3]), array([0, 1, 2, 3, 1, 2, 3, 2, 3, 3]))\n\n Note that row indices (first array) are non-decreasing, and the corresponding\n column indices (second array) are strictly increasing for each row.\n\n Here is how they can be used with a sample array:\n\n >>> a = np.arange(16).reshape(4, 4)\n >>> a\n array([[ 0, 1, 2, 3],\n [ 4, 5, 6, 7],\n [ 8, 9, 10, 11],\n [12, 13, 14, 15]])\n\n Both for indexing:\n\n >>> a[iu1]\n array([ 0, 1, 2, ..., 10, 11, 15])\n\n And for assigning values:\n\n >>> a[iu1] = -1\n >>> a\n array([[-1, -1, -1, -1],\n [ 4, -1, -1, -1],\n [ 8, 9, -1, -1],\n [12, 13, 14, -1]])\n\n These cover only a small part of the whole array (two diagonals right\n of the main one):\n\n >>> iu2 = np.triu_indices(4, 2)\n >>> a[iu2] = -10\n >>> a\n array([[ -1, -1, -10, -10],\n [ 4, -1, -1, -10],\n [ 8, 9, -1, -1],\n [ 12, 13, 14, -1]])\n\n """\n tri_ = ~tri(n, 
m, k=k - 1, dtype=bool)\n\n return tuple(broadcast_to(inds, tri_.shape)[tri_]\n for inds in indices(tri_.shape, sparse=True))\n\n\n@array_function_dispatch(_trilu_indices_form_dispatcher)\ndef triu_indices_from(arr, k=0):\n """\n Return the indices for the upper-triangle of arr.\n\n See `triu_indices` for full details.\n\n Parameters\n ----------\n arr : ndarray, shape(N, N)\n The indices will be valid for square arrays.\n k : int, optional\n Diagonal offset (see `triu` for details).\n\n Returns\n -------\n triu_indices_from : tuple, shape(2) of ndarray, shape(N)\n Indices for the upper-triangle of `arr`.\n\n Examples\n --------\n >>> import numpy as np\n\n Create a 4 by 4 array\n\n >>> a = np.arange(16).reshape(4, 4)\n >>> a\n array([[ 0, 1, 2, 3],\n [ 4, 5, 6, 7],\n [ 8, 9, 10, 11],\n [12, 13, 14, 15]])\n\n Pass the array to get the indices of the upper triangular elements.\n\n >>> triui = np.triu_indices_from(a)\n >>> triui\n (array([0, 0, 0, 0, 1, 1, 1, 2, 2, 3]), array([0, 1, 2, 3, 1, 2, 3, 2, 3, 3]))\n\n >>> a[triui]\n array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])\n\n This is syntactic sugar for triu_indices().\n\n >>> np.triu_indices(a.shape[0])\n (array([0, 0, 0, 0, 1, 1, 1, 2, 2, 3]), array([0, 1, 2, 3, 1, 2, 3, 2, 3, 3]))\n\n Use the `k` parameter to return the indices for the upper triangular array\n from the k-th diagonal.\n\n >>> triuim1 = np.triu_indices_from(a, k=1)\n >>> a[triuim1]\n array([ 1, 2, 3, 6, 7, 11])\n\n\n See Also\n --------\n triu_indices, triu, tril_indices_from\n """\n if arr.ndim != 2:\n raise ValueError("input array must be 2-d")\n return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])\n | .venv\Lib\site-packages\numpy\lib\_twodim_base_impl.py | _twodim_base_impl.py | Python | 35,126 | 0.95 | 0.104913 | 0.011423 | node-utils | 461 | 2024-04-06T18:02:46.140747 | Apache-2.0 | false | 148b2969ac0238827e16628840a66324 |
from collections.abc import Callable, Sequence\nfrom typing import (\n Any,\n TypeAlias,\n TypeVar,\n overload,\n)\nfrom typing import (\n Literal as L,\n)\n\nimport numpy as np\nfrom numpy import (\n _OrderCF,\n complex128,\n complexfloating,\n datetime64,\n float64,\n floating,\n generic,\n int_,\n intp,\n object_,\n signedinteger,\n timedelta64,\n)\nfrom numpy._typing import (\n ArrayLike,\n DTypeLike,\n NDArray,\n _ArrayLike,\n _ArrayLikeComplex_co,\n _ArrayLikeFloat_co,\n _ArrayLikeInt_co,\n _ArrayLikeObject_co,\n _DTypeLike,\n _SupportsArray,\n _SupportsArrayFunc,\n)\n\n__all__ = [\n "diag",\n "diagflat",\n "eye",\n "fliplr",\n "flipud",\n "tri",\n "triu",\n "tril",\n "vander",\n "histogram2d",\n "mask_indices",\n "tril_indices",\n "tril_indices_from",\n "triu_indices",\n "triu_indices_from",\n]\n\n###\n\n_T = TypeVar("_T")\n_ScalarT = TypeVar("_ScalarT", bound=generic)\n_ComplexFloatingT = TypeVar("_ComplexFloatingT", bound=np.complexfloating)\n_InexactT = TypeVar("_InexactT", bound=np.inexact)\n_NumberCoT = TypeVar("_NumberCoT", bound=_Number_co)\n\n# The returned arrays dtype must be compatible with `np.equal`\n_MaskFunc: TypeAlias = Callable[[NDArray[int_], _T], NDArray[_Number_co | timedelta64 | datetime64 | object_]]\n\n_Int_co: TypeAlias = np.integer | np.bool\n_Float_co: TypeAlias = np.floating | _Int_co\n_Number_co: TypeAlias = np.number | np.bool\n\n_ArrayLike1D: TypeAlias = _SupportsArray[np.dtype[_ScalarT]] | Sequence[_ScalarT]\n_ArrayLike1DInt_co: TypeAlias = _SupportsArray[np.dtype[_Int_co]] | Sequence[int | _Int_co]\n_ArrayLike1DFloat_co: TypeAlias = _SupportsArray[np.dtype[_Float_co]] | Sequence[float | _Float_co]\n_ArrayLike2DFloat_co: TypeAlias = _SupportsArray[np.dtype[_Float_co]] | Sequence[_ArrayLike1DFloat_co]\n_ArrayLike1DNumber_co: TypeAlias = _SupportsArray[np.dtype[_Number_co]] | Sequence[complex | _Number_co]\n\n###\n\n@overload\ndef fliplr(m: _ArrayLike[_ScalarT]) -> NDArray[_ScalarT]: ...\n@overload\ndef fliplr(m: ArrayLike) -> 
NDArray[Any]: ...\n\n@overload\ndef flipud(m: _ArrayLike[_ScalarT]) -> NDArray[_ScalarT]: ...\n@overload\ndef flipud(m: ArrayLike) -> NDArray[Any]: ...\n\n@overload\ndef eye(\n N: int,\n M: int | None = ...,\n k: int = ...,\n dtype: None = ...,\n order: _OrderCF = ...,\n *,\n device: L["cpu"] | None = ...,\n like: _SupportsArrayFunc | None = ...,\n) -> NDArray[float64]: ...\n@overload\ndef eye(\n N: int,\n M: int | None,\n k: int,\n dtype: _DTypeLike[_ScalarT],\n order: _OrderCF = ...,\n *,\n device: L["cpu"] | None = ...,\n like: _SupportsArrayFunc | None = ...,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef eye(\n N: int,\n M: int | None = ...,\n k: int = ...,\n *,\n dtype: _DTypeLike[_ScalarT],\n order: _OrderCF = ...,\n device: L["cpu"] | None = ...,\n like: _SupportsArrayFunc | None = ...,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef eye(\n N: int,\n M: int | None = ...,\n k: int = ...,\n dtype: DTypeLike = ...,\n order: _OrderCF = ...,\n *,\n device: L["cpu"] | None = ...,\n like: _SupportsArrayFunc | None = ...,\n) -> NDArray[Any]: ...\n\n@overload\ndef diag(v: _ArrayLike[_ScalarT], k: int = ...) -> NDArray[_ScalarT]: ...\n@overload\ndef diag(v: ArrayLike, k: int = ...) -> NDArray[Any]: ...\n\n@overload\ndef diagflat(v: _ArrayLike[_ScalarT], k: int = ...) -> NDArray[_ScalarT]: ...\n@overload\ndef diagflat(v: ArrayLike, k: int = ...) 
-> NDArray[Any]: ...\n\n@overload\ndef tri(\n N: int,\n M: int | None = ...,\n k: int = ...,\n dtype: None = ...,\n *,\n like: _SupportsArrayFunc | None = ...\n) -> NDArray[float64]: ...\n@overload\ndef tri(\n N: int,\n M: int | None,\n k: int,\n dtype: _DTypeLike[_ScalarT],\n *,\n like: _SupportsArrayFunc | None = ...\n) -> NDArray[_ScalarT]: ...\n@overload\ndef tri(\n N: int,\n M: int | None = ...,\n k: int = ...,\n *,\n dtype: _DTypeLike[_ScalarT],\n like: _SupportsArrayFunc | None = ...\n) -> NDArray[_ScalarT]: ...\n@overload\ndef tri(\n N: int,\n M: int | None = ...,\n k: int = ...,\n dtype: DTypeLike = ...,\n *,\n like: _SupportsArrayFunc | None = ...\n) -> NDArray[Any]: ...\n\n@overload\ndef tril(m: _ArrayLike[_ScalarT], k: int = 0) -> NDArray[_ScalarT]: ...\n@overload\ndef tril(m: ArrayLike, k: int = 0) -> NDArray[Any]: ...\n\n@overload\ndef triu(m: _ArrayLike[_ScalarT], k: int = 0) -> NDArray[_ScalarT]: ...\n@overload\ndef triu(m: ArrayLike, k: int = 0) -> NDArray[Any]: ...\n\n@overload\ndef vander( # type: ignore[misc]\n x: _ArrayLikeInt_co,\n N: int | None = ...,\n increasing: bool = ...,\n) -> NDArray[signedinteger]: ...\n@overload\ndef vander( # type: ignore[misc]\n x: _ArrayLikeFloat_co,\n N: int | None = ...,\n increasing: bool = ...,\n) -> NDArray[floating]: ...\n@overload\ndef vander(\n x: _ArrayLikeComplex_co,\n N: int | None = ...,\n increasing: bool = ...,\n) -> NDArray[complexfloating]: ...\n@overload\ndef vander(\n x: _ArrayLikeObject_co,\n N: int | None = ...,\n increasing: bool = ...,\n) -> NDArray[object_]: ...\n\n@overload\ndef histogram2d(\n x: _ArrayLike1D[_ComplexFloatingT],\n y: _ArrayLike1D[_ComplexFloatingT | _Float_co],\n bins: int | Sequence[int] = ...,\n range: _ArrayLike2DFloat_co | None = ...,\n density: bool | None = ...,\n weights: _ArrayLike1DFloat_co | None = ...,\n) -> tuple[\n NDArray[float64],\n NDArray[_ComplexFloatingT],\n NDArray[_ComplexFloatingT],\n]: ...\n@overload\ndef histogram2d(\n x: 
_ArrayLike1D[_ComplexFloatingT | _Float_co],\n y: _ArrayLike1D[_ComplexFloatingT],\n bins: int | Sequence[int] = ...,\n range: _ArrayLike2DFloat_co | None = ...,\n density: bool | None = ...,\n weights: _ArrayLike1DFloat_co | None = ...,\n) -> tuple[\n NDArray[float64],\n NDArray[_ComplexFloatingT],\n NDArray[_ComplexFloatingT],\n]: ...\n@overload\ndef histogram2d(\n x: _ArrayLike1D[_InexactT],\n y: _ArrayLike1D[_InexactT | _Int_co],\n bins: int | Sequence[int] = ...,\n range: _ArrayLike2DFloat_co | None = ...,\n density: bool | None = ...,\n weights: _ArrayLike1DFloat_co | None = ...,\n) -> tuple[\n NDArray[float64],\n NDArray[_InexactT],\n NDArray[_InexactT],\n]: ...\n@overload\ndef histogram2d(\n x: _ArrayLike1D[_InexactT | _Int_co],\n y: _ArrayLike1D[_InexactT],\n bins: int | Sequence[int] = ...,\n range: _ArrayLike2DFloat_co | None = ...,\n density: bool | None = ...,\n weights: _ArrayLike1DFloat_co | None = ...,\n) -> tuple[\n NDArray[float64],\n NDArray[_InexactT],\n NDArray[_InexactT],\n]: ...\n@overload\ndef histogram2d(\n x: _ArrayLike1DInt_co | Sequence[float],\n y: _ArrayLike1DInt_co | Sequence[float],\n bins: int | Sequence[int] = ...,\n range: _ArrayLike2DFloat_co | None = ...,\n density: bool | None = ...,\n weights: _ArrayLike1DFloat_co | None = ...,\n) -> tuple[\n NDArray[float64],\n NDArray[float64],\n NDArray[float64],\n]: ...\n@overload\ndef histogram2d(\n x: Sequence[complex],\n y: Sequence[complex],\n bins: int | Sequence[int] = ...,\n range: _ArrayLike2DFloat_co | None = ...,\n density: bool | None = ...,\n weights: _ArrayLike1DFloat_co | None = ...,\n) -> tuple[\n NDArray[float64],\n NDArray[complex128 | float64],\n NDArray[complex128 | float64],\n]: ...\n@overload\ndef histogram2d(\n x: _ArrayLike1DNumber_co,\n y: _ArrayLike1DNumber_co,\n bins: _ArrayLike1D[_NumberCoT] | Sequence[_ArrayLike1D[_NumberCoT]],\n range: _ArrayLike2DFloat_co | None = ...,\n density: bool | None = ...,\n weights: _ArrayLike1DFloat_co | None = ...,\n) -> tuple[\n 
NDArray[float64],\n NDArray[_NumberCoT],\n NDArray[_NumberCoT],\n]: ...\n@overload\ndef histogram2d(\n x: _ArrayLike1D[_InexactT],\n y: _ArrayLike1D[_InexactT],\n bins: Sequence[_ArrayLike1D[_NumberCoT] | int],\n range: _ArrayLike2DFloat_co | None = ...,\n density: bool | None = ...,\n weights: _ArrayLike1DFloat_co | None = ...,\n) -> tuple[\n NDArray[float64],\n NDArray[_NumberCoT | _InexactT],\n NDArray[_NumberCoT | _InexactT],\n]: ...\n@overload\ndef histogram2d(\n x: _ArrayLike1DInt_co | Sequence[float],\n y: _ArrayLike1DInt_co | Sequence[float],\n bins: Sequence[_ArrayLike1D[_NumberCoT] | int],\n range: _ArrayLike2DFloat_co | None = ...,\n density: bool | None = ...,\n weights: _ArrayLike1DFloat_co | None = ...,\n) -> tuple[\n NDArray[float64],\n NDArray[_NumberCoT | float64],\n NDArray[_NumberCoT | float64],\n]: ...\n@overload\ndef histogram2d(\n x: Sequence[complex],\n y: Sequence[complex],\n bins: Sequence[_ArrayLike1D[_NumberCoT] | int],\n range: _ArrayLike2DFloat_co | None = ...,\n density: bool | None = ...,\n weights: _ArrayLike1DFloat_co | None = ...,\n) -> tuple[\n NDArray[float64],\n NDArray[_NumberCoT | complex128 | float64],\n NDArray[_NumberCoT | complex128 | float64],\n]: ...\n@overload\ndef histogram2d(\n x: _ArrayLike1DNumber_co,\n y: _ArrayLike1DNumber_co,\n bins: Sequence[Sequence[bool]],\n range: _ArrayLike2DFloat_co | None = ...,\n density: bool | None = ...,\n weights: _ArrayLike1DFloat_co | None = ...,\n) -> tuple[\n NDArray[float64],\n NDArray[np.bool],\n NDArray[np.bool],\n]: ...\n@overload\ndef histogram2d(\n x: _ArrayLike1DNumber_co,\n y: _ArrayLike1DNumber_co,\n bins: Sequence[Sequence[int]],\n range: _ArrayLike2DFloat_co | None = ...,\n density: bool | None = ...,\n weights: _ArrayLike1DFloat_co | None = ...,\n) -> tuple[\n NDArray[float64],\n NDArray[np.int_ | np.bool],\n NDArray[np.int_ | np.bool],\n]: ...\n@overload\ndef histogram2d(\n x: _ArrayLike1DNumber_co,\n y: _ArrayLike1DNumber_co,\n bins: Sequence[Sequence[float]],\n 
range: _ArrayLike2DFloat_co | None = ...,\n density: bool | None = ...,\n weights: _ArrayLike1DFloat_co | None = ...,\n) -> tuple[\n NDArray[float64],\n NDArray[np.float64 | np.int_ | np.bool],\n NDArray[np.float64 | np.int_ | np.bool],\n]: ...\n@overload\ndef histogram2d(\n x: _ArrayLike1DNumber_co,\n y: _ArrayLike1DNumber_co,\n bins: Sequence[Sequence[complex]],\n range: _ArrayLike2DFloat_co | None = ...,\n density: bool | None = ...,\n weights: _ArrayLike1DFloat_co | None = ...,\n) -> tuple[\n NDArray[float64],\n NDArray[np.complex128 | np.float64 | np.int_ | np.bool],\n NDArray[np.complex128 | np.float64 | np.int_ | np.bool],\n]: ...\n\n# NOTE: we're assuming/demanding here the `mask_func` returns\n# an ndarray of shape `(n, n)`; otherwise there is the possibility\n# of the output tuple having more or less than 2 elements\n@overload\ndef mask_indices(\n n: int,\n mask_func: _MaskFunc[int],\n k: int = ...,\n) -> tuple[NDArray[intp], NDArray[intp]]: ...\n@overload\ndef mask_indices(\n n: int,\n mask_func: _MaskFunc[_T],\n k: _T,\n) -> tuple[NDArray[intp], NDArray[intp]]: ...\n\ndef tril_indices(\n n: int,\n k: int = ...,\n m: int | None = ...,\n) -> tuple[NDArray[int_], NDArray[int_]]: ...\n\ndef tril_indices_from(\n arr: NDArray[Any],\n k: int = ...,\n) -> tuple[NDArray[int_], NDArray[int_]]: ...\n\ndef triu_indices(\n n: int,\n k: int = ...,\n m: int | None = ...,\n) -> tuple[NDArray[int_], NDArray[int_]]: ...\n\ndef triu_indices_from(\n arr: NDArray[Any],\n k: int = ...,\n) -> tuple[NDArray[int_], NDArray[int_]]: ...\n | .venv\Lib\site-packages\numpy\lib\_twodim_base_impl.pyi | _twodim_base_impl.pyi | Other | 11,631 | 0.95 | 0.100457 | 0.033735 | react-lib | 754 | 2023-07-14T09:37:20.472693 | GPL-3.0 | false | a176c9c0b3d89081dd54a66f28edcd35 |
"""Automatically adapted for numpy Sep 19, 2005 by convertcode.py\n\n"""\nimport functools\n\n__all__ = ['iscomplexobj', 'isrealobj', 'imag', 'iscomplex',\n 'isreal', 'nan_to_num', 'real', 'real_if_close',\n 'typename', 'mintypecode',\n 'common_type']\n\nimport numpy._core.numeric as _nx\nfrom numpy._core import getlimits, overrides\nfrom numpy._core.numeric import asanyarray, asarray, isnan, zeros\nfrom numpy._utils import set_module\n\nfrom ._ufunclike_impl import isneginf, isposinf\n\narray_function_dispatch = functools.partial(\n overrides.array_function_dispatch, module='numpy')\n\n\n_typecodes_by_elsize = 'GDFgdfQqLlIiHhBb?'\n\n\n@set_module('numpy')\ndef mintypecode(typechars, typeset='GDFgdf', default='d'):\n """\n Return the character for the minimum-size type to which given types can\n be safely cast.\n\n The returned type character must represent the smallest size dtype such\n that an array of the returned type can handle the data from an array of\n all types in `typechars` (or if `typechars` is an array, then its\n dtype.char).\n\n Parameters\n ----------\n typechars : list of str or array_like\n If a list of strings, each string should represent a dtype.\n If array_like, the character representation of the array dtype is used.\n typeset : str or list of str, optional\n The set of characters that the returned character is chosen from.\n The default set is 'GDFgdf'.\n default : str, optional\n The default character, this is returned if none of the characters in\n `typechars` matches a character in `typeset`.\n\n Returns\n -------\n typechar : str\n The character representing the minimum-size type that was found.\n\n See Also\n --------\n dtype\n\n Examples\n --------\n >>> import numpy as np\n >>> np.mintypecode(['d', 'f', 'S'])\n 'd'\n >>> x = np.array([1.1, 2-3.j])\n >>> np.mintypecode(x)\n 'D'\n\n >>> np.mintypecode('abceh', default='G')\n 'G'\n\n """\n typecodes = ((isinstance(t, str) and t) or asarray(t).dtype.char\n for t in typechars)\n 
intersection = {t for t in typecodes if t in typeset}\n if not intersection:\n return default\n if 'F' in intersection and 'd' in intersection:\n return 'D'\n return min(intersection, key=_typecodes_by_elsize.index)\n\n\ndef _real_dispatcher(val):\n return (val,)\n\n\n@array_function_dispatch(_real_dispatcher)\ndef real(val):\n """\n Return the real part of the complex argument.\n\n Parameters\n ----------\n val : array_like\n Input array.\n\n Returns\n -------\n out : ndarray or scalar\n The real component of the complex argument. If `val` is real, the type\n of `val` is used for the output. If `val` has complex elements, the\n returned type is float.\n\n See Also\n --------\n real_if_close, imag, angle\n\n Examples\n --------\n >>> import numpy as np\n >>> a = np.array([1+2j, 3+4j, 5+6j])\n >>> a.real\n array([1., 3., 5.])\n >>> a.real = 9\n >>> a\n array([9.+2.j, 9.+4.j, 9.+6.j])\n >>> a.real = np.array([9, 8, 7])\n >>> a\n array([9.+2.j, 8.+4.j, 7.+6.j])\n >>> np.real(1 + 1j)\n 1.0\n\n """\n try:\n return val.real\n except AttributeError:\n return asanyarray(val).real\n\n\ndef _imag_dispatcher(val):\n return (val,)\n\n\n@array_function_dispatch(_imag_dispatcher)\ndef imag(val):\n """\n Return the imaginary part of the complex argument.\n\n Parameters\n ----------\n val : array_like\n Input array.\n\n Returns\n -------\n out : ndarray or scalar\n The imaginary component of the complex argument. If `val` is real,\n the type of `val` is used for the output. If `val` has complex\n elements, the returned type is float.\n\n See Also\n --------\n real, angle, real_if_close\n\n Examples\n --------\n >>> import numpy as np\n >>> a = np.array([1+2j, 3+4j, 5+6j])\n >>> a.imag\n array([2., 4., 6.])\n >>> a.imag = np.array([8, 10, 12])\n >>> a\n array([1. 
+8.j, 3.+10.j, 5.+12.j])\n >>> np.imag(1 + 1j)\n 1.0\n\n """\n try:\n return val.imag\n except AttributeError:\n return asanyarray(val).imag\n\n\ndef _is_type_dispatcher(x):\n return (x,)\n\n\n@array_function_dispatch(_is_type_dispatcher)\ndef iscomplex(x):\n """\n Returns a bool array, where True if input element is complex.\n\n What is tested is whether the input has a non-zero imaginary part, not if\n the input type is complex.\n\n Parameters\n ----------\n x : array_like\n Input array.\n\n Returns\n -------\n out : ndarray of bools\n Output array.\n\n See Also\n --------\n isreal\n iscomplexobj : Return True if x is a complex type or an array of complex\n numbers.\n\n Examples\n --------\n >>> import numpy as np\n >>> np.iscomplex([1+1j, 1+0j, 4.5, 3, 2, 2j])\n array([ True, False, False, False, False, True])\n\n """\n ax = asanyarray(x)\n if issubclass(ax.dtype.type, _nx.complexfloating):\n return ax.imag != 0\n res = zeros(ax.shape, bool)\n return res[()] # convert to scalar if needed\n\n\n@array_function_dispatch(_is_type_dispatcher)\ndef isreal(x):\n """\n Returns a bool array, where True if input element is real.\n\n If element has complex type with zero imaginary part, the return value\n for that element is True.\n\n Parameters\n ----------\n x : array_like\n Input array.\n\n Returns\n -------\n out : ndarray, bool\n Boolean array of same shape as `x`.\n\n Notes\n -----\n `isreal` may behave unexpectedly for string or object arrays (see examples)\n\n See Also\n --------\n iscomplex\n isrealobj : Return True if x is not a complex type.\n\n Examples\n --------\n >>> import numpy as np\n >>> a = np.array([1+1j, 1+0j, 4.5, 3, 2, 2j], dtype=complex)\n >>> np.isreal(a)\n array([False, True, True, True, True, False])\n\n The function does not work on string arrays.\n\n >>> a = np.array([2j, "a"], dtype="U")\n >>> np.isreal(a) # Warns about non-elementwise comparison\n False\n\n Returns True for all elements in input array of ``dtype=object`` even if\n any of the 
elements is complex.\n\n >>> a = np.array([1, "2", 3+4j], dtype=object)\n >>> np.isreal(a)\n array([ True, True, True])\n\n isreal should not be used with object arrays\n\n >>> a = np.array([1+2j, 2+1j], dtype=object)\n >>> np.isreal(a)\n array([ True, True])\n\n """\n return imag(x) == 0\n\n\n@array_function_dispatch(_is_type_dispatcher)\ndef iscomplexobj(x):\n """\n Check for a complex type or an array of complex numbers.\n\n The type of the input is checked, not the value. Even if the input\n has an imaginary part equal to zero, `iscomplexobj` evaluates to True.\n\n Parameters\n ----------\n x : any\n The input can be of any type and shape.\n\n Returns\n -------\n iscomplexobj : bool\n The return value, True if `x` is of a complex type or has at least\n one complex element.\n\n See Also\n --------\n isrealobj, iscomplex\n\n Examples\n --------\n >>> import numpy as np\n >>> np.iscomplexobj(1)\n False\n >>> np.iscomplexobj(1+0j)\n True\n >>> np.iscomplexobj([3, 1+0j, True])\n True\n\n """\n try:\n dtype = x.dtype\n type_ = dtype.type\n except AttributeError:\n type_ = asarray(x).dtype.type\n return issubclass(type_, _nx.complexfloating)\n\n\n@array_function_dispatch(_is_type_dispatcher)\ndef isrealobj(x):\n """\n Return True if x is a not complex type or an array of complex numbers.\n\n The type of the input is checked, not the value. So even if the input\n has an imaginary part equal to zero, `isrealobj` evaluates to False\n if the data type is complex.\n\n Parameters\n ----------\n x : any\n The input can be of any type and shape.\n\n Returns\n -------\n y : bool\n The return value, False if `x` is of a complex type.\n\n See Also\n --------\n iscomplexobj, isreal\n\n Notes\n -----\n The function is only meant for arrays with numerical values but it\n accepts all other objects. 
Since it assumes array input, the return\n value of other objects may be True.\n\n >>> np.isrealobj('A string')\n True\n >>> np.isrealobj(False)\n True\n >>> np.isrealobj(None)\n True\n\n Examples\n --------\n >>> import numpy as np\n >>> np.isrealobj(1)\n True\n >>> np.isrealobj(1+0j)\n False\n >>> np.isrealobj([3, 1+0j, True])\n False\n\n """\n return not iscomplexobj(x)\n\n#-----------------------------------------------------------------------------\n\ndef _getmaxmin(t):\n from numpy._core import getlimits\n f = getlimits.finfo(t)\n return f.max, f.min\n\n\ndef _nan_to_num_dispatcher(x, copy=None, nan=None, posinf=None, neginf=None):\n return (x,)\n\n\n@array_function_dispatch(_nan_to_num_dispatcher)\ndef nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None):\n """\n Replace NaN with zero and infinity with large finite numbers (default\n behaviour) or with the numbers defined by the user using the `nan`,\n `posinf` and/or `neginf` keywords.\n\n If `x` is inexact, NaN is replaced by zero or by the user defined value in\n `nan` keyword, infinity is replaced by the largest finite floating point\n values representable by ``x.dtype`` or by the user defined value in\n `posinf` keyword and -infinity is replaced by the most negative finite\n floating point values representable by ``x.dtype`` or by the user defined\n value in `neginf` keyword.\n\n For complex dtypes, the above is applied to each of the real and\n imaginary components of `x` separately.\n\n If `x` is not inexact, then no replacements are made.\n\n Parameters\n ----------\n x : scalar or array_like\n Input data.\n copy : bool, optional\n Whether to create a copy of `x` (True) or to replace values\n in-place (False). The in-place operation only occurs if\n casting to an array does not require a copy.\n Default is True.\n nan : int, float, optional\n Value to be used to fill NaN values. 
If no value is passed\n then NaN values will be replaced with 0.0.\n posinf : int, float, optional\n Value to be used to fill positive infinity values. If no value is\n passed then positive infinity values will be replaced with a very\n large number.\n neginf : int, float, optional\n Value to be used to fill negative infinity values. If no value is\n passed then negative infinity values will be replaced with a very\n small (or negative) number.\n\n Returns\n -------\n out : ndarray\n `x`, with the non-finite values replaced. If `copy` is False, this may\n be `x` itself.\n\n See Also\n --------\n isinf : Shows which elements are positive or negative infinity.\n isneginf : Shows which elements are negative infinity.\n isposinf : Shows which elements are positive infinity.\n isnan : Shows which elements are Not a Number (NaN).\n isfinite : Shows which elements are finite (not NaN, not infinity)\n\n Notes\n -----\n NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic\n (IEEE 754). 
This means that Not a Number is not equivalent to infinity.\n\n Examples\n --------\n >>> import numpy as np\n >>> np.nan_to_num(np.inf)\n 1.7976931348623157e+308\n >>> np.nan_to_num(-np.inf)\n -1.7976931348623157e+308\n >>> np.nan_to_num(np.nan)\n 0.0\n >>> x = np.array([np.inf, -np.inf, np.nan, -128, 128])\n >>> np.nan_to_num(x)\n array([ 1.79769313e+308, -1.79769313e+308, 0.00000000e+000, # may vary\n -1.28000000e+002, 1.28000000e+002])\n >>> np.nan_to_num(x, nan=-9999, posinf=33333333, neginf=33333333)\n array([ 3.3333333e+07, 3.3333333e+07, -9.9990000e+03,\n -1.2800000e+02, 1.2800000e+02])\n >>> y = np.array([complex(np.inf, np.nan), np.nan, complex(np.nan, np.inf)])\n array([ 1.79769313e+308, -1.79769313e+308, 0.00000000e+000, # may vary\n -1.28000000e+002, 1.28000000e+002])\n >>> np.nan_to_num(y)\n array([ 1.79769313e+308 +0.00000000e+000j, # may vary\n 0.00000000e+000 +0.00000000e+000j,\n 0.00000000e+000 +1.79769313e+308j])\n >>> np.nan_to_num(y, nan=111111, posinf=222222)\n array([222222.+111111.j, 111111. 
+0.j, 111111.+222222.j])\n """\n x = _nx.array(x, subok=True, copy=copy)\n xtype = x.dtype.type\n\n isscalar = (x.ndim == 0)\n\n if not issubclass(xtype, _nx.inexact):\n return x[()] if isscalar else x\n\n iscomplex = issubclass(xtype, _nx.complexfloating)\n\n dest = (x.real, x.imag) if iscomplex else (x,)\n maxf, minf = _getmaxmin(x.real.dtype)\n if posinf is not None:\n maxf = posinf\n if neginf is not None:\n minf = neginf\n for d in dest:\n idx_nan = isnan(d)\n idx_posinf = isposinf(d)\n idx_neginf = isneginf(d)\n _nx.copyto(d, nan, where=idx_nan)\n _nx.copyto(d, maxf, where=idx_posinf)\n _nx.copyto(d, minf, where=idx_neginf)\n return x[()] if isscalar else x\n\n#-----------------------------------------------------------------------------\n\ndef _real_if_close_dispatcher(a, tol=None):\n return (a,)\n\n\n@array_function_dispatch(_real_if_close_dispatcher)\ndef real_if_close(a, tol=100):\n """\n If input is complex with all imaginary parts close to zero, return\n real parts.\n\n "Close to zero" is defined as `tol` * (machine epsilon of the type for\n `a`).\n\n Parameters\n ----------\n a : array_like\n Input array.\n tol : float\n Tolerance in machine epsilons for the complex part of the elements\n in the array. If the tolerance is <=1, then the absolute tolerance\n is used.\n\n Returns\n -------\n out : ndarray\n If `a` is real, the type of `a` is used for the output. If `a`\n has complex elements, the returned type is float.\n\n See Also\n --------\n real, imag, angle\n\n Notes\n -----\n Machine epsilon varies from machine to machine and between data types\n but Python floats on most platforms have a machine epsilon equal to\n 2.2204460492503131e-16. 
You can use 'np.finfo(float).eps' to print\n out the machine epsilon for floats.\n\n Examples\n --------\n >>> import numpy as np\n >>> np.finfo(float).eps\n 2.2204460492503131e-16 # may vary\n\n >>> np.real_if_close([2.1 + 4e-14j, 5.2 + 3e-15j], tol=1000)\n array([2.1, 5.2])\n >>> np.real_if_close([2.1 + 4e-13j, 5.2 + 3e-15j], tol=1000)\n array([2.1+4.e-13j, 5.2 + 3e-15j])\n\n """\n a = asanyarray(a)\n type_ = a.dtype.type\n if not issubclass(type_, _nx.complexfloating):\n return a\n if tol > 1:\n f = getlimits.finfo(type_)\n tol = f.eps * tol\n if _nx.all(_nx.absolute(a.imag) < tol):\n a = a.real\n return a\n\n\n#-----------------------------------------------------------------------------\n\n_namefromtype = {'S1': 'character',\n '?': 'bool',\n 'b': 'signed char',\n 'B': 'unsigned char',\n 'h': 'short',\n 'H': 'unsigned short',\n 'i': 'integer',\n 'I': 'unsigned integer',\n 'l': 'long integer',\n 'L': 'unsigned long integer',\n 'q': 'long long integer',\n 'Q': 'unsigned long long integer',\n 'f': 'single precision',\n 'd': 'double precision',\n 'g': 'long precision',\n 'F': 'complex single precision',\n 'D': 'complex double precision',\n 'G': 'complex long double precision',\n 'S': 'string',\n 'U': 'unicode',\n 'V': 'void',\n 'O': 'object'\n }\n\n@set_module('numpy')\ndef typename(char):\n """\n Return a description for the given data type code.\n\n Parameters\n ----------\n char : str\n Data type code.\n\n Returns\n -------\n out : str\n Description of the input data type code.\n\n See Also\n --------\n dtype\n\n Examples\n --------\n >>> import numpy as np\n >>> typechars = ['S1', '?', 'B', 'D', 'G', 'F', 'I', 'H', 'L', 'O', 'Q',\n ... 'S', 'U', 'V', 'b', 'd', 'g', 'f', 'i', 'h', 'l', 'q']\n >>> for typechar in typechars:\n ... print(typechar, ' : ', np.typename(typechar))\n ...\n S1 : character\n ? 
: bool\n B : unsigned char\n D : complex double precision\n G : complex long double precision\n F : complex single precision\n I : unsigned integer\n H : unsigned short\n L : unsigned long integer\n O : object\n Q : unsigned long long integer\n S : string\n U : unicode\n V : void\n b : signed char\n d : double precision\n g : long precision\n f : single precision\n i : integer\n h : short\n l : long integer\n q : long long integer\n\n """\n return _namefromtype[char]\n\n#-----------------------------------------------------------------------------\n\n\n#determine the "minimum common type" for a group of arrays.\narray_type = [[_nx.float16, _nx.float32, _nx.float64, _nx.longdouble],\n [None, _nx.complex64, _nx.complex128, _nx.clongdouble]]\narray_precision = {_nx.float16: 0,\n _nx.float32: 1,\n _nx.float64: 2,\n _nx.longdouble: 3,\n _nx.complex64: 1,\n _nx.complex128: 2,\n _nx.clongdouble: 3}\n\n\ndef _common_type_dispatcher(*arrays):\n return arrays\n\n\n@array_function_dispatch(_common_type_dispatcher)\ndef common_type(*arrays):\n """\n Return a scalar type which is common to the input arrays.\n\n The return type will always be an inexact (i.e. floating point) scalar\n type, even if all the arrays are integer arrays. If one of the inputs is\n an integer array, the minimum precision type that is returned is a\n 64-bit floating point dtype.\n\n All input arrays except int64 and uint64 can be safely cast to the\n returned dtype without loss of information.\n\n Parameters\n ----------\n array1, array2, ... 
: ndarrays\n Input arrays.\n\n Returns\n -------\n out : data type code\n Data type code.\n\n See Also\n --------\n dtype, mintypecode\n\n Examples\n --------\n >>> np.common_type(np.arange(2, dtype=np.float32))\n <class 'numpy.float32'>\n >>> np.common_type(np.arange(2, dtype=np.float32), np.arange(2))\n <class 'numpy.float64'>\n >>> np.common_type(np.arange(4), np.array([45, 6.j]), np.array([45.0]))\n <class 'numpy.complex128'>\n\n """\n is_complex = False\n precision = 0\n for a in arrays:\n t = a.dtype.type\n if iscomplexobj(a):\n is_complex = True\n if issubclass(t, _nx.integer):\n p = 2 # array_precision[_nx.double]\n else:\n p = array_precision.get(t)\n if p is None:\n raise TypeError("can't get common type for non-numeric array")\n precision = max(precision, p)\n if is_complex:\n return array_type[1][precision]\n else:\n return array_type[0][precision]\n | .venv\Lib\site-packages\numpy\lib\_type_check_impl.py | _type_check_impl.py | Python | 19,920 | 0.95 | 0.118741 | 0.008787 | awesome-app | 339 | 2023-09-25T09:04:01.000587 | MIT | false | 3a001879460df0bcb962e897676c70bd |
from collections.abc import Container, Iterable\nfrom typing import Any, Protocol, TypeAlias, overload, type_check_only\nfrom typing import Literal as L\n\nfrom _typeshed import Incomplete\nfrom typing_extensions import TypeVar\n\nimport numpy as np\nfrom numpy._typing import (\n ArrayLike,\n NDArray,\n _16Bit,\n _32Bit,\n _64Bit,\n _ArrayLike,\n _NestedSequence,\n _ScalarLike_co,\n _SupportsArray,\n)\n\n__all__ = [\n "common_type",\n "imag",\n "iscomplex",\n "iscomplexobj",\n "isreal",\n "isrealobj",\n "mintypecode",\n "nan_to_num",\n "real",\n "real_if_close",\n "typename",\n]\n\n_T = TypeVar("_T")\n_T_co = TypeVar("_T_co", covariant=True)\n_ScalarT = TypeVar("_ScalarT", bound=np.generic)\n_ScalarT_co = TypeVar("_ScalarT_co", bound=np.generic, covariant=True)\n_RealT = TypeVar("_RealT", bound=np.floating | np.integer | np.bool)\n\n_FloatMax32: TypeAlias = np.float32 | np.float16\n_ComplexMax128: TypeAlias = np.complex128 | np.complex64\n_RealMax64: TypeAlias = np.float64 | np.float32 | np.float16 | np.integer\n_Real: TypeAlias = np.floating | np.integer\n_InexactMax32: TypeAlias = np.inexact[_32Bit] | np.float16\n_NumberMax64: TypeAlias = np.number[_64Bit] | np.number[_32Bit] | np.number[_16Bit] | np.integer\n\n@type_check_only\nclass _HasReal(Protocol[_T_co]):\n @property\n def real(self, /) -> _T_co: ...\n\n@type_check_only\nclass _HasImag(Protocol[_T_co]):\n @property\n def imag(self, /) -> _T_co: ...\n\n@type_check_only\nclass _HasDType(Protocol[_ScalarT_co]):\n @property\n def dtype(self, /) -> np.dtype[_ScalarT_co]: ...\n\n###\n\ndef mintypecode(typechars: Iterable[str | ArrayLike], typeset: str | Container[str] = "GDFgdf", default: str = "d") -> str: ...\n\n#\n@overload\ndef real(val: _HasReal[_T]) -> _T: ... # type: ignore[overload-overlap]\n@overload\ndef real(val: _ArrayLike[_RealT]) -> NDArray[_RealT]: ...\n@overload\ndef real(val: ArrayLike) -> NDArray[Any]: ...\n\n#\n@overload\ndef imag(val: _HasImag[_T]) -> _T: ... 
# type: ignore[overload-overlap]\n@overload\ndef imag(val: _ArrayLike[_RealT]) -> NDArray[_RealT]: ...\n@overload\ndef imag(val: ArrayLike) -> NDArray[Any]: ...\n\n#\n@overload\ndef iscomplex(x: _ScalarLike_co) -> np.bool: ...\n@overload\ndef iscomplex(x: NDArray[Any] | _NestedSequence[ArrayLike]) -> NDArray[np.bool]: ...\n@overload\ndef iscomplex(x: ArrayLike) -> np.bool | NDArray[np.bool]: ...\n\n#\n@overload\ndef isreal(x: _ScalarLike_co) -> np.bool: ...\n@overload\ndef isreal(x: NDArray[Any] | _NestedSequence[ArrayLike]) -> NDArray[np.bool]: ...\n@overload\ndef isreal(x: ArrayLike) -> np.bool | NDArray[np.bool]: ...\n\n#\ndef iscomplexobj(x: _HasDType[Any] | ArrayLike) -> bool: ...\ndef isrealobj(x: _HasDType[Any] | ArrayLike) -> bool: ...\n\n#\n@overload\ndef nan_to_num(\n x: _ScalarT,\n copy: bool = True,\n nan: float = 0.0,\n posinf: float | None = None,\n neginf: float | None = None,\n) -> _ScalarT: ...\n@overload\ndef nan_to_num(\n x: NDArray[_ScalarT] | _NestedSequence[_ArrayLike[_ScalarT]],\n copy: bool = True,\n nan: float = 0.0,\n posinf: float | None = None,\n neginf: float | None = None,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef nan_to_num(\n x: _SupportsArray[np.dtype[_ScalarT]],\n copy: bool = True,\n nan: float = 0.0,\n posinf: float | None = None,\n neginf: float | None = None,\n) -> _ScalarT | NDArray[_ScalarT]: ...\n@overload\ndef nan_to_num(\n x: _NestedSequence[ArrayLike],\n copy: bool = True,\n nan: float = 0.0,\n posinf: float | None = None,\n neginf: float | None = None,\n) -> NDArray[Incomplete]: ...\n@overload\ndef nan_to_num(\n x: ArrayLike,\n copy: bool = True,\n nan: float = 0.0,\n posinf: float | None = None,\n neginf: float | None = None,\n) -> Incomplete: ...\n\n# NOTE: The [overload-overlap] mypy error is a false positive\n@overload\ndef real_if_close(a: _ArrayLike[np.complex64], tol: float = 100) -> NDArray[np.float32 | np.complex64]: ... 
# type: ignore[overload-overlap]\n@overload\ndef real_if_close(a: _ArrayLike[np.complex128], tol: float = 100) -> NDArray[np.float64 | np.complex128]: ...\n@overload\ndef real_if_close(a: _ArrayLike[np.clongdouble], tol: float = 100) -> NDArray[np.longdouble | np.clongdouble]: ...\n@overload\ndef real_if_close(a: _ArrayLike[_RealT], tol: float = 100) -> NDArray[_RealT]: ...\n@overload\ndef real_if_close(a: ArrayLike, tol: float = 100) -> NDArray[Any]: ...\n\n#\n@overload\ndef typename(char: L['S1']) -> L['character']: ...\n@overload\ndef typename(char: L['?']) -> L['bool']: ...\n@overload\ndef typename(char: L['b']) -> L['signed char']: ...\n@overload\ndef typename(char: L['B']) -> L['unsigned char']: ...\n@overload\ndef typename(char: L['h']) -> L['short']: ...\n@overload\ndef typename(char: L['H']) -> L['unsigned short']: ...\n@overload\ndef typename(char: L['i']) -> L['integer']: ...\n@overload\ndef typename(char: L['I']) -> L['unsigned integer']: ...\n@overload\ndef typename(char: L['l']) -> L['long integer']: ...\n@overload\ndef typename(char: L['L']) -> L['unsigned long integer']: ...\n@overload\ndef typename(char: L['q']) -> L['long long integer']: ...\n@overload\ndef typename(char: L['Q']) -> L['unsigned long long integer']: ...\n@overload\ndef typename(char: L['f']) -> L['single precision']: ...\n@overload\ndef typename(char: L['d']) -> L['double precision']: ...\n@overload\ndef typename(char: L['g']) -> L['long precision']: ...\n@overload\ndef typename(char: L['F']) -> L['complex single precision']: ...\n@overload\ndef typename(char: L['D']) -> L['complex double precision']: ...\n@overload\ndef typename(char: L['G']) -> L['complex long double precision']: ...\n@overload\ndef typename(char: L['S']) -> L['string']: ...\n@overload\ndef typename(char: L['U']) -> L['unicode']: ...\n@overload\ndef typename(char: L['V']) -> L['void']: ...\n@overload\ndef typename(char: L['O']) -> L['object']: ...\n\n# NOTE: The [overload-overlap] mypy errors are false 
positives\n@overload\ndef common_type() -> type[np.float16]: ...\n@overload\ndef common_type(a0: _HasDType[np.float16], /, *ai: _HasDType[np.float16]) -> type[np.float16]: ... # type: ignore[overload-overlap]\n@overload\ndef common_type(a0: _HasDType[np.float32], /, *ai: _HasDType[_FloatMax32]) -> type[np.float32]: ... # type: ignore[overload-overlap]\n@overload\ndef common_type( # type: ignore[overload-overlap]\n a0: _HasDType[np.float64 | np.integer],\n /,\n *ai: _HasDType[_RealMax64],\n) -> type[np.float64]: ...\n@overload\ndef common_type( # type: ignore[overload-overlap]\n a0: _HasDType[np.longdouble],\n /,\n *ai: _HasDType[_Real],\n) -> type[np.longdouble]: ...\n@overload\ndef common_type( # type: ignore[overload-overlap]\n a0: _HasDType[np.complex64],\n /,\n *ai: _HasDType[_InexactMax32],\n) -> type[np.complex64]: ...\n@overload\ndef common_type( # type: ignore[overload-overlap]\n a0: _HasDType[np.complex128],\n /,\n *ai: _HasDType[_NumberMax64],\n) -> type[np.complex128]: ...\n@overload\ndef common_type( # type: ignore[overload-overlap]\n a0: _HasDType[np.clongdouble],\n /,\n *ai: _HasDType[np.number],\n) -> type[np.clongdouble]: ...\n@overload\ndef common_type( # type: ignore[overload-overlap]\n a0: _HasDType[_FloatMax32],\n array1: _HasDType[np.float32],\n /,\n *ai: _HasDType[_FloatMax32],\n) -> type[np.float32]: ...\n@overload\ndef common_type(\n a0: _HasDType[_RealMax64],\n array1: _HasDType[np.float64 | np.integer],\n /,\n *ai: _HasDType[_RealMax64],\n) -> type[np.float64]: ...\n@overload\ndef common_type(\n a0: _HasDType[_Real],\n array1: _HasDType[np.longdouble],\n /,\n *ai: _HasDType[_Real],\n) -> type[np.longdouble]: ...\n@overload\ndef common_type( # type: ignore[overload-overlap]\n a0: _HasDType[_InexactMax32],\n array1: _HasDType[np.complex64],\n /,\n *ai: _HasDType[_InexactMax32],\n) -> type[np.complex64]: ...\n@overload\ndef common_type(\n a0: _HasDType[np.float64],\n array1: _HasDType[_ComplexMax128],\n /,\n *ai: _HasDType[_NumberMax64],\n) 
-> type[np.complex128]: ...\n@overload\ndef common_type(\n a0: _HasDType[_ComplexMax128],\n array1: _HasDType[np.float64],\n /,\n *ai: _HasDType[_NumberMax64],\n) -> type[np.complex128]: ...\n@overload\ndef common_type(\n a0: _HasDType[_NumberMax64],\n array1: _HasDType[np.complex128],\n /,\n *ai: _HasDType[_NumberMax64],\n) -> type[np.complex128]: ...\n@overload\ndef common_type(\n a0: _HasDType[_ComplexMax128],\n array1: _HasDType[np.complex128 | np.integer],\n /,\n *ai: _HasDType[_NumberMax64],\n) -> type[np.complex128]: ...\n@overload\ndef common_type(\n a0: _HasDType[np.complex128 | np.integer],\n array1: _HasDType[_ComplexMax128],\n /,\n *ai: _HasDType[_NumberMax64],\n) -> type[np.complex128]: ...\n@overload\ndef common_type(\n a0: _HasDType[_Real],\n /,\n *ai: _HasDType[_Real],\n) -> type[np.floating]: ...\n@overload\ndef common_type(\n a0: _HasDType[np.number],\n array1: _HasDType[np.clongdouble],\n /,\n *ai: _HasDType[np.number],\n) -> type[np.clongdouble]: ...\n@overload\ndef common_type(\n a0: _HasDType[np.longdouble],\n array1: _HasDType[np.complexfloating],\n /,\n *ai: _HasDType[np.number],\n) -> type[np.clongdouble]: ...\n@overload\ndef common_type(\n a0: _HasDType[np.complexfloating],\n array1: _HasDType[np.longdouble],\n /,\n *ai: _HasDType[np.number],\n) -> type[np.clongdouble]: ...\n@overload\ndef common_type(\n a0: _HasDType[np.complexfloating],\n array1: _HasDType[np.number],\n /,\n *ai: _HasDType[np.number],\n) -> type[np.complexfloating]: ...\n@overload\ndef common_type(\n a0: _HasDType[np.number],\n array1: _HasDType[np.complexfloating],\n /,\n *ai: _HasDType[np.number],\n) -> type[np.complexfloating]: ...\n@overload\ndef common_type(\n a0: _HasDType[np.number],\n array1: _HasDType[np.number],\n /,\n *ai: _HasDType[np.number],\n) -> type[Any]: ...\n | .venv\Lib\site-packages\numpy\lib\_type_check_impl.pyi | _type_check_impl.pyi | Other | 10,063 | 0.95 | 0.22 | 0.093656 | vue-tools | 311 | 2025-02-14T21:25:16.446258 | Apache-2.0 | false | 
cabdd77f2c16c8c551e6b27e6a1fc34c |
"""\nModule of functions that are like ufuncs in acting on arrays and optionally\nstoring results in an output array.\n\n"""\n__all__ = ['fix', 'isneginf', 'isposinf']\n\nimport numpy._core.numeric as nx\nfrom numpy._core.overrides import array_function_dispatch\n\n\ndef _dispatcher(x, out=None):\n return (x, out)\n\n\n@array_function_dispatch(_dispatcher, verify=False, module='numpy')\ndef fix(x, out=None):\n """\n Round to nearest integer towards zero.\n\n Round an array of floats element-wise to nearest integer towards zero.\n The rounded values have the same data-type as the input.\n\n Parameters\n ----------\n x : array_like\n An array to be rounded\n out : ndarray, optional\n A location into which the result is stored. If provided, it must have\n a shape that the input broadcasts to. If not provided or None, a\n freshly-allocated array is returned.\n\n Returns\n -------\n out : ndarray of floats\n An array with the same dimensions and data-type as the input.\n If second argument is not supplied then a new array is returned\n with the rounded values.\n\n If a second argument is supplied the result is stored there.\n The return value ``out`` is then a reference to that array.\n\n See Also\n --------\n rint, trunc, floor, ceil\n around : Round to given number of decimals\n\n Examples\n --------\n >>> import numpy as np\n >>> np.fix(3.14)\n 3.0\n >>> np.fix(3)\n 3\n >>> np.fix([2.1, 2.9, -2.1, -2.9])\n array([ 2., 2., -2., -2.])\n\n """\n # promote back to an array if flattened\n res = nx.asanyarray(nx.ceil(x, out=out))\n res = nx.floor(x, out=res, where=nx.greater_equal(x, 0))\n\n # when no out argument is passed and no subclasses are involved, flatten\n # scalars\n if out is None and type(res) is nx.ndarray:\n res = res[()]\n return res\n\n\n@array_function_dispatch(_dispatcher, verify=False, module='numpy')\ndef isposinf(x, out=None):\n """\n Test element-wise for positive infinity, return result as bool array.\n\n Parameters\n ----------\n x : array_like\n 
The input array.\n out : array_like, optional\n A location into which the result is stored. If provided, it must have a\n shape that the input broadcasts to. If not provided or None, a\n freshly-allocated boolean array is returned.\n\n Returns\n -------\n out : ndarray\n A boolean array with the same dimensions as the input.\n If second argument is not supplied then a boolean array is returned\n with values True where the corresponding element of the input is\n positive infinity and values False where the element of the input is\n not positive infinity.\n\n If a second argument is supplied the result is stored there. If the\n type of that array is a numeric type the result is represented as zeros\n and ones, if the type is boolean then as False and True.\n The return value `out` is then a reference to that array.\n\n See Also\n --------\n isinf, isneginf, isfinite, isnan\n\n Notes\n -----\n NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic\n (IEEE 754).\n\n Errors result if the second argument is also supplied when x is a scalar\n input, if first and second arguments have different shapes, or if the\n first argument has complex values\n\n Examples\n --------\n >>> import numpy as np\n >>> np.isposinf(np.inf)\n True\n >>> np.isposinf(-np.inf)\n False\n >>> np.isposinf([-np.inf, 0., np.inf])\n array([False, False, True])\n\n >>> x = np.array([-np.inf, 0., np.inf])\n >>> y = np.array([2, 2, 2])\n >>> np.isposinf(x, y)\n array([0, 0, 1])\n >>> y\n array([0, 0, 1])\n\n """\n is_inf = nx.isinf(x)\n try:\n signbit = ~nx.signbit(x)\n except TypeError as e:\n dtype = nx.asanyarray(x).dtype\n raise TypeError(f'This operation is not supported for {dtype} values '\n 'because it would be ambiguous.') from e\n else:\n return nx.logical_and(is_inf, signbit, out)\n\n\n@array_function_dispatch(_dispatcher, verify=False, module='numpy')\ndef isneginf(x, out=None):\n """\n Test element-wise for negative infinity, return result as bool array.\n\n Parameters\n 
----------\n x : array_like\n The input array.\n out : array_like, optional\n A location into which the result is stored. If provided, it must have a\n shape that the input broadcasts to. If not provided or None, a\n freshly-allocated boolean array is returned.\n\n Returns\n -------\n out : ndarray\n A boolean array with the same dimensions as the input.\n If second argument is not supplied then a numpy boolean array is\n returned with values True where the corresponding element of the\n input is negative infinity and values False where the element of\n the input is not negative infinity.\n\n If a second argument is supplied the result is stored there. If the\n type of that array is a numeric type the result is represented as\n zeros and ones, if the type is boolean then as False and True. The\n return value `out` is then a reference to that array.\n\n See Also\n --------\n isinf, isposinf, isnan, isfinite\n\n Notes\n -----\n NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic\n (IEEE 754).\n\n Errors result if the second argument is also supplied when x is a scalar\n input, if first and second arguments have different shapes, or if the\n first argument has complex values.\n\n Examples\n --------\n >>> import numpy as np\n >>> np.isneginf(-np.inf)\n True\n >>> np.isneginf(np.inf)\n False\n >>> np.isneginf([-np.inf, 0., np.inf])\n array([ True, False, False])\n\n >>> x = np.array([-np.inf, 0., np.inf])\n >>> y = np.array([2, 2, 2])\n >>> np.isneginf(x, y)\n array([1, 0, 0])\n >>> y\n array([1, 0, 0])\n\n """\n is_inf = nx.isinf(x)\n try:\n signbit = nx.signbit(x)\n except TypeError as e:\n dtype = nx.asanyarray(x).dtype\n raise TypeError(f'This operation is not supported for {dtype} values '\n 'because it would be ambiguous.') from e\n else:\n return nx.logical_and(is_inf, signbit, out)\n | .venv\Lib\site-packages\numpy\lib\_ufunclike_impl.py | _ufunclike_impl.py | Python | 6,516 | 0.95 | 0.115942 | 0.017544 | awesome-app | 391 | 
2024-03-15T16:40:53.318417 | BSD-3-Clause | false | b4a89db96c9d017f9619a70931889ece |
from typing import Any, TypeVar, overload\n\nimport numpy as np\nfrom numpy import floating, object_\nfrom numpy._typing import (\n NDArray,\n _ArrayLikeFloat_co,\n _ArrayLikeObject_co,\n _FloatLike_co,\n)\n\n__all__ = ["fix", "isneginf", "isposinf"]\n\n_ArrayT = TypeVar("_ArrayT", bound=NDArray[Any])\n\n@overload\ndef fix( # type: ignore[misc]\n x: _FloatLike_co,\n out: None = ...,\n) -> floating: ...\n@overload\ndef fix(\n x: _ArrayLikeFloat_co,\n out: None = ...,\n) -> NDArray[floating]: ...\n@overload\ndef fix(\n x: _ArrayLikeObject_co,\n out: None = ...,\n) -> NDArray[object_]: ...\n@overload\ndef fix(\n x: _ArrayLikeFloat_co | _ArrayLikeObject_co,\n out: _ArrayT,\n) -> _ArrayT: ...\n\n@overload\ndef isposinf( # type: ignore[misc]\n x: _FloatLike_co,\n out: None = ...,\n) -> np.bool: ...\n@overload\ndef isposinf(\n x: _ArrayLikeFloat_co,\n out: None = ...,\n) -> NDArray[np.bool]: ...\n@overload\ndef isposinf(\n x: _ArrayLikeFloat_co,\n out: _ArrayT,\n) -> _ArrayT: ...\n\n@overload\ndef isneginf( # type: ignore[misc]\n x: _FloatLike_co,\n out: None = ...,\n) -> np.bool: ...\n@overload\ndef isneginf(\n x: _ArrayLikeFloat_co,\n out: None = ...,\n) -> NDArray[np.bool]: ...\n@overload\ndef isneginf(\n x: _ArrayLikeFloat_co,\n out: _ArrayT,\n) -> _ArrayT: ...\n | .venv\Lib\site-packages\numpy\lib\_ufunclike_impl.pyi | _ufunclike_impl.pyi | Other | 1,355 | 0.95 | 0.149254 | 0 | react-lib | 958 | 2024-10-24T08:35:50.921166 | BSD-3-Clause | false | 221f234dcfd0ec7a50245d650f22d0be |
"""\nContainer class for backward compatibility with NumArray.\n\nThe user_array.container class exists for backward compatibility with NumArray\nand is not meant to be used in new code. If you need to create an array\ncontainer class, we recommend either creating a class that wraps an ndarray\nor subclasses ndarray.\n\n"""\nfrom numpy._core import (\n absolute,\n add,\n arange,\n array,\n asarray,\n bitwise_and,\n bitwise_or,\n bitwise_xor,\n divide,\n equal,\n greater,\n greater_equal,\n invert,\n left_shift,\n less,\n less_equal,\n multiply,\n not_equal,\n power,\n remainder,\n reshape,\n right_shift,\n shape,\n sin,\n sqrt,\n subtract,\n transpose,\n)\nfrom numpy._core.overrides import set_module\n\n\n@set_module("numpy.lib.user_array")\nclass container:\n """\n container(data, dtype=None, copy=True)\n\n Standard container-class for easy multiple-inheritance.\n\n Methods\n -------\n copy\n byteswap\n astype\n\n """\n def __init__(self, data, dtype=None, copy=True):\n self.array = array(data, dtype, copy=copy)\n\n def __repr__(self):\n if self.ndim > 0:\n return self.__class__.__name__ + repr(self.array)[len("array"):]\n else:\n return self.__class__.__name__ + "(" + repr(self.array) + ")"\n\n def __array__(self, t=None):\n if t:\n return self.array.astype(t)\n return self.array\n\n # Array as sequence\n def __len__(self):\n return len(self.array)\n\n def __getitem__(self, index):\n return self._rc(self.array[index])\n\n def __setitem__(self, index, value):\n self.array[index] = asarray(value, self.dtype)\n\n def __abs__(self):\n return self._rc(absolute(self.array))\n\n def __neg__(self):\n return self._rc(-self.array)\n\n def __add__(self, other):\n return self._rc(self.array + asarray(other))\n\n __radd__ = __add__\n\n def __iadd__(self, other):\n add(self.array, other, self.array)\n return self\n\n def __sub__(self, other):\n return self._rc(self.array - asarray(other))\n\n def __rsub__(self, other):\n return self._rc(asarray(other) - self.array)\n\n def 
__isub__(self, other):\n subtract(self.array, other, self.array)\n return self\n\n def __mul__(self, other):\n return self._rc(multiply(self.array, asarray(other)))\n\n __rmul__ = __mul__\n\n def __imul__(self, other):\n multiply(self.array, other, self.array)\n return self\n\n def __mod__(self, other):\n return self._rc(remainder(self.array, other))\n\n def __rmod__(self, other):\n return self._rc(remainder(other, self.array))\n\n def __imod__(self, other):\n remainder(self.array, other, self.array)\n return self\n\n def __divmod__(self, other):\n return (self._rc(divide(self.array, other)),\n self._rc(remainder(self.array, other)))\n\n def __rdivmod__(self, other):\n return (self._rc(divide(other, self.array)),\n self._rc(remainder(other, self.array)))\n\n def __pow__(self, other):\n return self._rc(power(self.array, asarray(other)))\n\n def __rpow__(self, other):\n return self._rc(power(asarray(other), self.array))\n\n def __ipow__(self, other):\n power(self.array, other, self.array)\n return self\n\n def __lshift__(self, other):\n return self._rc(left_shift(self.array, other))\n\n def __rshift__(self, other):\n return self._rc(right_shift(self.array, other))\n\n def __rlshift__(self, other):\n return self._rc(left_shift(other, self.array))\n\n def __rrshift__(self, other):\n return self._rc(right_shift(other, self.array))\n\n def __ilshift__(self, other):\n left_shift(self.array, other, self.array)\n return self\n\n def __irshift__(self, other):\n right_shift(self.array, other, self.array)\n return self\n\n def __and__(self, other):\n return self._rc(bitwise_and(self.array, other))\n\n def __rand__(self, other):\n return self._rc(bitwise_and(other, self.array))\n\n def __iand__(self, other):\n bitwise_and(self.array, other, self.array)\n return self\n\n def __xor__(self, other):\n return self._rc(bitwise_xor(self.array, other))\n\n def __rxor__(self, other):\n return self._rc(bitwise_xor(other, self.array))\n\n def __ixor__(self, other):\n 
bitwise_xor(self.array, other, self.array)\n return self\n\n def __or__(self, other):\n return self._rc(bitwise_or(self.array, other))\n\n def __ror__(self, other):\n return self._rc(bitwise_or(other, self.array))\n\n def __ior__(self, other):\n bitwise_or(self.array, other, self.array)\n return self\n\n def __pos__(self):\n return self._rc(self.array)\n\n def __invert__(self):\n return self._rc(invert(self.array))\n\n def _scalarfunc(self, func):\n if self.ndim == 0:\n return func(self[0])\n else:\n raise TypeError(\n "only rank-0 arrays can be converted to Python scalars.")\n\n def __complex__(self):\n return self._scalarfunc(complex)\n\n def __float__(self):\n return self._scalarfunc(float)\n\n def __int__(self):\n return self._scalarfunc(int)\n\n def __hex__(self):\n return self._scalarfunc(hex)\n\n def __oct__(self):\n return self._scalarfunc(oct)\n\n def __lt__(self, other):\n return self._rc(less(self.array, other))\n\n def __le__(self, other):\n return self._rc(less_equal(self.array, other))\n\n def __eq__(self, other):\n return self._rc(equal(self.array, other))\n\n def __ne__(self, other):\n return self._rc(not_equal(self.array, other))\n\n def __gt__(self, other):\n return self._rc(greater(self.array, other))\n\n def __ge__(self, other):\n return self._rc(greater_equal(self.array, other))\n\n def copy(self):\n ""\n return self._rc(self.array.copy())\n\n def tobytes(self):\n ""\n return self.array.tobytes()\n\n def byteswap(self):\n ""\n return self._rc(self.array.byteswap())\n\n def astype(self, typecode):\n ""\n return self._rc(self.array.astype(typecode))\n\n def _rc(self, a):\n if len(shape(a)) == 0:\n return a\n else:\n return self.__class__(a)\n\n def __array_wrap__(self, *args):\n return self.__class__(args[0])\n\n def __setattr__(self, attr, value):\n if attr == 'array':\n object.__setattr__(self, attr, value)\n return\n try:\n self.array.__setattr__(attr, value)\n except AttributeError:\n object.__setattr__(self, attr, value)\n\n # Only called 
after other approaches fail.\n def __getattr__(self, attr):\n if (attr == 'array'):\n return object.__getattribute__(self, attr)\n return self.array.__getattribute__(attr)\n\n\n#############################################################\n# Test of class container\n#############################################################\nif __name__ == '__main__':\n temp = reshape(arange(10000), (100, 100))\n\n ua = container(temp)\n # new object created begin test\n print(dir(ua))\n print(shape(ua), ua.shape) # I have changed Numeric.py\n\n ua_small = ua[:3, :5]\n print(ua_small)\n # this did not change ua[0,0], which is not normal behavior\n ua_small[0, 0] = 10\n print(ua_small[0, 0], ua[0, 0])\n print(sin(ua_small) / 3. * 6. + sqrt(ua_small ** 2))\n print(less(ua_small, 103), type(less(ua_small, 103)))\n print(type(ua_small * reshape(arange(15), shape(ua_small))))\n print(reshape(ua_small, (5, 3)))\n print(transpose(ua_small))\n | .venv\Lib\site-packages\numpy\lib\_user_array_impl.py | _user_array_impl.py | Python | 7,996 | 0.95 | 0.26087 | 0.030837 | react-lib | 665 | 2025-06-26T11:44:02.400076 | Apache-2.0 | false | a0b4a629273c349a7037a1e9a155901a |
from types import EllipsisType\nfrom typing import Any, Generic, Self, SupportsIndex, TypeAlias, overload\n\nfrom _typeshed import Incomplete\nfrom typing_extensions import TypeVar, override\n\nimport numpy as np\nimport numpy.typing as npt\nfrom numpy._typing import (\n _AnyShape,\n _ArrayLike,\n _ArrayLikeBool_co,\n _ArrayLikeInt_co,\n _DTypeLike,\n)\n\n###\n\n_ScalarT = TypeVar("_ScalarT", bound=np.generic)\n_ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...])\n_ShapeT_co = TypeVar("_ShapeT_co", bound=tuple[int, ...], default=_AnyShape, covariant=True)\n_DTypeT = TypeVar("_DTypeT", bound=np.dtype)\n_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True)\n\n_BoolArrayT = TypeVar("_BoolArrayT", bound=container[Any, np.dtype[np.bool]])\n_IntegralArrayT = TypeVar("_IntegralArrayT", bound=container[Any, np.dtype[np.bool | np.integer | np.object_]])\n_RealContainerT = TypeVar(\n "_RealContainerT",\n bound=container[Any, np.dtype[np.bool | np.integer | np.floating | np.timedelta64 | np.object_]],\n)\n_NumericContainerT = TypeVar("_NumericContainerT", bound=container[Any, np.dtype[np.number | np.timedelta64 | np.object_]])\n\n_ArrayInt_co: TypeAlias = npt.NDArray[np.integer | np.bool]\n\n_ToIndexSlice: TypeAlias = slice | EllipsisType | _ArrayInt_co | None\n_ToIndexSlices: TypeAlias = _ToIndexSlice | tuple[_ToIndexSlice, ...]\n_ToIndex: TypeAlias = SupportsIndex | _ToIndexSlice\n_ToIndices: TypeAlias = _ToIndex | tuple[_ToIndex, ...]\n\n###\n\nclass container(Generic[_ShapeT_co, _DTypeT_co]):\n array: np.ndarray[_ShapeT_co, _DTypeT_co]\n\n @overload\n def __init__(\n self,\n /,\n data: container[_ShapeT_co, _DTypeT_co] | np.ndarray[_ShapeT_co, _DTypeT_co],\n dtype: None = None,\n copy: bool = True,\n ) -> None: ...\n @overload\n def __init__(\n self: container[Any, np.dtype[_ScalarT]],\n /,\n data: _ArrayLike[_ScalarT],\n dtype: None = None,\n copy: bool = True,\n ) -> None: ...\n @overload\n def __init__(\n self: container[Any, 
np.dtype[_ScalarT]],\n /,\n data: npt.ArrayLike,\n dtype: _DTypeLike[_ScalarT],\n copy: bool = True,\n ) -> None: ...\n @overload\n def __init__(self, /, data: npt.ArrayLike, dtype: npt.DTypeLike | None = None, copy: bool = True) -> None: ...\n\n #\n def __complex__(self, /) -> complex: ...\n def __float__(self, /) -> float: ...\n def __int__(self, /) -> int: ...\n def __hex__(self, /) -> str: ...\n def __oct__(self, /) -> str: ...\n\n #\n @override\n def __eq__(self, other: object, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride]\n @override\n def __ne__(self, other: object, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride]\n\n #\n def __lt__(self, other: npt.ArrayLike, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ...\n def __le__(self, other: npt.ArrayLike, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ...\n def __gt__(self, other: npt.ArrayLike, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ...\n def __ge__(self, other: npt.ArrayLike, /) -> container[_ShapeT_co, np.dtype[np.bool]]: ...\n\n #\n def __len__(self, /) -> int: ...\n\n # keep in sync with np.ndarray\n @overload\n def __getitem__(self, key: _ArrayInt_co | tuple[_ArrayInt_co, ...], /) -> container[_ShapeT_co, _DTypeT_co]: ...\n @overload\n def __getitem__(self, key: _ToIndexSlices, /) -> container[_AnyShape, _DTypeT_co]: ...\n @overload\n def __getitem__(self, key: _ToIndices, /) -> Any: ...\n @overload\n def __getitem__(self: container[Any, np.dtype[np.void]], key: list[str], /) -> container[_ShapeT_co, np.dtype[np.void]]: ...\n @overload\n def __getitem__(self: container[Any, np.dtype[np.void]], key: str, /) -> container[_ShapeT_co, np.dtype]: ...\n\n # keep in sync with np.ndarray\n @overload\n def __setitem__(self, index: _ToIndices, value: object, /) -> None: ...\n @overload\n def __setitem__(self: container[Any, np.dtype[np.void]], 
key: str | list[str], value: object, /) -> None: ...\n\n # keep in sync with np.ndarray\n @overload\n def __abs__(self: container[_ShapeT, np.dtype[np.complex64]], /) -> container[_ShapeT, np.dtype[np.float32]]: ... # type: ignore[overload-overlap]\n @overload\n def __abs__(self: container[_ShapeT, np.dtype[np.complex128]], /) -> container[_ShapeT, np.dtype[np.float64]]: ...\n @overload\n def __abs__(self: container[_ShapeT, np.dtype[np.complex192]], /) -> container[_ShapeT, np.dtype[np.float96]]: ...\n @overload\n def __abs__(self: container[_ShapeT, np.dtype[np.complex256]], /) -> container[_ShapeT, np.dtype[np.float128]]: ...\n @overload\n def __abs__(self: _RealContainerT, /) -> _RealContainerT: ...\n\n #\n def __neg__(self: _NumericContainerT, /) -> _NumericContainerT: ... # noqa: PYI019\n def __pos__(self: _NumericContainerT, /) -> _NumericContainerT: ... # noqa: PYI019\n def __invert__(self: _IntegralArrayT, /) -> _IntegralArrayT: ... # noqa: PYI019\n\n # TODO(jorenham): complete these binary ops\n\n #\n def __add__(self, other: npt.ArrayLike, /) -> Incomplete: ...\n def __radd__(self, other: npt.ArrayLike, /) -> Incomplete: ...\n def __iadd__(self, other: npt.ArrayLike, /) -> Self: ...\n\n #\n def __sub__(self, other: npt.ArrayLike, /) -> Incomplete: ...\n def __rsub__(self, other: npt.ArrayLike, /) -> Incomplete: ...\n def __isub__(self, other: npt.ArrayLike, /) -> Self: ...\n\n #\n def __mul__(self, other: npt.ArrayLike, /) -> Incomplete: ...\n def __rmul__(self, other: npt.ArrayLike, /) -> Incomplete: ...\n def __imul__(self, other: npt.ArrayLike, /) -> Self: ...\n\n #\n def __mod__(self, other: npt.ArrayLike, /) -> Incomplete: ...\n def __rmod__(self, other: npt.ArrayLike, /) -> Incomplete: ...\n def __imod__(self, other: npt.ArrayLike, /) -> Self: ...\n\n #\n def __divmod__(self, other: npt.ArrayLike, /) -> tuple[Incomplete, Incomplete]: ...\n def __rdivmod__(self, other: npt.ArrayLike, /) -> tuple[Incomplete, Incomplete]: ...\n\n #\n def __pow__(self, 
other: npt.ArrayLike, /) -> Incomplete: ...\n def __rpow__(self, other: npt.ArrayLike, /) -> Incomplete: ...\n def __ipow__(self, other: npt.ArrayLike, /) -> Self: ...\n\n #\n def __lshift__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.integer]]: ...\n def __rlshift__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.integer]]: ...\n def __ilshift__(self, other: _ArrayLikeInt_co, /) -> Self: ...\n\n #\n def __rshift__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.integer]]: ...\n def __rrshift__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.integer]]: ...\n def __irshift__(self, other: _ArrayLikeInt_co, /) -> Self: ...\n\n #\n @overload\n def __and__(\n self: container[Any, np.dtype[np.bool]], other: _ArrayLikeBool_co, /\n ) -> container[_AnyShape, np.dtype[np.bool]]: ...\n @overload\n def __and__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.bool | np.integer]]: ...\n __rand__ = __and__\n @overload\n def __iand__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ...\n @overload\n def __iand__(self, other: _ArrayLikeInt_co, /) -> Self: ...\n\n #\n @overload\n def __xor__(\n self: container[Any, np.dtype[np.bool]], other: _ArrayLikeBool_co, /\n ) -> container[_AnyShape, np.dtype[np.bool]]: ...\n @overload\n def __xor__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.bool | np.integer]]: ...\n __rxor__ = __xor__\n @overload\n def __ixor__(self: _BoolArrayT, other: _ArrayLikeBool_co, /) -> _BoolArrayT: ...\n @overload\n def __ixor__(self, other: _ArrayLikeInt_co, /) -> Self: ...\n\n #\n @overload\n def __or__(\n self: container[Any, np.dtype[np.bool]], other: _ArrayLikeBool_co, /\n ) -> container[_AnyShape, np.dtype[np.bool]]: ...\n @overload\n def __or__(self, other: _ArrayLikeInt_co, /) -> container[_AnyShape, np.dtype[np.bool | np.integer]]: ...\n __ror__ = __or__\n @overload\n def __ior__(self: _BoolArrayT, other: 
_ArrayLikeBool_co, /) -> _BoolArrayT: ...\n @overload\n def __ior__(self, other: _ArrayLikeInt_co, /) -> Self: ...\n\n #\n @overload\n def __array__(self, /, t: None = None) -> np.ndarray[_ShapeT_co, _DTypeT_co]: ...\n @overload\n def __array__(self, /, t: _DTypeT) -> np.ndarray[_ShapeT_co, _DTypeT]: ...\n\n #\n @overload\n def __array_wrap__(self, arg0: npt.ArrayLike, /) -> container[_ShapeT_co, _DTypeT_co]: ...\n @overload\n def __array_wrap__(self, a: np.ndarray[_ShapeT, _DTypeT], c: Any = ..., s: Any = ..., /) -> container[_ShapeT, _DTypeT]: ...\n\n #\n def copy(self, /) -> Self: ...\n def tobytes(self, /) -> bytes: ...\n def byteswap(self, /) -> Self: ...\n def astype(self, /, typecode: _DTypeLike[_ScalarT]) -> container[_ShapeT_co, np.dtype[_ScalarT]]: ...\n | .venv\Lib\site-packages\numpy\lib\_user_array_impl.pyi | _user_array_impl.pyi | Other | 9,335 | 0.95 | 0.333333 | 0.130208 | node-utils | 591 | 2023-07-17T03:45:26.162038 | Apache-2.0 | false | 77a1260109c4005e9583752b7304bab5 |
import functools\nimport os\nimport platform\nimport sys\nimport textwrap\nimport types\nimport warnings\n\nimport numpy as np\nfrom numpy._core import ndarray\nfrom numpy._utils import set_module\n\n__all__ = [\n 'get_include', 'info', 'show_runtime'\n]\n\n\n@set_module('numpy')\ndef show_runtime():\n """\n Print information about various resources in the system\n including available intrinsic support and BLAS/LAPACK library\n in use\n\n .. versionadded:: 1.24.0\n\n See Also\n --------\n show_config : Show libraries in the system on which NumPy was built.\n\n Notes\n -----\n 1. Information is derived with the help of `threadpoolctl <https://pypi.org/project/threadpoolctl/>`_\n library if available.\n 2. SIMD related information is derived from ``__cpu_features__``,\n ``__cpu_baseline__`` and ``__cpu_dispatch__``\n\n """\n from pprint import pprint\n\n from numpy._core._multiarray_umath import (\n __cpu_baseline__,\n __cpu_dispatch__,\n __cpu_features__,\n )\n config_found = [{\n "numpy_version": np.__version__,\n "python": sys.version,\n "uname": platform.uname(),\n }]\n features_found, features_not_found = [], []\n for feature in __cpu_dispatch__:\n if __cpu_features__[feature]:\n features_found.append(feature)\n else:\n features_not_found.append(feature)\n config_found.append({\n "simd_extensions": {\n "baseline": __cpu_baseline__,\n "found": features_found,\n "not_found": features_not_found\n }\n })\n try:\n from threadpoolctl import threadpool_info\n config_found.extend(threadpool_info())\n except ImportError:\n print("WARNING: `threadpoolctl` not found in system!"\n " Install it by `pip install threadpoolctl`."\n " Once installed, try `np.show_runtime` again"\n " for more detailed build information")\n pprint(config_found)\n\n\n@set_module('numpy')\ndef get_include():\n """\n Return the directory that contains the NumPy \\*.h header files.\n\n Extension modules that need to compile against NumPy may need to use this\n function to locate the appropriate 
include directory.\n\n Notes\n -----\n When using ``setuptools``, for example in ``setup.py``::\n\n import numpy as np\n ...\n Extension('extension_name', ...\n include_dirs=[np.get_include()])\n ...\n\n Note that a CLI tool ``numpy-config`` was introduced in NumPy 2.0, using\n that is likely preferred for build systems other than ``setuptools``::\n\n $ numpy-config --cflags\n -I/path/to/site-packages/numpy/_core/include\n\n # Or rely on pkg-config:\n $ export PKG_CONFIG_PATH=$(numpy-config --pkgconfigdir)\n $ pkg-config --cflags\n -I/path/to/site-packages/numpy/_core/include\n\n Examples\n --------\n >>> np.get_include()\n '.../site-packages/numpy/core/include' # may vary\n\n """\n import numpy\n if numpy.show_config is None:\n # running from numpy source directory\n d = os.path.join(os.path.dirname(numpy.__file__), '_core', 'include')\n else:\n # using installed numpy core headers\n import numpy._core as _core\n d = os.path.join(os.path.dirname(_core.__file__), 'include')\n return d\n\n\nclass _Deprecate:\n """\n Decorator class to deprecate old functions.\n\n Refer to `deprecate` for details.\n\n See Also\n --------\n deprecate\n\n """\n\n def __init__(self, old_name=None, new_name=None, message=None):\n self.old_name = old_name\n self.new_name = new_name\n self.message = message\n\n def __call__(self, func, *args, **kwargs):\n """\n Decorator call. 
Refer to ``decorate``.\n\n """\n old_name = self.old_name\n new_name = self.new_name\n message = self.message\n\n if old_name is None:\n old_name = func.__name__\n if new_name is None:\n depdoc = f"`{old_name}` is deprecated!"\n else:\n depdoc = f"`{old_name}` is deprecated, use `{new_name}` instead!"\n\n if message is not None:\n depdoc += "\n" + message\n\n @functools.wraps(func)\n def newfunc(*args, **kwds):\n warnings.warn(depdoc, DeprecationWarning, stacklevel=2)\n return func(*args, **kwds)\n\n newfunc.__name__ = old_name\n doc = func.__doc__\n if doc is None:\n doc = depdoc\n else:\n lines = doc.expandtabs().split('\n')\n indent = _get_indent(lines[1:])\n if lines[0].lstrip():\n # Indent the original first line to let inspect.cleandoc()\n # dedent the docstring despite the deprecation notice.\n doc = indent * ' ' + doc\n else:\n # Remove the same leading blank lines as cleandoc() would.\n skip = len(lines[0]) + 1\n for line in lines[1:]:\n if len(line) > indent:\n break\n skip += len(line) + 1\n doc = doc[skip:]\n depdoc = textwrap.indent(depdoc, ' ' * indent)\n doc = f'{depdoc}\n\n{doc}'\n newfunc.__doc__ = doc\n\n return newfunc\n\n\ndef _get_indent(lines):\n """\n Determines the leading whitespace that could be removed from all the lines.\n """\n indent = sys.maxsize\n for line in lines:\n content = len(line.lstrip())\n if content:\n indent = min(indent, len(line) - content)\n if indent == sys.maxsize:\n indent = 0\n return indent\n\n\ndef deprecate(*args, **kwargs):\n """\n Issues a DeprecationWarning, adds warning to `old_name`'s\n docstring, rebinds ``old_name.__name__`` and returns the new\n function object.\n\n This function may also be used as a decorator.\n\n .. deprecated:: 2.0\n Use `~warnings.warn` with :exc:`DeprecationWarning` instead.\n\n Parameters\n ----------\n func : function\n The function to be deprecated.\n old_name : str, optional\n The name of the function to be deprecated. 
def deprecate(*args, **kwargs):
    """
    Issues a DeprecationWarning, adds warning to `old_name`'s
    docstring, rebinds ``old_name.__name__`` and returns the new
    function object.

    This function may also be used as a decorator.

    .. deprecated:: 2.0
        Use `~warnings.warn` with :exc:`DeprecationWarning` instead.

    Parameters
    ----------
    func : function
        The function to be deprecated.
    old_name : str, optional
        The name of the function to be deprecated. Default is None, in
        which case the name of `func` is used.
    new_name : str, optional
        The new name for the function. Default is None, in which case the
        deprecation message is that `old_name` is deprecated. If given, the
        deprecation message is that `old_name` is deprecated and `new_name`
        should be used instead.
    message : str, optional
        Additional explanation of the deprecation.  Displayed in the
        docstring after the warning.

    Returns
    -------
    old_func : function
        The deprecated function.

    Examples
    --------
    Note that ``olduint`` returns a value after printing Deprecation
    Warning:

    >>> olduint = np.lib.utils.deprecate(np.uint)
    DeprecationWarning: `uint64` is deprecated! # may vary
    >>> olduint(6)
    6

    """
    # Deprecated in NumPy 2.0, 2023-07-11
    warnings.warn(
        "`deprecate` is deprecated, "
        "use `warn` with `DeprecationWarning` instead. "
        "(deprecated in NumPy 2.0)",
        DeprecationWarning,
        stacklevel=2
    )

    # Used directly on a function (decorator form, or called with the
    # target as the first positional argument): decorate it immediately.
    if args:
        fn, *rest = args
        return _Deprecate(*rest, **kwargs)(fn)
    # Called with configuration only: hand back the decorator object.
    return _Deprecate(**kwargs)
Displayed in the\n docstring after the warning.\n\n Returns\n -------\n obj : object\n\n """\n\n # Deprecated in NumPy 2.0, 2023-07-11\n warnings.warn(\n "`deprecate` is deprecated, "\n "use `warn` with `DeprecationWarning` instead. "\n "(deprecated in NumPy 2.0)",\n DeprecationWarning,\n stacklevel=2\n )\n\n return _Deprecate(message=msg)\n\n\n#-----------------------------------------------------------------------------\n\n\n# NOTE: pydoc defines a help function which works similarly to this\n# except it uses a pager to take over the screen.\n\n# combine name and arguments and split to multiple lines of width\n# characters. End lines on a comma and begin argument list indented with\n# the rest of the arguments.\ndef _split_line(name, arguments, width):\n firstwidth = len(name)\n k = firstwidth\n newstr = name\n sepstr = ", "\n arglist = arguments.split(sepstr)\n for argument in arglist:\n if k == firstwidth:\n addstr = ""\n else:\n addstr = sepstr\n k = k + len(argument) + len(addstr)\n if k > width:\n k = firstwidth + 1 + len(argument)\n newstr = newstr + ",\n" + " " * (firstwidth + 2) + argument\n else:\n newstr = newstr + addstr + argument\n return newstr\n\n\n_namedict = None\n_dictlist = None\n\n# Traverse all module directories underneath globals\n# to see if something is defined\ndef _makenamedict(module='numpy'):\n module = __import__(module, globals(), locals(), [])\n thedict = {module.__name__: module.__dict__}\n dictlist = [module.__name__]\n totraverse = [module.__dict__]\n while True:\n if len(totraverse) == 0:\n break\n thisdict = totraverse.pop(0)\n for x in thisdict.keys():\n if isinstance(thisdict[x], types.ModuleType):\n modname = thisdict[x].__name__\n if modname not in dictlist:\n moddict = thisdict[x].__dict__\n dictlist.append(modname)\n totraverse.append(moddict)\n thedict[modname] = moddict\n return thedict, dictlist\n\n\ndef _info(obj, output=None):\n """Provide information about ndarray obj.\n\n Parameters\n ----------\n obj : ndarray\n 
Must be ndarray, not checked.\n output\n Where printed output goes.\n\n Notes\n -----\n Copied over from the numarray module prior to its removal.\n Adapted somewhat as only numpy is an option now.\n\n Called by info.\n\n """\n extra = ""\n tic = ""\n bp = lambda x: x\n cls = getattr(obj, '__class__', type(obj))\n nm = getattr(cls, '__name__', cls)\n strides = obj.strides\n endian = obj.dtype.byteorder\n\n if output is None:\n output = sys.stdout\n\n print("class: ", nm, file=output)\n print("shape: ", obj.shape, file=output)\n print("strides: ", strides, file=output)\n print("itemsize: ", obj.itemsize, file=output)\n print("aligned: ", bp(obj.flags.aligned), file=output)\n print("contiguous: ", bp(obj.flags.contiguous), file=output)\n print("fortran: ", obj.flags.fortran, file=output)\n print(\n f"data pointer: {hex(obj.ctypes._as_parameter_.value)}{extra}",\n file=output\n )\n print("byteorder: ", end=' ', file=output)\n if endian in ['|', '=']:\n print(f"{tic}{sys.byteorder}{tic}", file=output)\n byteswap = False\n elif endian == '>':\n print(f"{tic}big{tic}", file=output)\n byteswap = sys.byteorder != "big"\n else:\n print(f"{tic}little{tic}", file=output)\n byteswap = sys.byteorder != "little"\n print("byteswap: ", bp(byteswap), file=output)\n print(f"type: {obj.dtype}", file=output)\n\n\n@set_module('numpy')\ndef info(object=None, maxwidth=76, output=None, toplevel='numpy'):\n """\n Get help information for an array, function, class, or module.\n\n Parameters\n ----------\n object : object or str, optional\n Input object or name to get information about. If `object` is\n an `ndarray` instance, information about the array is printed.\n If `object` is a numpy object, its docstring is given. 
@set_module('numpy')
def info(object=None, maxwidth=76, output=None, toplevel='numpy'):
    """
    Get help information for an array, function, class, or module.

    Parameters
    ----------
    object : object or str, optional
        Input object or name to get information about. If `object` is
        an `ndarray` instance, information about the array is printed.
        If `object` is a numpy object, its docstring is given. If it is
        a string, available modules are searched for matching objects.
        If None, information about `info` itself is returned.
    maxwidth : int, optional
        Printing width.
    output : file like object, optional
        File like object that the output is written to, default is
        ``None``, in which case ``sys.stdout`` will be used.
        The object has to be opened in 'w' or 'a' mode.
    toplevel : str, optional
        Start search at this level.

    Notes
    -----
    When used interactively with an object, ``np.info(obj)`` is equivalent
    to ``help(obj)`` on the Python prompt or ``obj?`` on the IPython
    prompt.

    Examples
    --------
    >>> np.info(np.polyval) # doctest: +SKIP
    polyval(p, x)
    Evaluate the polynomial p at x.
    ...

    When using a string for `object` it is possible to get multiple results.

    >>> np.info('fft') # doctest: +SKIP
    *** Found in numpy ***
    Core FFT routines
    ...
    *** Found in numpy.fft ***
    fft(a, n=None, axis=-1)
    ...
    *** Repeat reference found in numpy.fft.fftpack ***
    *** Total of 3 references found. ***

    When the argument is an array, information about the array is printed.

    >>> a = np.array([[1 + 2j, 3, -4], [-5j, 6, 0]], dtype=np.complex64)
    >>> np.info(a)
    class: ndarray
    shape: (2, 3)
    strides: (24, 8)
    itemsize: 8
    aligned: True
    contiguous: True
    fortran: False
    data pointer: 0x562b6e0d2860 # may vary
    byteorder: little
    byteswap: False
    type: complex64

    """
    global _namedict, _dictlist
    # Local import to speed up numpy's import time.
    import inspect
    import pydoc

    # Unwrap lazy-import (ppimport) proxies before inspecting the object.
    if (hasattr(object, '_ppimport_importer') or
            hasattr(object, '_ppimport_module')):
        object = object._ppimport_module
    elif hasattr(object, '_ppimport_attr'):
        object = object._ppimport_attr

    if output is None:
        output = sys.stdout

    if object is None:
        # No argument: show help on info() itself.
        info(info)
    elif isinstance(object, ndarray):
        # Arrays get the attribute dump from _info().
        _info(object, output=output)
    elif isinstance(object, str):
        # Name lookup: search every (sub)module dict under `toplevel`.
        # The module map is built lazily once and cached in the globals.
        if _namedict is None:
            _namedict, _dictlist = _makenamedict(toplevel)
        numfound = 0
        objlist = []  # ids already reported, to flag repeat references
        for namestr in _dictlist:
            try:
                obj = _namedict[namestr][object]
                if id(obj) in objlist:
                    print(f"\n *** Repeat reference found in {namestr} *** ",
                          file=output
                          )
                else:
                    objlist.append(id(obj))
                    print(f" *** Found in {namestr} ***", file=output)
                    info(obj)
                    print("-" * maxwidth, file=output)
                numfound += 1
            except KeyError:
                pass
        if numfound == 0:
            print(f"Help for {object} not found.", file=output)
        else:
            print("\n "
                  "*** Total of %d references found. ***" % numfound,
                  file=output
                  )

    elif inspect.isfunction(object) or inspect.ismethod(object):
        # Callables: print the signature (wrapped to maxwidth) and docstring.
        name = object.__name__
        try:
            arguments = str(inspect.signature(object))
        except Exception:
            # Builtins and some extension callables expose no signature.
            arguments = "()"

        if len(name + arguments) > maxwidth:
            argstr = _split_line(name, arguments, maxwidth)
        else:
            argstr = name + arguments

        print(" " + argstr + "\n", file=output)
        print(inspect.getdoc(object), file=output)

    elif inspect.isclass(object):
        # Classes: constructor signature, class (or __init__) docstring,
        # then one summary line per public method.
        name = object.__name__
        try:
            arguments = str(inspect.signature(object))
        except Exception:
            arguments = "()"

        if len(name + arguments) > maxwidth:
            argstr = _split_line(name, arguments, maxwidth)
        else:
            argstr = name + arguments

        print(" " + argstr + "\n", file=output)
        doc1 = inspect.getdoc(object)
        if doc1 is None:
            # Fall back to the constructor docstring when the class has none.
            if hasattr(object, '__init__'):
                print(inspect.getdoc(object.__init__), file=output)
        else:
            print(inspect.getdoc(object), file=output)

        methods = pydoc.allmethods(object)

        public_methods = [meth for meth in methods if meth[0] != '_']
        if public_methods:
            print("\n\nMethods:\n", file=output)
            for meth in public_methods:
                thisobj = getattr(object, meth, None)
                if thisobj is not None:
                    # splitdoc yields (one-line synopsis, rest of the doc).
                    methstr, other = pydoc.splitdoc(
                        inspect.getdoc(thisobj) or "None"
                    )
                    print(f" {meth} -- {methstr}", file=output)

    elif hasattr(object, '__doc__'):
        # Anything else with a docstring: just print it.
        print(inspect.getdoc(object), file=output)
def safe_eval(source):
    """
    Protected string evaluation.

    .. deprecated:: 2.0
        Use `ast.literal_eval` instead.

    Evaluate a string containing a Python literal expression without
    allowing the execution of arbitrary non-literal code.

    .. warning::

        This function is identical to :py:meth:`ast.literal_eval` and
        has the same security implications.  It may not always be safe
        to evaluate large input strings.

    Parameters
    ----------
    source : str
        The string to evaluate.

    Returns
    -------
    obj : object
        The result of evaluating `source`.

    Raises
    ------
    SyntaxError
        If the code has invalid Python syntax, or if it contains
        non-literal code.

    Examples
    --------
    >>> np.safe_eval('1')
    1
    >>> np.safe_eval('[1, 2, 3]')
    [1, 2, 3]
    >>> np.safe_eval('{"foo": ("bar", 10.0)}')
    {'foo': ('bar', 10.0)}

    """
    # Deprecated in NumPy 2.0, 2023-07-11
    deprecation_msg = (
        "`safe_eval` is deprecated. Use `ast.literal_eval` instead. "
        "Be aware of security implications, such as memory exhaustion "
        "based attacks (deprecated in NumPy 2.0)"
    )
    warnings.warn(deprecation_msg, DeprecationWarning, stacklevel=2)

    # Local import to speed up numpy's import time.
    import ast
    return ast.literal_eval(source)
If a scalar, either the\n input itself or a scalar NaN.\n """\n if data.size == 0:\n return result\n potential_nans = data.take(-1, axis=axis)\n n = np.isnan(potential_nans)\n # masked NaN values are ok, although for masked the copyto may fail for\n # unmasked ones (this was always broken) when the result is a scalar.\n if np.ma.isMaskedArray(n):\n n = n.filled(False)\n\n if not n.any():\n return result\n\n # Without given output, it is possible that the current result is a\n # numpy scalar, which is not writeable. If so, just return nan.\n if isinstance(result, np.generic):\n return potential_nans\n\n # Otherwise copy NaNs (if there are any)\n np.copyto(result, potential_nans, where=n)\n return result\n\ndef _opt_info():\n """\n Returns a string containing the CPU features supported\n by the current build.\n\n The format of the string can be explained as follows:\n - Dispatched features supported by the running machine end with `*`.\n - Dispatched features not supported by the running machine\n end with `?`.\n - Remaining features represent the baseline.\n\n Returns:\n str: A formatted string indicating the supported CPU features.\n """\n from numpy._core._multiarray_umath import (\n __cpu_baseline__,\n __cpu_dispatch__,\n __cpu_features__,\n )\n\n if len(__cpu_baseline__) == 0 and len(__cpu_dispatch__) == 0:\n return ''\n\n enabled_features = ' '.join(__cpu_baseline__)\n for feature in __cpu_dispatch__:\n if __cpu_features__[feature]:\n enabled_features += f" {feature}*"\n else:\n enabled_features += f" {feature}?"\n\n return enabled_features\n\ndef drop_metadata(dtype, /):\n """\n Returns the dtype unchanged if it contained no metadata or a copy of the\n dtype if it (or any of its structure dtypes) contained metadata.\n\n This utility is used by `np.save` and `np.savez` to drop metadata before\n saving.\n\n .. 
def drop_metadata(dtype, /):
    """
    Returns the dtype unchanged if it contained no metadata or a copy of the
    dtype if it (or any of its structure dtypes) contained metadata.

    This utility is used by `np.save` and `np.savez` to drop metadata before
    saving.

    .. note::

        Due to its limitation this function may move to a more appropriate
        home or change in the future and is considered semi-public API only.

    .. warning::

        This function does not preserve more strange things like record dtypes
        and user dtypes may simply return the wrong thing. If you need to be
        sure about the latter, check the result with:
        ``np.can_cast(new_dtype, dtype, casting="no")``.

    """
    if dtype.fields is not None:
        # Structured dtype: recurse into each field and rebuild only when
        # metadata was found somewhere (on the dtype itself or a field).
        has_metadata = dtype.metadata is not None

        names, formats, offsets, titles = [], [], [], []
        for name, field in dtype.fields.items():
            cleaned = drop_metadata(field[0])
            if cleaned is not field[0]:
                has_metadata = True

            names.append(name)
            formats.append(cleaned)
            offsets.append(field[1])
            titles.append(field[2] if len(field) == 3 else None)

        if not has_metadata:
            return dtype

        spec = {
            'names': names, 'formats': formats, 'offsets': offsets,
            'titles': titles, 'itemsize': dtype.itemsize}

        # NOTE: Could pass (dtype.type, spec) to preserve record dtypes...
        return np.dtype(spec, align=dtype.isalignedstruct)

    if dtype.subdtype is not None:
        # Subarray dtype: strip the base dtype and rewrap with the shape.
        base, shape = dtype.subdtype
        cleaned = drop_metadata(base)
        if dtype.metadata is None and cleaned is base:
            return dtype
        return np.dtype((cleaned, shape))

    # Normal unstructured dtype.
    if dtype.metadata is None:
        return dtype
    # Note that `dt.str` doesn't round-trip e.g. for user-dtypes.
    return np.dtype(dtype.str)
# Type stubs for ``numpy.lib._utils_impl``; runtime implementations live
# in ``_utils_impl.py``.
from _typeshed import SupportsWrite

from numpy._typing import DTypeLike

__all__ = ["get_include", "info", "show_runtime"]

# Directory containing the NumPy *.h header files.
def get_include() -> str: ...

# Print runtime diagnostics (SIMD support, BLAS/LAPACK via threadpoolctl).
def show_runtime() -> None: ...

# Interactive help for arrays, callables, classes, or name strings.
def info(object: object = ..., maxwidth: int = ..., output: SupportsWrite[str] | None = ..., toplevel: str = ...) -> None: ...

# Return `dtype` stripped of attached metadata (semi-public; used by np.save).
def drop_metadata(dtype: DTypeLike, /) -> DTypeLike: ...
"""Utility to compare (NumPy) version strings.\n\nThe NumpyVersion class allows properly comparing numpy version strings.\nThe LooseVersion and StrictVersion classes that distutils provides don't\nwork; they don't recognize anything like alpha/beta/rc/dev versions.\n\n"""\nimport re\n\n__all__ = ['NumpyVersion']\n\n\nclass NumpyVersion:\n """Parse and compare numpy version strings.\n\n NumPy has the following versioning scheme (numbers given are examples; they\n can be > 9 in principle):\n\n - Released version: '1.8.0', '1.8.1', etc.\n - Alpha: '1.8.0a1', '1.8.0a2', etc.\n - Beta: '1.8.0b1', '1.8.0b2', etc.\n - Release candidates: '1.8.0rc1', '1.8.0rc2', etc.\n - Development versions: '1.8.0.dev-f1234afa' (git commit hash appended)\n - Development versions after a1: '1.8.0a1.dev-f1234afa',\n '1.8.0b2.dev-f1234afa',\n '1.8.1rc1.dev-f1234afa', etc.\n - Development versions (no git hash available): '1.8.0.dev-Unknown'\n\n Comparing needs to be done against a valid version string or other\n `NumpyVersion` instance. Note that all development versions of the same\n (pre-)release compare equal.\n\n Parameters\n ----------\n vstring : str\n NumPy version string (``np.__version__``).\n\n Examples\n --------\n >>> from numpy.lib import NumpyVersion\n >>> if NumpyVersion(np.__version__) < '1.7.0':\n ... 
print('skip')\n >>> # skip\n\n >>> NumpyVersion('1.7') # raises ValueError, add ".0"\n Traceback (most recent call last):\n ...\n ValueError: Not a valid numpy version string\n\n """\n\n __module__ = "numpy.lib"\n\n def __init__(self, vstring):\n self.vstring = vstring\n ver_main = re.match(r'\d+\.\d+\.\d+', vstring)\n if not ver_main:\n raise ValueError("Not a valid numpy version string")\n\n self.version = ver_main.group()\n self.major, self.minor, self.bugfix = [int(x) for x in\n self.version.split('.')]\n if len(vstring) == ver_main.end():\n self.pre_release = 'final'\n else:\n alpha = re.match(r'a\d', vstring[ver_main.end():])\n beta = re.match(r'b\d', vstring[ver_main.end():])\n rc = re.match(r'rc\d', vstring[ver_main.end():])\n pre_rel = [m for m in [alpha, beta, rc] if m is not None]\n if pre_rel:\n self.pre_release = pre_rel[0].group()\n else:\n self.pre_release = ''\n\n self.is_devversion = bool(re.search(r'.dev', vstring))\n\n def _compare_version(self, other):\n """Compare major.minor.bugfix"""\n if self.major == other.major:\n if self.minor == other.minor:\n if self.bugfix == other.bugfix:\n vercmp = 0\n elif self.bugfix > other.bugfix:\n vercmp = 1\n else:\n vercmp = -1\n elif self.minor > other.minor:\n vercmp = 1\n else:\n vercmp = -1\n elif self.major > other.major:\n vercmp = 1\n else:\n vercmp = -1\n\n return vercmp\n\n def _compare_pre_release(self, other):\n """Compare alpha/beta/rc/final."""\n if self.pre_release == other.pre_release:\n vercmp = 0\n elif self.pre_release == 'final':\n vercmp = 1\n elif other.pre_release == 'final':\n vercmp = -1\n elif self.pre_release > other.pre_release:\n vercmp = 1\n else:\n vercmp = -1\n\n return vercmp\n\n def _compare(self, other):\n if not isinstance(other, (str, NumpyVersion)):\n raise ValueError("Invalid object to compare with NumpyVersion.")\n\n if isinstance(other, str):\n other = NumpyVersion(other)\n\n vercmp = self._compare_version(other)\n if vercmp == 0:\n # Same x.y.z version, check for 
alpha/beta/rc\n vercmp = self._compare_pre_release(other)\n if vercmp == 0:\n # Same version and same pre-release, check if dev version\n if self.is_devversion is other.is_devversion:\n vercmp = 0\n elif self.is_devversion:\n vercmp = -1\n else:\n vercmp = 1\n\n return vercmp\n\n def __lt__(self, other):\n return self._compare(other) < 0\n\n def __le__(self, other):\n return self._compare(other) <= 0\n\n def __eq__(self, other):\n return self._compare(other) == 0\n\n def __ne__(self, other):\n return self._compare(other) != 0\n\n def __gt__(self, other):\n return self._compare(other) > 0\n\n def __ge__(self, other):\n return self._compare(other) >= 0\n\n def __repr__(self):\n return f"NumpyVersion({self.vstring})"\n | .venv\Lib\site-packages\numpy\lib\_version.py | _version.py | Python | 5,005 | 0.95 | 0.201299 | 0.01626 | vue-tools | 530 | 2025-03-22T21:03:37.026217 | GPL-3.0 | false | 72f9fe3d8527784a9706ab6463d04fda |
__all__ = ["NumpyVersion"]\n\nclass NumpyVersion:\n vstring: str\n version: str\n major: int\n minor: int\n bugfix: int\n pre_release: str\n is_devversion: bool\n def __init__(self, vstring: str) -> None: ...\n def __lt__(self, other: str | NumpyVersion) -> bool: ...\n def __le__(self, other: str | NumpyVersion) -> bool: ...\n def __eq__(self, other: str | NumpyVersion) -> bool: ... # type: ignore[override]\n def __ne__(self, other: str | NumpyVersion) -> bool: ... # type: ignore[override]\n def __gt__(self, other: str | NumpyVersion) -> bool: ...\n def __ge__(self, other: str | NumpyVersion) -> bool: ...\n | .venv\Lib\site-packages\numpy\lib\_version.pyi | _version.pyi | Other | 658 | 0.95 | 0.470588 | 0 | vue-tools | 288 | 2025-07-08T01:56:37.841622 | BSD-3-Clause | false | 8bce29b1dc8fa2ece35157afdadad88a |
"""\n``numpy.lib`` is mostly a space for implementing functions that don't\nbelong in core or in another NumPy submodule with a clear purpose\n(e.g. ``random``, ``fft``, ``linalg``, ``ma``).\n\n``numpy.lib``'s private submodules contain basic functions that are used by\nother public modules and are useful to have in the main name-space.\n\n"""\n\n# Public submodules\n# Note: recfunctions is public, but not imported\nfrom numpy._core._multiarray_umath import add_docstring, tracemalloc_domain\nfrom numpy._core.function_base import add_newdoc\n\n# Private submodules\n# load module names. See https://github.com/networkx/networkx/issues/5838\nfrom . import (\n _arraypad_impl,\n _arraysetops_impl,\n _arrayterator_impl,\n _function_base_impl,\n _histograms_impl,\n _index_tricks_impl,\n _nanfunctions_impl,\n _npyio_impl,\n _polynomial_impl,\n _shape_base_impl,\n _stride_tricks_impl,\n _twodim_base_impl,\n _type_check_impl,\n _ufunclike_impl,\n _utils_impl,\n _version,\n array_utils,\n format,\n introspect,\n mixins,\n npyio,\n scimath,\n stride_tricks,\n)\n\n# numpy.lib namespace members\nfrom ._arrayterator_impl import Arrayterator\nfrom ._version import NumpyVersion\n\n__all__ = [\n "Arrayterator", "add_docstring", "add_newdoc", "array_utils",\n "format", "introspect", "mixins", "NumpyVersion", "npyio", "scimath",\n "stride_tricks", "tracemalloc_domain",\n]\n\nadd_newdoc.__module__ = "numpy.lib"\n\nfrom numpy._pytesttester import PytestTester\n\ntest = PytestTester(__name__)\ndel PytestTester\n\ndef __getattr__(attr):\n # Warn for deprecated/removed aliases\n import math\n import warnings\n\n if attr == "math":\n warnings.warn(\n "`np.lib.math` is a deprecated alias for the standard library "\n "`math` module (Deprecated Numpy 1.25). Replace usages of "\n "`numpy.lib.math` with `math`", DeprecationWarning, stacklevel=2)\n return math\n elif attr == "emath":\n raise AttributeError(\n "numpy.lib.emath was an alias for emath module that was removed "\n "in NumPy 2.0. 
Replace usages of numpy.lib.emath with "\n "numpy.emath.",\n name=None\n )\n elif attr in (\n "histograms", "type_check", "nanfunctions", "function_base",\n "arraypad", "arraysetops", "ufunclike", "utils", "twodim_base",\n "shape_base", "polynomial", "index_tricks",\n ):\n raise AttributeError(\n f"numpy.lib.{attr} is now private. If you are using a public "\n "function, it should be available in the main numpy namespace, "\n "otherwise check the NumPy 2.0 migration guide.",\n name=None\n )\n elif attr == "arrayterator":\n raise AttributeError(\n "numpy.lib.arrayterator submodule is now private. To access "\n "Arrayterator class use numpy.lib.Arrayterator.",\n name=None\n )\n else:\n raise AttributeError(f"module {__name__!r} has no attribute {attr!r}")\n | .venv\Lib\site-packages\numpy\lib\__init__.py | __init__.py | Python | 3,101 | 0.95 | 0.082474 | 0.069767 | vue-tools | 383 | 2024-05-14T08:04:35.447261 | MIT | false | 8fe8b00b65c229c3505603235511f601 |
from numpy._core.function_base import add_newdoc\nfrom numpy._core.multiarray import add_docstring, tracemalloc_domain\n\n# all submodules of `lib` are accessible at runtime through `__getattr__`,\n# so we implicitly re-export them here\nfrom . import _array_utils_impl as _array_utils_impl\nfrom . import _arraypad_impl as _arraypad_impl\nfrom . import _arraysetops_impl as _arraysetops_impl\nfrom . import _arrayterator_impl as _arrayterator_impl\nfrom . import _datasource as _datasource\nfrom . import _format_impl as _format_impl\nfrom . import _function_base_impl as _function_base_impl\nfrom . import _histograms_impl as _histograms_impl\nfrom . import _index_tricks_impl as _index_tricks_impl\nfrom . import _iotools as _iotools\nfrom . import _nanfunctions_impl as _nanfunctions_impl\nfrom . import _npyio_impl as _npyio_impl\nfrom . import _polynomial_impl as _polynomial_impl\nfrom . import _scimath_impl as _scimath_impl\nfrom . import _shape_base_impl as _shape_base_impl\nfrom . import _stride_tricks_impl as _stride_tricks_impl\nfrom . import _twodim_base_impl as _twodim_base_impl\nfrom . import _type_check_impl as _type_check_impl\nfrom . import _ufunclike_impl as _ufunclike_impl\nfrom . import _utils_impl as _utils_impl\nfrom . import _version as _version\nfrom . import array_utils, format, introspect, mixins, npyio, scimath, stride_tricks\nfrom ._arrayterator_impl import Arrayterator\nfrom ._version import NumpyVersion\n\n__all__ = [\n "Arrayterator",\n "add_docstring",\n "add_newdoc",\n "array_utils",\n "format",\n "introspect",\n "mixins",\n "NumpyVersion",\n "npyio",\n "scimath",\n "stride_tricks",\n "tracemalloc_domain",\n]\n | .venv\Lib\site-packages\numpy\lib\__init__.pyi | __init__.pyi | Other | 1,695 | 0.95 | 0 | 0.047619 | vue-tools | 442 | 2024-09-22T14:35:31.071514 | Apache-2.0 | false | cddfd62bb23ed6d37e8960678dfe4355 |
"""Tests for the array padding functions.\n\n"""\nimport pytest\n\nimport numpy as np\nfrom numpy.lib._arraypad_impl import _as_pairs\nfrom numpy.testing import assert_allclose, assert_array_equal, assert_equal\n\n_numeric_dtypes = (\n np._core.sctypes["uint"]\n + np._core.sctypes["int"]\n + np._core.sctypes["float"]\n + np._core.sctypes["complex"]\n)\n_all_modes = {\n 'constant': {'constant_values': 0},\n 'edge': {},\n 'linear_ramp': {'end_values': 0},\n 'maximum': {'stat_length': None},\n 'mean': {'stat_length': None},\n 'median': {'stat_length': None},\n 'minimum': {'stat_length': None},\n 'reflect': {'reflect_type': 'even'},\n 'symmetric': {'reflect_type': 'even'},\n 'wrap': {},\n 'empty': {}\n}\n\n\nclass TestAsPairs:\n def test_single_value(self):\n """Test casting for a single value."""\n expected = np.array([[3, 3]] * 10)\n for x in (3, [3], [[3]]):\n result = _as_pairs(x, 10)\n assert_equal(result, expected)\n # Test with dtype=object\n obj = object()\n assert_equal(\n _as_pairs(obj, 10),\n np.array([[obj, obj]] * 10)\n )\n\n def test_two_values(self):\n """Test proper casting for two different values."""\n # Broadcasting in the first dimension with numbers\n expected = np.array([[3, 4]] * 10)\n for x in ([3, 4], [[3, 4]]):\n result = _as_pairs(x, 10)\n assert_equal(result, expected)\n # and with dtype=object\n obj = object()\n assert_equal(\n _as_pairs(["a", obj], 10),\n np.array([["a", obj]] * 10)\n )\n\n # Broadcasting in the second / last dimension with numbers\n assert_equal(\n _as_pairs([[3], [4]], 2),\n np.array([[3, 3], [4, 4]])\n )\n # and with dtype=object\n assert_equal(\n _as_pairs([["a"], [obj]], 2),\n np.array([["a", "a"], [obj, obj]])\n )\n\n def test_with_none(self):\n expected = ((None, None), (None, None), (None, None))\n assert_equal(\n _as_pairs(None, 3, as_index=False),\n expected\n )\n assert_equal(\n _as_pairs(None, 3, as_index=True),\n expected\n )\n\n def test_pass_through(self):\n """Test if `x` already matching desired output are 
passed through."""\n expected = np.arange(12).reshape((6, 2))\n assert_equal(\n _as_pairs(expected, 6),\n expected\n )\n\n def test_as_index(self):\n """Test results if `as_index=True`."""\n assert_equal(\n _as_pairs([2.6, 3.3], 10, as_index=True),\n np.array([[3, 3]] * 10, dtype=np.intp)\n )\n assert_equal(\n _as_pairs([2.6, 4.49], 10, as_index=True),\n np.array([[3, 4]] * 10, dtype=np.intp)\n )\n for x in (-3, [-3], [[-3]], [-3, 4], [3, -4], [[-3, 4]], [[4, -3]],\n [[1, 2]] * 9 + [[1, -2]]):\n with pytest.raises(ValueError, match="negative values"):\n _as_pairs(x, 10, as_index=True)\n\n def test_exceptions(self):\n """Ensure faulty usage is discovered."""\n with pytest.raises(ValueError, match="more dimensions than allowed"):\n _as_pairs([[[3]]], 10)\n with pytest.raises(ValueError, match="could not be broadcast"):\n _as_pairs([[1, 2], [3, 4]], 3)\n with pytest.raises(ValueError, match="could not be broadcast"):\n _as_pairs(np.ones((2, 3)), 3)\n\n\nclass TestConditionalShortcuts:\n @pytest.mark.parametrize("mode", _all_modes.keys())\n def test_zero_padding_shortcuts(self, mode):\n test = np.arange(120).reshape(4, 5, 6)\n pad_amt = [(0, 0) for _ in test.shape]\n assert_array_equal(test, np.pad(test, pad_amt, mode=mode))\n\n @pytest.mark.parametrize("mode", ['maximum', 'mean', 'median', 'minimum',])\n def test_shallow_statistic_range(self, mode):\n test = np.arange(120).reshape(4, 5, 6)\n pad_amt = [(1, 1) for _ in test.shape]\n assert_array_equal(np.pad(test, pad_amt, mode='edge'),\n np.pad(test, pad_amt, mode=mode, stat_length=1))\n\n @pytest.mark.parametrize("mode", ['maximum', 'mean', 'median', 'minimum',])\n def test_clip_statistic_range(self, mode):\n test = np.arange(30).reshape(5, 6)\n pad_amt = [(3, 3) for _ in test.shape]\n assert_array_equal(np.pad(test, pad_amt, mode=mode),\n np.pad(test, pad_amt, mode=mode, stat_length=30))\n\n\nclass TestStatistic:\n def test_check_mean_stat_length(self):\n a = np.arange(100).astype('f')\n a = np.pad(a, ((25, 20), ), 
'mean', stat_length=((2, 3), ))\n b = np.array(\n [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5,\n 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5,\n 0.5, 0.5, 0.5, 0.5, 0.5,\n\n 0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,\n 10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,\n 20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,\n 30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,\n 40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,\n 50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,\n 60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,\n 70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,\n 80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,\n 90., 91., 92., 93., 94., 95., 96., 97., 98., 99.,\n\n 98., 98., 98., 98., 98., 98., 98., 98., 98., 98.,\n 98., 98., 98., 98., 98., 98., 98., 98., 98., 98.\n ])\n assert_array_equal(a, b)\n\n def test_check_maximum_1(self):\n a = np.arange(100)\n a = np.pad(a, (25, 20), 'maximum')\n b = np.array(\n [99, 99, 99, 99, 99, 99, 99, 99, 99, 99,\n 99, 99, 99, 99, 99, 99, 99, 99, 99, 99,\n 99, 99, 99, 99, 99,\n\n 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,\n 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,\n 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,\n 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,\n 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,\n 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,\n 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,\n 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,\n 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,\n 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,\n\n 99, 99, 99, 99, 99, 99, 99, 99, 99, 99,\n 99, 99, 99, 99, 99, 99, 99, 99, 99, 99]\n )\n assert_array_equal(a, b)\n\n def test_check_maximum_2(self):\n a = np.arange(100) + 1\n a = np.pad(a, (25, 20), 'maximum')\n b = np.array(\n [100, 100, 100, 100, 100, 100, 100, 100, 100, 100,\n 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,\n 100, 100, 100, 100, 100,\n\n 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,\n 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,\n 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,\n 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,\n 41, 42, 43, 44, 
45, 46, 47, 48, 49, 50,\n 51, 52, 53, 54, 55, 56, 57, 58, 59, 60,\n 61, 62, 63, 64, 65, 66, 67, 68, 69, 70,\n 71, 72, 73, 74, 75, 76, 77, 78, 79, 80,\n 81, 82, 83, 84, 85, 86, 87, 88, 89, 90,\n 91, 92, 93, 94, 95, 96, 97, 98, 99, 100,\n\n 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,\n 100, 100, 100, 100, 100, 100, 100, 100, 100, 100]\n )\n assert_array_equal(a, b)\n\n def test_check_maximum_stat_length(self):\n a = np.arange(100) + 1\n a = np.pad(a, (25, 20), 'maximum', stat_length=10)\n b = np.array(\n [10, 10, 10, 10, 10, 10, 10, 10, 10, 10,\n 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,\n 10, 10, 10, 10, 10,\n\n 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,\n 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,\n 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,\n 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,\n 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,\n 51, 52, 53, 54, 55, 56, 57, 58, 59, 60,\n 61, 62, 63, 64, 65, 66, 67, 68, 69, 70,\n 71, 72, 73, 74, 75, 76, 77, 78, 79, 80,\n 81, 82, 83, 84, 85, 86, 87, 88, 89, 90,\n 91, 92, 93, 94, 95, 96, 97, 98, 99, 100,\n\n 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,\n 100, 100, 100, 100, 100, 100, 100, 100, 100, 100]\n )\n assert_array_equal(a, b)\n\n def test_check_minimum_1(self):\n a = np.arange(100)\n a = np.pad(a, (25, 20), 'minimum')\n b = np.array(\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0,\n\n 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,\n 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,\n 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,\n 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,\n 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,\n 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,\n 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,\n 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,\n 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,\n 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,\n\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n )\n assert_array_equal(a, b)\n\n def test_check_minimum_2(self):\n a = np.arange(100) + 2\n a = np.pad(a, (25, 20), 'minimum')\n b = np.array(\n [ 2, 2, 2, 
2, 2, 2, 2, 2, 2, 2,\n 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,\n 2, 2, 2, 2, 2,\n\n 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,\n 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,\n 22, 23, 24, 25, 26, 27, 28, 29, 30, 31,\n 32, 33, 34, 35, 36, 37, 38, 39, 40, 41,\n 42, 43, 44, 45, 46, 47, 48, 49, 50, 51,\n 52, 53, 54, 55, 56, 57, 58, 59, 60, 61,\n 62, 63, 64, 65, 66, 67, 68, 69, 70, 71,\n 72, 73, 74, 75, 76, 77, 78, 79, 80, 81,\n 82, 83, 84, 85, 86, 87, 88, 89, 90, 91,\n 92, 93, 94, 95, 96, 97, 98, 99, 100, 101,\n\n 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,\n 2, 2, 2, 2, 2, 2, 2, 2, 2, 2]\n )\n assert_array_equal(a, b)\n\n def test_check_minimum_stat_length(self):\n a = np.arange(100) + 1\n a = np.pad(a, (25, 20), 'minimum', stat_length=10)\n b = np.array(\n [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1,\n\n 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,\n 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,\n 21, 22, 23, 24, 25, 26, 27, 28, 29, 30,\n 31, 32, 33, 34, 35, 36, 37, 38, 39, 40,\n 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,\n 51, 52, 53, 54, 55, 56, 57, 58, 59, 60,\n 61, 62, 63, 64, 65, 66, 67, 68, 69, 70,\n 71, 72, 73, 74, 75, 76, 77, 78, 79, 80,\n 81, 82, 83, 84, 85, 86, 87, 88, 89, 90,\n 91, 92, 93, 94, 95, 96, 97, 98, 99, 100,\n\n 91, 91, 91, 91, 91, 91, 91, 91, 91, 91,\n 91, 91, 91, 91, 91, 91, 91, 91, 91, 91]\n )\n assert_array_equal(a, b)\n\n def test_check_median(self):\n a = np.arange(100).astype('f')\n a = np.pad(a, (25, 20), 'median')\n b = np.array(\n [49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,\n 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,\n 49.5, 49.5, 49.5, 49.5, 49.5,\n\n 0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,\n 10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,\n 20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,\n 30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,\n 40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,\n 50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,\n 60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,\n 70., 71., 72., 73., 74., 75., 
76., 77., 78., 79.,\n 80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,\n 90., 91., 92., 93., 94., 95., 96., 97., 98., 99.,\n\n 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,\n 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5]\n )\n assert_array_equal(a, b)\n\n def test_check_median_01(self):\n a = np.array([[3, 1, 4], [4, 5, 9], [9, 8, 2]])\n a = np.pad(a, 1, 'median')\n b = np.array(\n [[4, 4, 5, 4, 4],\n\n [3, 3, 1, 4, 3],\n [5, 4, 5, 9, 5],\n [8, 9, 8, 2, 8],\n\n [4, 4, 5, 4, 4]]\n )\n assert_array_equal(a, b)\n\n def test_check_median_02(self):\n a = np.array([[3, 1, 4], [4, 5, 9], [9, 8, 2]])\n a = np.pad(a.T, 1, 'median').T\n b = np.array(\n [[5, 4, 5, 4, 5],\n\n [3, 3, 1, 4, 3],\n [5, 4, 5, 9, 5],\n [8, 9, 8, 2, 8],\n\n [5, 4, 5, 4, 5]]\n )\n assert_array_equal(a, b)\n\n def test_check_median_stat_length(self):\n a = np.arange(100).astype('f')\n a[1] = 2.\n a[97] = 96.\n a = np.pad(a, (25, 20), 'median', stat_length=(3, 5))\n b = np.array(\n [ 2., 2., 2., 2., 2., 2., 2., 2., 2., 2.,\n 2., 2., 2., 2., 2., 2., 2., 2., 2., 2.,\n 2., 2., 2., 2., 2.,\n\n 0., 2., 2., 3., 4., 5., 6., 7., 8., 9.,\n 10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,\n 20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,\n 30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,\n 40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,\n 50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,\n 60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,\n 70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,\n 80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,\n 90., 91., 92., 93., 94., 95., 96., 96., 98., 99.,\n\n 96., 96., 96., 96., 96., 96., 96., 96., 96., 96.,\n 96., 96., 96., 96., 96., 96., 96., 96., 96., 96.]\n )\n assert_array_equal(a, b)\n\n def test_check_mean_shape_one(self):\n a = [[4, 5, 6]]\n a = np.pad(a, (5, 7), 'mean', stat_length=2)\n b = np.array(\n [[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],\n [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],\n [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 
6, 6, 6, 6],\n [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],\n [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],\n\n [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],\n\n [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],\n [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],\n [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],\n [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],\n [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],\n [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],\n [4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6]]\n )\n assert_array_equal(a, b)\n\n def test_check_mean_2(self):\n a = np.arange(100).astype('f')\n a = np.pad(a, (25, 20), 'mean')\n b = np.array(\n [49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,\n 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,\n 49.5, 49.5, 49.5, 49.5, 49.5,\n\n 0., 1., 2., 3., 4., 5., 6., 7., 8., 9.,\n 10., 11., 12., 13., 14., 15., 16., 17., 18., 19.,\n 20., 21., 22., 23., 24., 25., 26., 27., 28., 29.,\n 30., 31., 32., 33., 34., 35., 36., 37., 38., 39.,\n 40., 41., 42., 43., 44., 45., 46., 47., 48., 49.,\n 50., 51., 52., 53., 54., 55., 56., 57., 58., 59.,\n 60., 61., 62., 63., 64., 65., 66., 67., 68., 69.,\n 70., 71., 72., 73., 74., 75., 76., 77., 78., 79.,\n 80., 81., 82., 83., 84., 85., 86., 87., 88., 89.,\n 90., 91., 92., 93., 94., 95., 96., 97., 98., 99.,\n\n 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,\n 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5]\n )\n assert_array_equal(a, b)\n\n @pytest.mark.parametrize("mode", [\n "mean",\n "median",\n "minimum",\n "maximum"\n ])\n def test_same_prepend_append(self, mode):\n """ Test that appended and prepended values are equal """\n # This test is constructed to trigger floating point rounding errors in\n # a way that caused gh-11216 for mode=='mean'\n a = np.array([-1, 2, -1]) + np.array([0, 1e-12, 0], dtype=np.float64)\n a = np.pad(a, (1, 1), mode)\n assert_equal(a[0], a[-1])\n\n @pytest.mark.parametrize("mode", ["mean", "median", "minimum", 
"maximum"])\n @pytest.mark.parametrize(\n "stat_length", [-2, (-2,), (3, -1), ((5, 2), (-2, 3)), ((-4,), (2,))]\n )\n def test_check_negative_stat_length(self, mode, stat_length):\n arr = np.arange(30).reshape((6, 5))\n match = "index can't contain negative values"\n with pytest.raises(ValueError, match=match):\n np.pad(arr, 2, mode, stat_length=stat_length)\n\n def test_simple_stat_length(self):\n a = np.arange(30)\n a = np.reshape(a, (6, 5))\n a = np.pad(a, ((2, 3), (3, 2)), mode='mean', stat_length=(3,))\n b = np.array(\n [[6, 6, 6, 5, 6, 7, 8, 9, 8, 8],\n [6, 6, 6, 5, 6, 7, 8, 9, 8, 8],\n\n [1, 1, 1, 0, 1, 2, 3, 4, 3, 3],\n [6, 6, 6, 5, 6, 7, 8, 9, 8, 8],\n [11, 11, 11, 10, 11, 12, 13, 14, 13, 13],\n [16, 16, 16, 15, 16, 17, 18, 19, 18, 18],\n [21, 21, 21, 20, 21, 22, 23, 24, 23, 23],\n [26, 26, 26, 25, 26, 27, 28, 29, 28, 28],\n\n [21, 21, 21, 20, 21, 22, 23, 24, 23, 23],\n [21, 21, 21, 20, 21, 22, 23, 24, 23, 23],\n [21, 21, 21, 20, 21, 22, 23, 24, 23, 23]]\n )\n assert_array_equal(a, b)\n\n @pytest.mark.filterwarnings("ignore:Mean of empty slice:RuntimeWarning")\n @pytest.mark.filterwarnings(\n "ignore:invalid value encountered in( scalar)? 
divide:RuntimeWarning"\n )\n @pytest.mark.parametrize("mode", ["mean", "median"])\n def test_zero_stat_length_valid(self, mode):\n arr = np.pad([1., 2.], (1, 2), mode, stat_length=0)\n expected = np.array([np.nan, 1., 2., np.nan, np.nan])\n assert_equal(arr, expected)\n\n @pytest.mark.parametrize("mode", ["minimum", "maximum"])\n def test_zero_stat_length_invalid(self, mode):\n match = "stat_length of 0 yields no value for padding"\n with pytest.raises(ValueError, match=match):\n np.pad([1., 2.], 0, mode, stat_length=0)\n with pytest.raises(ValueError, match=match):\n np.pad([1., 2.], 0, mode, stat_length=(1, 0))\n with pytest.raises(ValueError, match=match):\n np.pad([1., 2.], 1, mode, stat_length=0)\n with pytest.raises(ValueError, match=match):\n np.pad([1., 2.], 1, mode, stat_length=(1, 0))\n\n\nclass TestConstant:\n def test_check_constant(self):\n a = np.arange(100)\n a = np.pad(a, (25, 20), 'constant', constant_values=(10, 20))\n b = np.array(\n [10, 10, 10, 10, 10, 10, 10, 10, 10, 10,\n 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,\n 10, 10, 10, 10, 10,\n\n 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,\n 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,\n 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,\n 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,\n 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,\n 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,\n 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,\n 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,\n 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,\n 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,\n\n 20, 20, 20, 20, 20, 20, 20, 20, 20, 20,\n 20, 20, 20, 20, 20, 20, 20, 20, 20, 20]\n )\n assert_array_equal(a, b)\n\n def test_check_constant_zeros(self):\n a = np.arange(100)\n a = np.pad(a, (25, 20), 'constant')\n b = np.array(\n [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0,\n\n 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,\n 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,\n 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,\n 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,\n 40, 41, 42, 43, 44, 45, 46, 47, 48, 
49,\n 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,\n 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,\n 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,\n 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,\n 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,\n\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\n )\n assert_array_equal(a, b)\n\n def test_check_constant_float(self):\n # If input array is int, but constant_values are float, the dtype of\n # the array to be padded is kept\n arr = np.arange(30).reshape(5, 6)\n test = np.pad(arr, (1, 2), mode='constant',\n constant_values=1.1)\n expected = np.array(\n [[1, 1, 1, 1, 1, 1, 1, 1, 1],\n\n [1, 0, 1, 2, 3, 4, 5, 1, 1],\n [1, 6, 7, 8, 9, 10, 11, 1, 1],\n [1, 12, 13, 14, 15, 16, 17, 1, 1],\n [1, 18, 19, 20, 21, 22, 23, 1, 1],\n [1, 24, 25, 26, 27, 28, 29, 1, 1],\n\n [1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1, 1, 1]]\n )\n assert_allclose(test, expected)\n\n def test_check_constant_float2(self):\n # If input array is float, and constant_values are float, the dtype of\n # the array to be padded is kept - here retaining the float constants\n arr = np.arange(30).reshape(5, 6)\n arr_float = arr.astype(np.float64)\n test = np.pad(arr_float, ((1, 2), (1, 2)), mode='constant',\n constant_values=1.1)\n expected = np.array(\n [[1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1],\n\n [1.1, 0. , 1. , 2. , 3. , 4. , 5. , 1.1, 1.1], # noqa: E203\n [1.1, 6. , 7. , 8. , 9. , 10. , 11. , 1.1, 1.1], # noqa: E203\n [1.1, 12. , 13. , 14. , 15. , 16. , 17. , 1.1, 1.1], # noqa: E203\n [1.1, 18. , 19. , 20. , 21. , 22. , 23. , 1.1, 1.1], # noqa: E203\n [1.1, 24. , 25. , 26. , 27. , 28. , 29. 
, 1.1, 1.1], # noqa: E203\n\n [1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1],\n [1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1]]\n )\n assert_allclose(test, expected)\n\n def test_check_constant_float3(self):\n a = np.arange(100, dtype=float)\n a = np.pad(a, (25, 20), 'constant', constant_values=(-1.1, -1.2))\n b = np.array(\n [-1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1,\n -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1,\n -1.1, -1.1, -1.1, -1.1, -1.1,\n\n 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,\n 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,\n 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,\n 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,\n 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,\n 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,\n 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,\n 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,\n 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,\n 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,\n\n -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2,\n -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2, -1.2]\n )\n assert_allclose(a, b)\n\n def test_check_constant_odd_pad_amount(self):\n arr = np.arange(30).reshape(5, 6)\n test = np.pad(arr, ((1,), (2,)), mode='constant',\n constant_values=3)\n expected = np.array(\n [[3, 3, 3, 3, 3, 3, 3, 3, 3, 3],\n\n [3, 3, 0, 1, 2, 3, 4, 5, 3, 3],\n [3, 3, 6, 7, 8, 9, 10, 11, 3, 3],\n [3, 3, 12, 13, 14, 15, 16, 17, 3, 3],\n [3, 3, 18, 19, 20, 21, 22, 23, 3, 3],\n [3, 3, 24, 25, 26, 27, 28, 29, 3, 3],\n\n [3, 3, 3, 3, 3, 3, 3, 3, 3, 3]]\n )\n assert_allclose(test, expected)\n\n def test_check_constant_pad_2d(self):\n arr = np.arange(4).reshape(2, 2)\n test = np.pad(arr, ((1, 2), (1, 3)), mode='constant',\n constant_values=((1, 2), (3, 4)))\n expected = np.array(\n [[3, 1, 1, 4, 4, 4],\n [3, 0, 1, 4, 4, 4],\n [3, 2, 3, 4, 4, 4],\n [3, 2, 2, 4, 4, 4],\n [3, 2, 2, 4, 4, 4]]\n )\n assert_allclose(test, expected)\n\n def test_check_large_integers(self):\n uint64_max = 2 ** 64 - 1\n arr = np.full(5, uint64_max, dtype=np.uint64)\n test = 
np.pad(arr, 1, mode="constant", constant_values=arr.min())\n expected = np.full(7, uint64_max, dtype=np.uint64)\n assert_array_equal(test, expected)\n\n int64_max = 2 ** 63 - 1\n arr = np.full(5, int64_max, dtype=np.int64)\n test = np.pad(arr, 1, mode="constant", constant_values=arr.min())\n expected = np.full(7, int64_max, dtype=np.int64)\n assert_array_equal(test, expected)\n\n def test_check_object_array(self):\n arr = np.empty(1, dtype=object)\n obj_a = object()\n arr[0] = obj_a\n obj_b = object()\n obj_c = object()\n arr = np.pad(arr, pad_width=1, mode='constant',\n constant_values=(obj_b, obj_c))\n\n expected = np.empty((3,), dtype=object)\n expected[0] = obj_b\n expected[1] = obj_a\n expected[2] = obj_c\n\n assert_array_equal(arr, expected)\n\n def test_pad_empty_dimension(self):\n arr = np.zeros((3, 0, 2))\n result = np.pad(arr, [(0,), (2,), (1,)], mode="constant")\n assert result.shape == (3, 4, 4)\n\n\nclass TestLinearRamp:\n def test_check_simple(self):\n a = np.arange(100).astype('f')\n a = np.pad(a, (25, 20), 'linear_ramp', end_values=(4, 5))\n b = np.array(\n [4.00, 3.84, 3.68, 3.52, 3.36, 3.20, 3.04, 2.88, 2.72, 2.56,\n 2.40, 2.24, 2.08, 1.92, 1.76, 1.60, 1.44, 1.28, 1.12, 0.96,\n 0.80, 0.64, 0.48, 0.32, 0.16,\n\n 0.00, 1.00, 2.00, 3.00, 4.00, 5.00, 6.00, 7.00, 8.00, 9.00,\n 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0,\n 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0,\n 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0,\n 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0, 48.0, 49.0,\n 50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0,\n 60.0, 61.0, 62.0, 63.0, 64.0, 65.0, 66.0, 67.0, 68.0, 69.0,\n 70.0, 71.0, 72.0, 73.0, 74.0, 75.0, 76.0, 77.0, 78.0, 79.0,\n 80.0, 81.0, 82.0, 83.0, 84.0, 85.0, 86.0, 87.0, 88.0, 89.0,\n 90.0, 91.0, 92.0, 93.0, 94.0, 95.0, 96.0, 97.0, 98.0, 99.0,\n\n 94.3, 89.6, 84.9, 80.2, 75.5, 70.8, 66.1, 61.4, 56.7, 52.0,\n 47.3, 42.6, 37.9, 33.2, 28.5, 23.8, 19.1, 14.4, 9.7, 5.]\n )\n 
assert_allclose(a, b, rtol=1e-5, atol=1e-5)\n\n def test_check_2d(self):\n arr = np.arange(20).reshape(4, 5).astype(np.float64)\n test = np.pad(arr, (2, 2), mode='linear_ramp', end_values=(0, 0))\n expected = np.array(\n [[0., 0., 0., 0., 0., 0., 0., 0., 0.],\n [0., 0., 0., 0.5, 1., 1.5, 2., 1., 0.],\n [0., 0., 0., 1., 2., 3., 4., 2., 0.],\n [0., 2.5, 5., 6., 7., 8., 9., 4.5, 0.],\n [0., 5., 10., 11., 12., 13., 14., 7., 0.],\n [0., 7.5, 15., 16., 17., 18., 19., 9.5, 0.],\n [0., 3.75, 7.5, 8., 8.5, 9., 9.5, 4.75, 0.],\n [0., 0., 0., 0., 0., 0., 0., 0., 0.]])\n assert_allclose(test, expected)\n\n @pytest.mark.xfail(exceptions=(AssertionError,))\n def test_object_array(self):\n from fractions import Fraction\n arr = np.array([Fraction(1, 2), Fraction(-1, 2)])\n actual = np.pad(arr, (2, 3), mode='linear_ramp', end_values=0)\n\n # deliberately chosen to have a non-power-of-2 denominator such that\n # rounding to floats causes a failure.\n expected = np.array([\n Fraction( 0, 12),\n Fraction( 3, 12),\n Fraction( 6, 12),\n Fraction(-6, 12),\n Fraction(-4, 12),\n Fraction(-2, 12),\n Fraction(-0, 12),\n ])\n assert_equal(actual, expected)\n\n def test_end_values(self):\n """Ensure that end values are exact."""\n a = np.pad(np.ones(10).reshape(2, 5), (223, 123), mode="linear_ramp")\n assert_equal(a[:, 0], 0.)\n assert_equal(a[:, -1], 0.)\n assert_equal(a[0, :], 0.)\n assert_equal(a[-1, :], 0.)\n\n @pytest.mark.parametrize("dtype", _numeric_dtypes)\n def test_negative_difference(self, dtype):\n """\n Check correct behavior of unsigned dtypes if there is a negative\n difference between the edge to pad and `end_values`. Check both cases\n to be independent of implementation. Test behavior for all other dtypes\n in case dtype casting interferes with complex dtypes. 
See gh-14191.\n """\n x = np.array([3], dtype=dtype)\n result = np.pad(x, 3, mode="linear_ramp", end_values=0)\n expected = np.array([0, 1, 2, 3, 2, 1, 0], dtype=dtype)\n assert_equal(result, expected)\n\n x = np.array([0], dtype=dtype)\n result = np.pad(x, 3, mode="linear_ramp", end_values=3)\n expected = np.array([3, 2, 1, 0, 1, 2, 3], dtype=dtype)\n assert_equal(result, expected)\n\n\nclass TestReflect:\n def test_check_simple(self):\n a = np.arange(100)\n a = np.pad(a, (25, 20), 'reflect')\n b = np.array(\n [25, 24, 23, 22, 21, 20, 19, 18, 17, 16,\n 15, 14, 13, 12, 11, 10, 9, 8, 7, 6,\n 5, 4, 3, 2, 1,\n\n 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,\n 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,\n 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,\n 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,\n 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,\n 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,\n 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,\n 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,\n 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,\n 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,\n\n 98, 97, 96, 95, 94, 93, 92, 91, 90, 89,\n 88, 87, 86, 85, 84, 83, 82, 81, 80, 79]\n )\n assert_array_equal(a, b)\n\n def test_check_odd_method(self):\n a = np.arange(100)\n a = np.pad(a, (25, 20), 'reflect', reflect_type='odd')\n b = np.array(\n [-25, -24, -23, -22, -21, -20, -19, -18, -17, -16,\n -15, -14, -13, -12, -11, -10, -9, -8, -7, -6,\n -5, -4, -3, -2, -1,\n\n 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,\n 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,\n 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,\n 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,\n 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,\n 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,\n 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,\n 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,\n 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,\n 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,\n\n 100, 101, 102, 103, 104, 105, 106, 107, 108, 109,\n 110, 111, 112, 113, 114, 115, 116, 117, 118, 119]\n )\n assert_array_equal(a, b)\n\n def test_check_large_pad(self):\n a = [[4, 5, 6], 
[6, 7, 8]]\n a = np.pad(a, (5, 7), 'reflect')\n b = np.array(\n [[7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],\n [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],\n [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],\n [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],\n [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],\n\n [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],\n [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],\n\n [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],\n [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],\n [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],\n [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],\n [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],\n [7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],\n [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5]]\n )\n assert_array_equal(a, b)\n\n def test_check_shape(self):\n a = [[4, 5, 6]]\n a = np.pad(a, (5, 7), 'reflect')\n b = np.array(\n [[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],\n [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],\n [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],\n [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],\n [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],\n\n [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],\n\n [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],\n [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],\n [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],\n [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],\n [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],\n [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],\n [5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5]]\n )\n assert_array_equal(a, b)\n\n def test_check_01(self):\n a = np.pad([1, 2, 3], 2, 'reflect')\n b = np.array([3, 2, 1, 2, 3, 2, 1])\n assert_array_equal(a, b)\n\n def test_check_02(self):\n a = np.pad([1, 2, 3], 3, 'reflect')\n b = np.array([2, 3, 2, 1, 2, 3, 2, 1, 2])\n assert_array_equal(a, b)\n\n def test_check_03(self):\n a = np.pad([1, 2, 3], 4, 'reflect')\n b = np.array([1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3])\n assert_array_equal(a, b)\n\n def 
test_check_04(self):\n a = np.pad([1, 2, 3], [1, 10], 'reflect')\n b = np.array([2, 1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3, 2, 1])\n assert_array_equal(a, b)\n\n def test_check_05(self):\n a = np.pad([1, 2, 3, 4], [45, 10], 'reflect')\n b = np.array(\n [4, 3, 2, 1, 2, 3, 4, 3, 2, 1,\n 2, 3, 4, 3, 2, 1, 2, 3, 4, 3,\n 2, 1, 2, 3, 4, 3, 2, 1, 2, 3,\n 4, 3, 2, 1, 2, 3, 4, 3, 2, 1,\n 2, 3, 4, 3, 2, 1, 2, 3, 4, 3,\n 2, 1, 2, 3, 4, 3, 2, 1, 2])\n assert_array_equal(a, b)\n\n def test_check_06(self):\n a = np.pad([1, 2, 3, 4], [15, 2], 'symmetric')\n b = np.array(\n [2, 3, 4, 4, 3, 2, 1, 1, 2, 3,\n 4, 4, 3, 2, 1, 1, 2, 3, 4, 4,\n 3]\n )\n assert_array_equal(a, b)\n\n def test_check_07(self):\n a = np.pad([1, 2, 3, 4, 5, 6], [45, 3], 'symmetric')\n b = np.array(\n [4, 5, 6, 6, 5, 4, 3, 2, 1, 1,\n 2, 3, 4, 5, 6, 6, 5, 4, 3, 2,\n 1, 1, 2, 3, 4, 5, 6, 6, 5, 4,\n 3, 2, 1, 1, 2, 3, 4, 5, 6, 6,\n 5, 4, 3, 2, 1, 1, 2, 3, 4, 5,\n 6, 6, 5, 4])\n assert_array_equal(a, b)\n\n\nclass TestEmptyArray:\n """Check how padding behaves on arrays with an empty dimension."""\n\n @pytest.mark.parametrize(\n # Keep parametrization ordered, otherwise pytest-xdist might believe\n # that different tests were collected during parallelization\n "mode", sorted(_all_modes.keys() - {"constant", "empty"})\n )\n def test_pad_empty_dimension(self, mode):\n match = ("can't extend empty axis 0 using modes other than 'constant' "\n "or 'empty'")\n with pytest.raises(ValueError, match=match):\n np.pad([], 4, mode=mode)\n with pytest.raises(ValueError, match=match):\n np.pad(np.ndarray(0), 4, mode=mode)\n with pytest.raises(ValueError, match=match):\n np.pad(np.zeros((0, 3)), ((1,), (0,)), mode=mode)\n\n @pytest.mark.parametrize("mode", _all_modes.keys())\n def test_pad_non_empty_dimension(self, mode):\n result = np.pad(np.ones((2, 0, 2)), ((3,), (0,), (1,)), mode=mode)\n assert result.shape == (8, 0, 4)\n\n\nclass TestSymmetric:\n def test_check_simple(self):\n a = np.arange(100)\n a = np.pad(a, (25, 20), 
'symmetric')\n b = np.array(\n [24, 23, 22, 21, 20, 19, 18, 17, 16, 15,\n 14, 13, 12, 11, 10, 9, 8, 7, 6, 5,\n 4, 3, 2, 1, 0,\n\n 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,\n 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,\n 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,\n 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,\n 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,\n 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,\n 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,\n 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,\n 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,\n 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,\n\n 99, 98, 97, 96, 95, 94, 93, 92, 91, 90,\n 89, 88, 87, 86, 85, 84, 83, 82, 81, 80]\n )\n assert_array_equal(a, b)\n\n def test_check_odd_method(self):\n a = np.arange(100)\n a = np.pad(a, (25, 20), 'symmetric', reflect_type='odd')\n b = np.array(\n [-24, -23, -22, -21, -20, -19, -18, -17, -16, -15,\n -14, -13, -12, -11, -10, -9, -8, -7, -6, -5,\n -4, -3, -2, -1, 0,\n\n 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,\n 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,\n 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,\n 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,\n 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,\n 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,\n 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,\n 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,\n 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,\n 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,\n\n 99, 100, 101, 102, 103, 104, 105, 106, 107, 108,\n 109, 110, 111, 112, 113, 114, 115, 116, 117, 118]\n )\n assert_array_equal(a, b)\n\n def test_check_large_pad(self):\n a = [[4, 5, 6], [6, 7, 8]]\n a = np.pad(a, (5, 7), 'symmetric')\n b = np.array(\n [[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],\n [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],\n [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],\n [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],\n [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],\n\n [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],\n [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],\n\n [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],\n [5, 6, 6, 5, 4, 4, 5, 6, 
6, 5, 4, 4, 5, 6, 6],\n [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],\n [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],\n [7, 8, 8, 7, 6, 6, 7, 8, 8, 7, 6, 6, 7, 8, 8],\n [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],\n [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6]]\n )\n\n assert_array_equal(a, b)\n\n def test_check_large_pad_odd(self):\n a = [[4, 5, 6], [6, 7, 8]]\n a = np.pad(a, (5, 7), 'symmetric', reflect_type='odd')\n b = np.array(\n [[-3, -2, -2, -1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6],\n [-3, -2, -2, -1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6],\n [-1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8],\n [-1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8],\n [ 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10],\n\n [ 1, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10],\n [ 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12],\n\n [ 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12],\n [ 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14],\n [ 5, 6, 6, 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14],\n [ 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16],\n [ 7, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16],\n [ 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16, 17, 18, 18],\n [ 9, 10, 10, 11, 12, 12, 13, 14, 14, 15, 16, 16, 17, 18, 18]]\n )\n assert_array_equal(a, b)\n\n def test_check_shape(self):\n a = [[4, 5, 6]]\n a = np.pad(a, (5, 7), 'symmetric')\n b = np.array(\n [[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],\n [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],\n [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],\n [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],\n [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],\n\n [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],\n [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],\n\n [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],\n [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],\n [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],\n [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],\n [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],\n [5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 
5, 6, 6]]\n )\n assert_array_equal(a, b)\n\n def test_check_01(self):\n a = np.pad([1, 2, 3], 2, 'symmetric')\n b = np.array([2, 1, 1, 2, 3, 3, 2])\n assert_array_equal(a, b)\n\n def test_check_02(self):\n a = np.pad([1, 2, 3], 3, 'symmetric')\n b = np.array([3, 2, 1, 1, 2, 3, 3, 2, 1])\n assert_array_equal(a, b)\n\n def test_check_03(self):\n a = np.pad([1, 2, 3], 6, 'symmetric')\n b = np.array([1, 2, 3, 3, 2, 1, 1, 2, 3, 3, 2, 1, 1, 2, 3])\n assert_array_equal(a, b)\n\n\nclass TestWrap:\n def test_check_simple(self):\n a = np.arange(100)\n a = np.pad(a, (25, 20), 'wrap')\n b = np.array(\n [75, 76, 77, 78, 79, 80, 81, 82, 83, 84,\n 85, 86, 87, 88, 89, 90, 91, 92, 93, 94,\n 95, 96, 97, 98, 99,\n\n 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,\n 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,\n 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,\n 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,\n 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,\n 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,\n 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,\n 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,\n 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,\n 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,\n\n 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,\n 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]\n )\n assert_array_equal(a, b)\n\n def test_check_large_pad(self):\n a = np.arange(12)\n a = np.reshape(a, (3, 4))\n a = np.pad(a, (10, 12), 'wrap')\n b = np.array(\n [[10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,\n 11, 8, 9, 10, 11, 8, 9, 10, 11],\n [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,\n 3, 0, 1, 2, 3, 0, 1, 2, 3],\n [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,\n 7, 4, 5, 6, 7, 4, 5, 6, 7],\n [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,\n 11, 8, 9, 10, 11, 8, 9, 10, 11],\n [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,\n 3, 0, 1, 2, 3, 0, 1, 2, 3],\n [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,\n 7, 4, 5, 6, 7, 4, 5, 6, 7],\n [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,\n 11, 8, 9, 10, 11, 8, 9, 10, 11],\n [2, 3, 0, 
1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,\n 3, 0, 1, 2, 3, 0, 1, 2, 3],\n [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,\n 7, 4, 5, 6, 7, 4, 5, 6, 7],\n [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,\n 11, 8, 9, 10, 11, 8, 9, 10, 11],\n\n [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,\n 3, 0, 1, 2, 3, 0, 1, 2, 3],\n [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,\n 7, 4, 5, 6, 7, 4, 5, 6, 7],\n [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,\n 11, 8, 9, 10, 11, 8, 9, 10, 11],\n\n [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,\n 3, 0, 1, 2, 3, 0, 1, 2, 3],\n [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,\n 7, 4, 5, 6, 7, 4, 5, 6, 7],\n [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,\n 11, 8, 9, 10, 11, 8, 9, 10, 11],\n [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,\n 3, 0, 1, 2, 3, 0, 1, 2, 3],\n [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,\n 7, 4, 5, 6, 7, 4, 5, 6, 7],\n [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,\n 11, 8, 9, 10, 11, 8, 9, 10, 11],\n [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,\n 3, 0, 1, 2, 3, 0, 1, 2, 3],\n [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,\n 7, 4, 5, 6, 7, 4, 5, 6, 7],\n [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,\n 11, 8, 9, 10, 11, 8, 9, 10, 11],\n [2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2,\n 3, 0, 1, 2, 3, 0, 1, 2, 3],\n [6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6, 7, 4, 5, 6,\n 7, 4, 5, 6, 7, 4, 5, 6, 7],\n [10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,\n 11, 8, 9, 10, 11, 8, 9, 10, 11]]\n )\n assert_array_equal(a, b)\n\n def test_check_01(self):\n a = np.pad([1, 2, 3], 3, 'wrap')\n b = np.array([1, 2, 3, 1, 2, 3, 1, 2, 3])\n assert_array_equal(a, b)\n\n def test_check_02(self):\n a = np.pad([1, 2, 3], 4, 'wrap')\n b = np.array([3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1])\n assert_array_equal(a, b)\n\n def test_pad_with_zero(self):\n a = np.ones((3, 5))\n b = np.pad(a, (0, 5), mode="wrap")\n 
assert_array_equal(a, b[:-5, :-5])\n\n def test_repeated_wrapping(self):\n """\n Check wrapping on each side individually if the wrapped area is longer\n than the original array.\n """\n a = np.arange(5)\n b = np.pad(a, (12, 0), mode="wrap")\n assert_array_equal(np.r_[a, a, a, a][3:], b)\n\n a = np.arange(5)\n b = np.pad(a, (0, 12), mode="wrap")\n assert_array_equal(np.r_[a, a, a, a][:-3], b)\n\n def test_repeated_wrapping_multiple_origin(self):\n """\n Assert that 'wrap' pads only with multiples of the original area if\n the pad width is larger than the original array.\n """\n a = np.arange(4).reshape(2, 2)\n a = np.pad(a, [(1, 3), (3, 1)], mode='wrap')\n b = np.array(\n [[3, 2, 3, 2, 3, 2],\n [1, 0, 1, 0, 1, 0],\n [3, 2, 3, 2, 3, 2],\n [1, 0, 1, 0, 1, 0],\n [3, 2, 3, 2, 3, 2],\n [1, 0, 1, 0, 1, 0]]\n )\n assert_array_equal(a, b)\n\n\nclass TestEdge:\n def test_check_simple(self):\n a = np.arange(12)\n a = np.reshape(a, (4, 3))\n a = np.pad(a, ((2, 3), (3, 2)), 'edge')\n b = np.array(\n [[0, 0, 0, 0, 1, 2, 2, 2],\n [0, 0, 0, 0, 1, 2, 2, 2],\n\n [0, 0, 0, 0, 1, 2, 2, 2],\n [3, 3, 3, 3, 4, 5, 5, 5],\n [6, 6, 6, 6, 7, 8, 8, 8],\n [9, 9, 9, 9, 10, 11, 11, 11],\n\n [9, 9, 9, 9, 10, 11, 11, 11],\n [9, 9, 9, 9, 10, 11, 11, 11],\n [9, 9, 9, 9, 10, 11, 11, 11]]\n )\n assert_array_equal(a, b)\n\n def test_check_width_shape_1_2(self):\n # Check a pad_width of the form ((1, 2),).\n # Regression test for issue gh-7808.\n a = np.array([1, 2, 3])\n padded = np.pad(a, ((1, 2),), 'edge')\n expected = np.array([1, 1, 2, 3, 3, 3])\n assert_array_equal(padded, expected)\n\n a = np.array([[1, 2, 3], [4, 5, 6]])\n padded = np.pad(a, ((1, 2),), 'edge')\n expected = np.pad(a, ((1, 2), (1, 2)), 'edge')\n assert_array_equal(padded, expected)\n\n a = np.arange(24).reshape(2, 3, 4)\n padded = np.pad(a, ((1, 2),), 'edge')\n expected = np.pad(a, ((1, 2), (1, 2), (1, 2)), 'edge')\n assert_array_equal(padded, expected)\n\n\nclass TestEmpty:\n def test_simple(self):\n arr = 
np.arange(24).reshape(4, 6)\n result = np.pad(arr, [(2, 3), (3, 1)], mode="empty")\n assert result.shape == (9, 10)\n assert_equal(arr, result[2:-3, 3:-1])\n\n def test_pad_empty_dimension(self):\n arr = np.zeros((3, 0, 2))\n result = np.pad(arr, [(0,), (2,), (1,)], mode="empty")\n assert result.shape == (3, 4, 4)\n\n\ndef test_legacy_vector_functionality():\n def _padwithtens(vector, pad_width, iaxis, kwargs):\n vector[:pad_width[0]] = 10\n vector[-pad_width[1]:] = 10\n\n a = np.arange(6).reshape(2, 3)\n a = np.pad(a, 2, _padwithtens)\n b = np.array(\n [[10, 10, 10, 10, 10, 10, 10],\n [10, 10, 10, 10, 10, 10, 10],\n\n [10, 10, 0, 1, 2, 10, 10],\n [10, 10, 3, 4, 5, 10, 10],\n\n [10, 10, 10, 10, 10, 10, 10],\n [10, 10, 10, 10, 10, 10, 10]]\n )\n assert_array_equal(a, b)\n\n\ndef test_unicode_mode():\n a = np.pad([1], 2, mode='constant')\n b = np.array([0, 0, 1, 0, 0])\n assert_array_equal(a, b)\n\n\n@pytest.mark.parametrize("mode", ["edge", "symmetric", "reflect", "wrap"])\ndef test_object_input(mode):\n # Regression test for issue gh-11395.\n a = np.full((4, 3), fill_value=None)\n pad_amt = ((2, 3), (3, 2))\n b = np.full((9, 8), fill_value=None)\n assert_array_equal(np.pad(a, pad_amt, mode=mode), b)\n\n\nclass TestPadWidth:\n @pytest.mark.parametrize("pad_width", [\n (4, 5, 6, 7),\n ((1,), (2,), (3,)),\n ((1, 2), (3, 4), (5, 6)),\n ((3, 4, 5), (0, 1, 2)),\n ])\n @pytest.mark.parametrize("mode", _all_modes.keys())\n def test_misshaped_pad_width(self, pad_width, mode):\n arr = np.arange(30).reshape((6, 5))\n match = "operands could not be broadcast together"\n with pytest.raises(ValueError, match=match):\n np.pad(arr, pad_width, mode)\n\n @pytest.mark.parametrize("mode", _all_modes.keys())\n def test_misshaped_pad_width_2(self, mode):\n arr = np.arange(30).reshape((6, 5))\n match = ("input operand has more dimensions than allowed by the axis "\n "remapping")\n with pytest.raises(ValueError, match=match):\n np.pad(arr, (((3,), (4,), (5,)), ((0,), (1,), (2,))), 
mode)\n\n @pytest.mark.parametrize(\n "pad_width", [-2, (-2,), (3, -1), ((5, 2), (-2, 3)), ((-4,), (2,))])\n @pytest.mark.parametrize("mode", _all_modes.keys())\n def test_negative_pad_width(self, pad_width, mode):\n arr = np.arange(30).reshape((6, 5))\n match = "index can't contain negative values"\n with pytest.raises(ValueError, match=match):\n np.pad(arr, pad_width, mode)\n\n @pytest.mark.parametrize("pad_width, dtype", [\n ("3", None),\n ("word", None),\n (None, None),\n (object(), None),\n (3.4, None),\n (((2, 3, 4), (3, 2)), object),\n (complex(1, -1), None),\n (((-2.1, 3), (3, 2)), None),\n ])\n @pytest.mark.parametrize("mode", _all_modes.keys())\n def test_bad_type(self, pad_width, dtype, mode):\n arr = np.arange(30).reshape((6, 5))\n match = "`pad_width` must be of integral type."\n if dtype is not None:\n # avoid DeprecationWarning when not specifying dtype\n with pytest.raises(TypeError, match=match):\n np.pad(arr, np.array(pad_width, dtype=dtype), mode)\n else:\n with pytest.raises(TypeError, match=match):\n np.pad(arr, pad_width, mode)\n with pytest.raises(TypeError, match=match):\n np.pad(arr, np.array(pad_width), mode)\n\n def test_pad_width_as_ndarray(self):\n a = np.arange(12)\n a = np.reshape(a, (4, 3))\n a = np.pad(a, np.array(((2, 3), (3, 2))), 'edge')\n b = np.array(\n [[0, 0, 0, 0, 1, 2, 2, 2],\n [0, 0, 0, 0, 1, 2, 2, 2],\n\n [0, 0, 0, 0, 1, 2, 2, 2],\n [3, 3, 3, 3, 4, 5, 5, 5],\n [6, 6, 6, 6, 7, 8, 8, 8],\n [9, 9, 9, 9, 10, 11, 11, 11],\n\n [9, 9, 9, 9, 10, 11, 11, 11],\n [9, 9, 9, 9, 10, 11, 11, 11],\n [9, 9, 9, 9, 10, 11, 11, 11]]\n )\n assert_array_equal(a, b)\n\n @pytest.mark.parametrize("pad_width", [0, (0, 0), ((0, 0), (0, 0))])\n @pytest.mark.parametrize("mode", _all_modes.keys())\n def test_zero_pad_width(self, pad_width, mode):\n arr = np.arange(30).reshape(6, 5)\n assert_array_equal(arr, np.pad(arr, pad_width, mode=mode))\n\n\n@pytest.mark.parametrize("mode", _all_modes.keys())\ndef test_kwargs(mode):\n """Test behavior of pad's 
kwargs for the given mode."""\n allowed = _all_modes[mode]\n not_allowed = {}\n for kwargs in _all_modes.values():\n if kwargs != allowed:\n not_allowed.update(kwargs)\n # Test if allowed keyword arguments pass\n np.pad([1, 2, 3], 1, mode, **allowed)\n # Test if prohibited keyword arguments of other modes raise an error\n for key, value in not_allowed.items():\n match = f"unsupported keyword arguments for mode '{mode}'"\n with pytest.raises(ValueError, match=match):\n np.pad([1, 2, 3], 1, mode, **{key: value})\n\n\ndef test_constant_zero_default():\n arr = np.array([1, 1])\n assert_array_equal(np.pad(arr, 2), [0, 0, 1, 1, 0, 0])\n\n\n@pytest.mark.parametrize("mode", [1, "const", object(), None, True, False])\ndef test_unsupported_mode(mode):\n match = f"mode '{mode}' is not supported"\n with pytest.raises(ValueError, match=match):\n np.pad([1, 2, 3], 4, mode=mode)\n\n\n@pytest.mark.parametrize("mode", _all_modes.keys())\ndef test_non_contiguous_array(mode):\n arr = np.arange(24).reshape(4, 6)[::2, ::2]\n result = np.pad(arr, (2, 3), mode)\n assert result.shape == (7, 8)\n assert_equal(result[2:-3, 2:-3], arr)\n\n\n@pytest.mark.parametrize("mode", _all_modes.keys())\ndef test_memory_layout_persistence(mode):\n """Test if C and F order is preserved for all pad modes."""\n x = np.ones((5, 10), order='C')\n assert np.pad(x, 5, mode).flags["C_CONTIGUOUS"]\n x = np.ones((5, 10), order='F')\n assert np.pad(x, 5, mode).flags["F_CONTIGUOUS"]\n\n\n@pytest.mark.parametrize("dtype", _numeric_dtypes)\n@pytest.mark.parametrize("mode", _all_modes.keys())\ndef test_dtype_persistence(dtype, mode):\n arr = np.zeros((3, 2, 1), dtype=dtype)\n result = np.pad(arr, 1, mode=mode)\n assert result.dtype == dtype\n | .venv\Lib\site-packages\numpy\lib\tests\test_arraypad.py | test_arraypad.py | Python | 57,570 | 0.75 | 0.09258 | 0.017213 | react-lib | 429 | 2024-12-08T21:21:10.677200 | BSD-3-Clause | true | 7ceada4e32f033ed143f9aa75e1a2a00 |
"""Test functions for 1D array set operations.\n\n"""\nimport pytest\n\nimport numpy as np\nfrom numpy import ediff1d, intersect1d, isin, setdiff1d, setxor1d, union1d, unique\nfrom numpy.exceptions import AxisError\nfrom numpy.testing import (\n assert_array_equal,\n assert_equal,\n assert_raises,\n assert_raises_regex,\n)\n\n\nclass TestSetOps:\n\n def test_intersect1d(self):\n # unique inputs\n a = np.array([5, 7, 1, 2])\n b = np.array([2, 4, 3, 1, 5])\n\n ec = np.array([1, 2, 5])\n c = intersect1d(a, b, assume_unique=True)\n assert_array_equal(c, ec)\n\n # non-unique inputs\n a = np.array([5, 5, 7, 1, 2])\n b = np.array([2, 1, 4, 3, 3, 1, 5])\n\n ed = np.array([1, 2, 5])\n c = intersect1d(a, b)\n assert_array_equal(c, ed)\n assert_array_equal([], intersect1d([], []))\n\n def test_intersect1d_array_like(self):\n # See gh-11772\n class Test:\n def __array__(self, dtype=None, copy=None):\n return np.arange(3)\n\n a = Test()\n res = intersect1d(a, a)\n assert_array_equal(res, a)\n res = intersect1d([1, 2, 3], [1, 2, 3])\n assert_array_equal(res, [1, 2, 3])\n\n def test_intersect1d_indices(self):\n # unique inputs\n a = np.array([1, 2, 3, 4])\n b = np.array([2, 1, 4, 6])\n c, i1, i2 = intersect1d(a, b, assume_unique=True, return_indices=True)\n ee = np.array([1, 2, 4])\n assert_array_equal(c, ee)\n assert_array_equal(a[i1], ee)\n assert_array_equal(b[i2], ee)\n\n # non-unique inputs\n a = np.array([1, 2, 2, 3, 4, 3, 2])\n b = np.array([1, 8, 4, 2, 2, 3, 2, 3])\n c, i1, i2 = intersect1d(a, b, return_indices=True)\n ef = np.array([1, 2, 3, 4])\n assert_array_equal(c, ef)\n assert_array_equal(a[i1], ef)\n assert_array_equal(b[i2], ef)\n\n # non1d, unique inputs\n a = np.array([[2, 4, 5, 6], [7, 8, 1, 15]])\n b = np.array([[3, 2, 7, 6], [10, 12, 8, 9]])\n c, i1, i2 = intersect1d(a, b, assume_unique=True, return_indices=True)\n ui1 = np.unravel_index(i1, a.shape)\n ui2 = np.unravel_index(i2, b.shape)\n ea = np.array([2, 6, 7, 8])\n assert_array_equal(ea, a[ui1])\n 
assert_array_equal(ea, b[ui2])\n\n # non1d, not assumed to be uniqueinputs\n a = np.array([[2, 4, 5, 6, 6], [4, 7, 8, 7, 2]])\n b = np.array([[3, 2, 7, 7], [10, 12, 8, 7]])\n c, i1, i2 = intersect1d(a, b, return_indices=True)\n ui1 = np.unravel_index(i1, a.shape)\n ui2 = np.unravel_index(i2, b.shape)\n ea = np.array([2, 7, 8])\n assert_array_equal(ea, a[ui1])\n assert_array_equal(ea, b[ui2])\n\n def test_setxor1d(self):\n a = np.array([5, 7, 1, 2])\n b = np.array([2, 4, 3, 1, 5])\n\n ec = np.array([3, 4, 7])\n c = setxor1d(a, b)\n assert_array_equal(c, ec)\n\n a = np.array([1, 2, 3])\n b = np.array([6, 5, 4])\n\n ec = np.array([1, 2, 3, 4, 5, 6])\n c = setxor1d(a, b)\n assert_array_equal(c, ec)\n\n a = np.array([1, 8, 2, 3])\n b = np.array([6, 5, 4, 8])\n\n ec = np.array([1, 2, 3, 4, 5, 6])\n c = setxor1d(a, b)\n assert_array_equal(c, ec)\n\n assert_array_equal([], setxor1d([], []))\n\n def test_setxor1d_unique(self):\n a = np.array([1, 8, 2, 3])\n b = np.array([6, 5, 4, 8])\n\n ec = np.array([1, 2, 3, 4, 5, 6])\n c = setxor1d(a, b, assume_unique=True)\n assert_array_equal(c, ec)\n\n a = np.array([[1], [8], [2], [3]])\n b = np.array([[6, 5], [4, 8]])\n\n ec = np.array([1, 2, 3, 4, 5, 6])\n c = setxor1d(a, b, assume_unique=True)\n assert_array_equal(c, ec)\n\n def test_ediff1d(self):\n zero_elem = np.array([])\n one_elem = np.array([1])\n two_elem = np.array([1, 2])\n\n assert_array_equal([], ediff1d(zero_elem))\n assert_array_equal([0], ediff1d(zero_elem, to_begin=0))\n assert_array_equal([0], ediff1d(zero_elem, to_end=0))\n assert_array_equal([-1, 0], ediff1d(zero_elem, to_begin=-1, to_end=0))\n assert_array_equal([], ediff1d(one_elem))\n assert_array_equal([1], ediff1d(two_elem))\n assert_array_equal([7, 1, 9], ediff1d(two_elem, to_begin=7, to_end=9))\n assert_array_equal([5, 6, 1, 7, 8],\n ediff1d(two_elem, to_begin=[5, 6], to_end=[7, 8]))\n assert_array_equal([1, 9], ediff1d(two_elem, to_end=9))\n assert_array_equal([1, 7, 8], ediff1d(two_elem, to_end=[7, 
8]))\n assert_array_equal([7, 1], ediff1d(two_elem, to_begin=7))\n assert_array_equal([5, 6, 1], ediff1d(two_elem, to_begin=[5, 6]))\n\n @pytest.mark.parametrize("ary, prepend, append, expected", [\n # should fail because trying to cast\n # np.nan standard floating point value\n # into an integer array:\n (np.array([1, 2, 3], dtype=np.int64),\n None,\n np.nan,\n 'to_end'),\n # should fail because attempting\n # to downcast to int type:\n (np.array([1, 2, 3], dtype=np.int64),\n np.array([5, 7, 2], dtype=np.float32),\n None,\n 'to_begin'),\n # should fail because attempting to cast\n # two special floating point values\n # to integers (on both sides of ary),\n # `to_begin` is in the error message as the impl checks this first:\n (np.array([1., 3., 9.], dtype=np.int8),\n np.nan,\n np.nan,\n 'to_begin'),\n ])\n def test_ediff1d_forbidden_type_casts(self, ary, prepend, append, expected):\n # verify resolution of gh-11490\n\n # specifically, raise an appropriate\n # Exception when attempting to append or\n # prepend with an incompatible type\n msg = f'dtype of `{expected}` must be compatible'\n with assert_raises_regex(TypeError, msg):\n ediff1d(ary=ary,\n to_end=append,\n to_begin=prepend)\n\n @pytest.mark.parametrize(\n "ary,prepend,append,expected",\n [\n (np.array([1, 2, 3], dtype=np.int16),\n 2**16, # will be cast to int16 under same kind rule.\n 2**16 + 4,\n np.array([0, 1, 1, 4], dtype=np.int16)),\n (np.array([1, 2, 3], dtype=np.float32),\n np.array([5], dtype=np.float64),\n None,\n np.array([5, 1, 1], dtype=np.float32)),\n (np.array([1, 2, 3], dtype=np.int32),\n 0,\n 0,\n np.array([0, 1, 1, 0], dtype=np.int32)),\n (np.array([1, 2, 3], dtype=np.int64),\n 3,\n -9,\n np.array([3, 1, 1, -9], dtype=np.int64)),\n ]\n )\n def test_ediff1d_scalar_handling(self,\n ary,\n prepend,\n append,\n expected):\n # maintain backwards-compatibility\n # of scalar prepend / append behavior\n # in ediff1d following fix for gh-11490\n actual = np.ediff1d(ary=ary,\n to_end=append,\n 
to_begin=prepend)\n assert_equal(actual, expected)\n assert actual.dtype == expected.dtype\n\n @pytest.mark.parametrize("kind", [None, "sort", "table"])\n def test_isin(self, kind):\n def _isin_slow(a, b):\n b = np.asarray(b).flatten().tolist()\n return a in b\n isin_slow = np.vectorize(_isin_slow, otypes=[bool], excluded={1})\n\n def assert_isin_equal(a, b):\n x = isin(a, b, kind=kind)\n y = isin_slow(a, b)\n assert_array_equal(x, y)\n\n # multidimensional arrays in both arguments\n a = np.arange(24).reshape([2, 3, 4])\n b = np.array([[10, 20, 30], [0, 1, 3], [11, 22, 33]])\n assert_isin_equal(a, b)\n\n # array-likes as both arguments\n c = [(9, 8), (7, 6)]\n d = (9, 7)\n assert_isin_equal(c, d)\n\n # zero-d array:\n f = np.array(3)\n assert_isin_equal(f, b)\n assert_isin_equal(a, f)\n assert_isin_equal(f, f)\n\n # scalar:\n assert_isin_equal(5, b)\n assert_isin_equal(a, 6)\n assert_isin_equal(5, 6)\n\n # empty array-like:\n if kind != "table":\n # An empty list will become float64,\n # which is invalid for kind="table"\n x = []\n assert_isin_equal(x, b)\n assert_isin_equal(a, x)\n assert_isin_equal(x, x)\n\n # empty array with various types:\n for dtype in [bool, np.int64, np.float64]:\n if kind == "table" and dtype == np.float64:\n continue\n\n if dtype in {np.int64, np.float64}:\n ar = np.array([10, 20, 30], dtype=dtype)\n elif dtype in {bool}:\n ar = np.array([True, False, False])\n\n empty_array = np.array([], dtype=dtype)\n\n assert_isin_equal(empty_array, ar)\n assert_isin_equal(ar, empty_array)\n assert_isin_equal(empty_array, empty_array)\n\n @pytest.mark.parametrize("kind", [None, "sort", "table"])\n def test_isin_additional(self, kind):\n # we use two different sizes for the b array here to test the\n # two different paths in isin().\n for mult in (1, 10):\n # One check without np.array to make sure lists are handled correct\n a = [5, 7, 1, 2]\n b = [2, 4, 3, 1, 5] * mult\n ec = np.array([True, False, True, True])\n c = isin(a, b, assume_unique=True, 
kind=kind)\n assert_array_equal(c, ec)\n\n a[0] = 8\n ec = np.array([False, False, True, True])\n c = isin(a, b, assume_unique=True, kind=kind)\n assert_array_equal(c, ec)\n\n a[0], a[3] = 4, 8\n ec = np.array([True, False, True, False])\n c = isin(a, b, assume_unique=True, kind=kind)\n assert_array_equal(c, ec)\n\n a = np.array([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5])\n b = [2, 3, 4] * mult\n ec = [False, True, False, True, True, True, True, True, True,\n False, True, False, False, False]\n c = isin(a, b, kind=kind)\n assert_array_equal(c, ec)\n\n b = b + [5, 5, 4] * mult\n ec = [True, True, True, True, True, True, True, True, True, True,\n True, False, True, True]\n c = isin(a, b, kind=kind)\n assert_array_equal(c, ec)\n\n a = np.array([5, 7, 1, 2])\n b = np.array([2, 4, 3, 1, 5] * mult)\n ec = np.array([True, False, True, True])\n c = isin(a, b, kind=kind)\n assert_array_equal(c, ec)\n\n a = np.array([5, 7, 1, 1, 2])\n b = np.array([2, 4, 3, 3, 1, 5] * mult)\n ec = np.array([True, False, True, True, True])\n c = isin(a, b, kind=kind)\n assert_array_equal(c, ec)\n\n a = np.array([5, 5])\n b = np.array([2, 2] * mult)\n ec = np.array([False, False])\n c = isin(a, b, kind=kind)\n assert_array_equal(c, ec)\n\n a = np.array([5])\n b = np.array([2])\n ec = np.array([False])\n c = isin(a, b, kind=kind)\n assert_array_equal(c, ec)\n\n if kind in {None, "sort"}:\n assert_array_equal(isin([], [], kind=kind), [])\n\n def test_isin_char_array(self):\n a = np.array(['a', 'b', 'c', 'd', 'e', 'c', 'e', 'b'])\n b = np.array(['a', 'c'])\n\n ec = np.array([True, False, True, False, False, True, False, False])\n c = isin(a, b)\n\n assert_array_equal(c, ec)\n\n @pytest.mark.parametrize("kind", [None, "sort", "table"])\n def test_isin_invert(self, kind):\n "Test isin's invert parameter"\n # We use two different sizes for the b array here to test the\n # two different paths in isin().\n for mult in (1, 10):\n a = np.array([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5])\n b = [2, 3, 4] 
* mult\n assert_array_equal(np.invert(isin(a, b, kind=kind)),\n isin(a, b, invert=True, kind=kind))\n\n # float:\n if kind in {None, "sort"}:\n for mult in (1, 10):\n a = np.array([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5],\n dtype=np.float32)\n b = [2, 3, 4] * mult\n b = np.array(b, dtype=np.float32)\n assert_array_equal(np.invert(isin(a, b, kind=kind)),\n isin(a, b, invert=True, kind=kind))\n\n def test_isin_hit_alternate_algorithm(self):\n """Hit the standard isin code with integers"""\n # Need extreme range to hit standard code\n # This hits it without the use of kind='table'\n a = np.array([5, 4, 5, 3, 4, 4, 1e9], dtype=np.int64)\n b = np.array([2, 3, 4, 1e9], dtype=np.int64)\n expected = np.array([0, 1, 0, 1, 1, 1, 1], dtype=bool)\n assert_array_equal(expected, isin(a, b))\n assert_array_equal(np.invert(expected), isin(a, b, invert=True))\n\n a = np.array([5, 7, 1, 2], dtype=np.int64)\n b = np.array([2, 4, 3, 1, 5, 1e9], dtype=np.int64)\n ec = np.array([True, False, True, True])\n c = isin(a, b, assume_unique=True)\n assert_array_equal(c, ec)\n\n @pytest.mark.parametrize("kind", [None, "sort", "table"])\n def test_isin_boolean(self, kind):\n """Test that isin works for boolean input"""\n a = np.array([True, False])\n b = np.array([False, False, False])\n expected = np.array([False, True])\n assert_array_equal(expected,\n isin(a, b, kind=kind))\n assert_array_equal(np.invert(expected),\n isin(a, b, invert=True, kind=kind))\n\n @pytest.mark.parametrize("kind", [None, "sort"])\n def test_isin_timedelta(self, kind):\n """Test that isin works for timedelta input"""\n rstate = np.random.RandomState(0)\n a = rstate.randint(0, 100, size=10)\n b = rstate.randint(0, 100, size=10)\n truth = isin(a, b)\n a_timedelta = a.astype("timedelta64[s]")\n b_timedelta = b.astype("timedelta64[s]")\n assert_array_equal(truth, isin(a_timedelta, b_timedelta, kind=kind))\n\n def test_isin_table_timedelta_fails(self):\n a = np.array([0, 1, 2], dtype="timedelta64[s]")\n b = a\n # Make 
sure it raises a value error:\n with pytest.raises(ValueError):\n isin(a, b, kind="table")\n\n @pytest.mark.parametrize(\n "dtype1,dtype2",\n [\n (np.int8, np.int16),\n (np.int16, np.int8),\n (np.uint8, np.uint16),\n (np.uint16, np.uint8),\n (np.uint8, np.int16),\n (np.int16, np.uint8),\n (np.uint64, np.int64),\n ]\n )\n @pytest.mark.parametrize("kind", [None, "sort", "table"])\n def test_isin_mixed_dtype(self, dtype1, dtype2, kind):\n """Test that isin works as expected for mixed dtype input."""\n is_dtype2_signed = np.issubdtype(dtype2, np.signedinteger)\n ar1 = np.array([0, 0, 1, 1], dtype=dtype1)\n\n if is_dtype2_signed:\n ar2 = np.array([-128, 0, 127], dtype=dtype2)\n else:\n ar2 = np.array([127, 0, 255], dtype=dtype2)\n\n expected = np.array([True, True, False, False])\n\n expect_failure = kind == "table" and (\n dtype1 == np.int16 and dtype2 == np.int8)\n\n if expect_failure:\n with pytest.raises(RuntimeError, match="exceed the maximum"):\n isin(ar1, ar2, kind=kind)\n else:\n assert_array_equal(isin(ar1, ar2, kind=kind), expected)\n\n @pytest.mark.parametrize("data", [\n np.array([2**63, 2**63 + 1], dtype=np.uint64),\n np.array([-2**62, -2**62 - 1], dtype=np.int64),\n ])\n @pytest.mark.parametrize("kind", [None, "sort", "table"])\n def test_isin_mixed_huge_vals(self, kind, data):\n """Test values outside intp range (negative ones if 32bit system)"""\n query = data[1]\n res = np.isin(data, query, kind=kind)\n assert_array_equal(res, [False, True])\n # Also check that nothing weird happens for values can't possibly\n # in range.\n data = data.astype(np.int32) # clearly different values\n res = np.isin(data, query, kind=kind)\n assert_array_equal(res, [False, False])\n\n @pytest.mark.parametrize("kind", [None, "sort", "table"])\n def test_isin_mixed_boolean(self, kind):\n """Test that isin works as expected for bool/int input."""\n for dtype in np.typecodes["AllInteger"]:\n a = np.array([True, False, False], dtype=bool)\n b = np.array([0, 0, 0, 0], 
dtype=dtype)\n expected = np.array([False, True, True], dtype=bool)\n assert_array_equal(isin(a, b, kind=kind), expected)\n\n a, b = b, a\n expected = np.array([True, True, True, True], dtype=bool)\n assert_array_equal(isin(a, b, kind=kind), expected)\n\n def test_isin_first_array_is_object(self):\n ar1 = [None]\n ar2 = np.array([1] * 10)\n expected = np.array([False])\n result = np.isin(ar1, ar2)\n assert_array_equal(result, expected)\n\n def test_isin_second_array_is_object(self):\n ar1 = 1\n ar2 = np.array([None] * 10)\n expected = np.array([False])\n result = np.isin(ar1, ar2)\n assert_array_equal(result, expected)\n\n def test_isin_both_arrays_are_object(self):\n ar1 = [None]\n ar2 = np.array([None] * 10)\n expected = np.array([True])\n result = np.isin(ar1, ar2)\n assert_array_equal(result, expected)\n\n def test_isin_both_arrays_have_structured_dtype(self):\n # Test arrays of a structured data type containing an integer field\n # and a field of dtype `object` allowing for arbitrary Python objects\n dt = np.dtype([('field1', int), ('field2', object)])\n ar1 = np.array([(1, None)], dtype=dt)\n ar2 = np.array([(1, None)] * 10, dtype=dt)\n expected = np.array([True])\n result = np.isin(ar1, ar2)\n assert_array_equal(result, expected)\n\n def test_isin_with_arrays_containing_tuples(self):\n ar1 = np.array([(1,), 2], dtype=object)\n ar2 = np.array([(1,), 2], dtype=object)\n expected = np.array([True, True])\n result = np.isin(ar1, ar2)\n assert_array_equal(result, expected)\n result = np.isin(ar1, ar2, invert=True)\n assert_array_equal(result, np.invert(expected))\n\n # An integer is added at the end of the array to make sure\n # that the array builder will create the array with tuples\n # and after it's created the integer is removed.\n # There's a bug in the array constructor that doesn't handle\n # tuples properly and adding the integer fixes that.\n ar1 = np.array([(1,), (2, 1), 1], dtype=object)\n ar1 = ar1[:-1]\n ar2 = np.array([(1,), (2, 1), 1], 
dtype=object)\n ar2 = ar2[:-1]\n expected = np.array([True, True])\n result = np.isin(ar1, ar2)\n assert_array_equal(result, expected)\n result = np.isin(ar1, ar2, invert=True)\n assert_array_equal(result, np.invert(expected))\n\n ar1 = np.array([(1,), (2, 3), 1], dtype=object)\n ar1 = ar1[:-1]\n ar2 = np.array([(1,), 2], dtype=object)\n expected = np.array([True, False])\n result = np.isin(ar1, ar2)\n assert_array_equal(result, expected)\n result = np.isin(ar1, ar2, invert=True)\n assert_array_equal(result, np.invert(expected))\n\n def test_isin_errors(self):\n """Test that isin raises expected errors."""\n\n # Error 1: `kind` is not one of 'sort' 'table' or None.\n ar1 = np.array([1, 2, 3, 4, 5])\n ar2 = np.array([2, 4, 6, 8, 10])\n assert_raises(ValueError, isin, ar1, ar2, kind='quicksort')\n\n # Error 2: `kind="table"` does not work for non-integral arrays.\n obj_ar1 = np.array([1, 'a', 3, 'b', 5], dtype=object)\n obj_ar2 = np.array([1, 'a', 3, 'b', 5], dtype=object)\n assert_raises(ValueError, isin, obj_ar1, obj_ar2, kind='table')\n\n for dtype in [np.int32, np.int64]:\n ar1 = np.array([-1, 2, 3, 4, 5], dtype=dtype)\n # The range of this array will overflow:\n overflow_ar2 = np.array([-1, np.iinfo(dtype).max], dtype=dtype)\n\n # Error 3: `kind="table"` will trigger a runtime error\n # if there is an integer overflow expected when computing the\n # range of ar2\n assert_raises(\n RuntimeError,\n isin, ar1, overflow_ar2, kind='table'\n )\n\n # Non-error: `kind=None` will *not* trigger a runtime error\n # if there is an integer overflow, it will switch to\n # the `sort` algorithm.\n result = np.isin(ar1, overflow_ar2, kind=None)\n assert_array_equal(result, [True] + [False] * 4)\n result = np.isin(ar1, overflow_ar2, kind='sort')\n assert_array_equal(result, [True] + [False] * 4)\n\n def test_union1d(self):\n a = np.array([5, 4, 7, 1, 2])\n b = np.array([2, 4, 3, 3, 2, 1, 5])\n\n ec = np.array([1, 2, 3, 4, 5, 7])\n c = union1d(a, b)\n assert_array_equal(c, ec)\n\n 
# Tests gh-10340, arguments to union1d should be\n # flattened if they are not already 1D\n x = np.array([[0, 1, 2], [3, 4, 5]])\n y = np.array([0, 1, 2, 3, 4])\n ez = np.array([0, 1, 2, 3, 4, 5])\n z = union1d(x, y)\n assert_array_equal(z, ez)\n\n assert_array_equal([], union1d([], []))\n\n def test_setdiff1d(self):\n a = np.array([6, 5, 4, 7, 1, 2, 7, 4])\n b = np.array([2, 4, 3, 3, 2, 1, 5])\n\n ec = np.array([6, 7])\n c = setdiff1d(a, b)\n assert_array_equal(c, ec)\n\n a = np.arange(21)\n b = np.arange(19)\n ec = np.array([19, 20])\n c = setdiff1d(a, b)\n assert_array_equal(c, ec)\n\n assert_array_equal([], setdiff1d([], []))\n a = np.array((), np.uint32)\n assert_equal(setdiff1d(a, []).dtype, np.uint32)\n\n def test_setdiff1d_unique(self):\n a = np.array([3, 2, 1])\n b = np.array([7, 5, 2])\n expected = np.array([3, 1])\n actual = setdiff1d(a, b, assume_unique=True)\n assert_equal(actual, expected)\n\n def test_setdiff1d_char_array(self):\n a = np.array(['a', 'b', 'c'])\n b = np.array(['a', 'b', 's'])\n assert_array_equal(setdiff1d(a, b), np.array(['c']))\n\n def test_manyways(self):\n a = np.array([5, 7, 1, 2, 8])\n b = np.array([9, 8, 2, 4, 3, 1, 5])\n\n c1 = setxor1d(a, b)\n aux1 = intersect1d(a, b)\n aux2 = union1d(a, b)\n c2 = setdiff1d(aux2, aux1)\n assert_array_equal(c1, c2)\n\n\nclass TestUnique:\n\n def check_all(self, a, b, i1, i2, c, dt):\n base_msg = 'check {0} failed for type {1}'\n\n msg = base_msg.format('values', dt)\n v = unique(a)\n assert_array_equal(v, b, msg)\n assert type(v) == type(b)\n\n msg = base_msg.format('return_index', dt)\n v, j = unique(a, True, False, False)\n assert_array_equal(v, b, msg)\n assert_array_equal(j, i1, msg)\n assert type(v) == type(b)\n\n msg = base_msg.format('return_inverse', dt)\n v, j = unique(a, False, True, False)\n assert_array_equal(v, b, msg)\n assert_array_equal(j, i2, msg)\n assert type(v) == type(b)\n\n msg = base_msg.format('return_counts', dt)\n v, j = unique(a, False, False, True)\n 
assert_array_equal(v, b, msg)\n assert_array_equal(j, c, msg)\n assert type(v) == type(b)\n\n msg = base_msg.format('return_index and return_inverse', dt)\n v, j1, j2 = unique(a, True, True, False)\n assert_array_equal(v, b, msg)\n assert_array_equal(j1, i1, msg)\n assert_array_equal(j2, i2, msg)\n assert type(v) == type(b)\n\n msg = base_msg.format('return_index and return_counts', dt)\n v, j1, j2 = unique(a, True, False, True)\n assert_array_equal(v, b, msg)\n assert_array_equal(j1, i1, msg)\n assert_array_equal(j2, c, msg)\n assert type(v) == type(b)\n\n msg = base_msg.format('return_inverse and return_counts', dt)\n v, j1, j2 = unique(a, False, True, True)\n assert_array_equal(v, b, msg)\n assert_array_equal(j1, i2, msg)\n assert_array_equal(j2, c, msg)\n assert type(v) == type(b)\n\n msg = base_msg.format(('return_index, return_inverse '\n 'and return_counts'), dt)\n v, j1, j2, j3 = unique(a, True, True, True)\n assert_array_equal(v, b, msg)\n assert_array_equal(j1, i1, msg)\n assert_array_equal(j2, i2, msg)\n assert_array_equal(j3, c, msg)\n assert type(v) == type(b)\n\n def get_types(self):\n types = []\n types.extend(np.typecodes['AllInteger'])\n types.extend(np.typecodes['AllFloat'])\n types.append('datetime64[D]')\n types.append('timedelta64[D]')\n return types\n\n def test_unique_1d(self):\n\n a = [5, 7, 1, 2, 1, 5, 7] * 10\n b = [1, 2, 5, 7]\n i1 = [2, 3, 0, 1]\n i2 = [2, 3, 0, 1, 0, 2, 3] * 10\n c = np.multiply([2, 1, 2, 2], 10)\n\n # test for numeric arrays\n types = self.get_types()\n for dt in types:\n aa = np.array(a, dt)\n bb = np.array(b, dt)\n self.check_all(aa, bb, i1, i2, c, dt)\n\n # test for object arrays\n dt = 'O'\n aa = np.empty(len(a), dt)\n aa[:] = a\n bb = np.empty(len(b), dt)\n bb[:] = b\n self.check_all(aa, bb, i1, i2, c, dt)\n\n # test for structured arrays\n dt = [('', 'i'), ('', 'i')]\n aa = np.array(list(zip(a, a)), dt)\n bb = np.array(list(zip(b, b)), dt)\n self.check_all(aa, bb, i1, i2, c, dt)\n\n # test for ticket #2799\n aa = 
[1. + 0.j, 1 - 1.j, 1]\n assert_array_equal(np.unique(aa), [1. - 1.j, 1. + 0.j])\n\n # test for ticket #4785\n a = [(1, 2), (1, 2), (2, 3)]\n unq = [1, 2, 3]\n inv = [[0, 1], [0, 1], [1, 2]]\n a1 = unique(a)\n assert_array_equal(a1, unq)\n a2, a2_inv = unique(a, return_inverse=True)\n assert_array_equal(a2, unq)\n assert_array_equal(a2_inv, inv)\n\n # test for chararrays with return_inverse (gh-5099)\n a = np.char.chararray(5)\n a[...] = ''\n a2, a2_inv = np.unique(a, return_inverse=True)\n assert_array_equal(a2_inv, np.zeros(5))\n\n # test for ticket #9137\n a = []\n a1_idx = np.unique(a, return_index=True)[1]\n a2_inv = np.unique(a, return_inverse=True)[1]\n a3_idx, a3_inv = np.unique(a, return_index=True,\n return_inverse=True)[1:]\n assert_equal(a1_idx.dtype, np.intp)\n assert_equal(a2_inv.dtype, np.intp)\n assert_equal(a3_idx.dtype, np.intp)\n assert_equal(a3_inv.dtype, np.intp)\n\n # test for ticket 2111 - float\n a = [2.0, np.nan, 1.0, np.nan]\n ua = [1.0, 2.0, np.nan]\n ua_idx = [2, 0, 1]\n ua_inv = [1, 2, 0, 2]\n ua_cnt = [1, 1, 2]\n assert_equal(np.unique(a), ua)\n assert_equal(np.unique(a, return_index=True), (ua, ua_idx))\n assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv))\n assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt))\n\n # test for ticket 2111 - complex\n a = [2.0 - 1j, np.nan, 1.0 + 1j, complex(0.0, np.nan), complex(1.0, np.nan)]\n ua = [1.0 + 1j, 2.0 - 1j, complex(0.0, np.nan)]\n ua_idx = [2, 0, 3]\n ua_inv = [1, 2, 0, 2, 2]\n ua_cnt = [1, 1, 3]\n assert_equal(np.unique(a), ua)\n assert_equal(np.unique(a, return_index=True), (ua, ua_idx))\n assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv))\n assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt))\n\n # test for ticket 2111 - datetime64\n nat = np.datetime64('nat')\n a = [np.datetime64('2020-12-26'), nat, np.datetime64('2020-12-24'), nat]\n ua = [np.datetime64('2020-12-24'), np.datetime64('2020-12-26'), nat]\n ua_idx = [2, 0, 1]\n ua_inv = [1, 2, 0, 2]\n 
ua_cnt = [1, 1, 2]\n assert_equal(np.unique(a), ua)\n assert_equal(np.unique(a, return_index=True), (ua, ua_idx))\n assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv))\n assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt))\n\n # test for ticket 2111 - timedelta\n nat = np.timedelta64('nat')\n a = [np.timedelta64(1, 'D'), nat, np.timedelta64(1, 'h'), nat]\n ua = [np.timedelta64(1, 'h'), np.timedelta64(1, 'D'), nat]\n ua_idx = [2, 0, 1]\n ua_inv = [1, 2, 0, 2]\n ua_cnt = [1, 1, 2]\n assert_equal(np.unique(a), ua)\n assert_equal(np.unique(a, return_index=True), (ua, ua_idx))\n assert_equal(np.unique(a, return_inverse=True), (ua, ua_inv))\n assert_equal(np.unique(a, return_counts=True), (ua, ua_cnt))\n\n # test for gh-19300\n all_nans = [np.nan] * 4\n ua = [np.nan]\n ua_idx = [0]\n ua_inv = [0, 0, 0, 0]\n ua_cnt = [4]\n assert_equal(np.unique(all_nans), ua)\n assert_equal(np.unique(all_nans, return_index=True), (ua, ua_idx))\n assert_equal(np.unique(all_nans, return_inverse=True), (ua, ua_inv))\n assert_equal(np.unique(all_nans, return_counts=True), (ua, ua_cnt))\n\n def test_unique_zero_sized(self):\n # test for zero-sized arrays\n for dt in self.get_types():\n a = np.array([], dt)\n b = np.array([], dt)\n i1 = np.array([], np.int64)\n i2 = np.array([], np.int64)\n c = np.array([], np.int64)\n self.check_all(a, b, i1, i2, c, dt)\n\n def test_unique_subclass(self):\n class Subclass(np.ndarray):\n pass\n\n i1 = [2, 3, 0, 1]\n i2 = [2, 3, 0, 1, 0, 2, 3] * 10\n c = np.multiply([2, 1, 2, 2], 10)\n\n # test for numeric arrays\n types = self.get_types()\n for dt in types:\n a = np.array([5, 7, 1, 2, 1, 5, 7] * 10, dtype=dt)\n b = np.array([1, 2, 5, 7], dtype=dt)\n aa = Subclass(a.shape, dtype=dt, buffer=a)\n bb = Subclass(b.shape, dtype=dt, buffer=b)\n self.check_all(aa, bb, i1, i2, c, dt)\n\n @pytest.mark.parametrize("arg", ["return_index", "return_inverse", "return_counts"])\n def test_unsupported_hash_based(self, arg):\n """These currently never use the 
hash-based solution. However,\n it seems easier to just allow it.\n\n When the hash-based solution is added, this test should fail and be\n replaced with something more comprehensive.\n """\n a = np.array([1, 5, 2, 3, 4, 8, 199, 1, 3, 5])\n\n res_not_sorted = np.unique([1, 1], sorted=False, **{arg: True})\n res_sorted = np.unique([1, 1], sorted=True, **{arg: True})\n # The following should fail without first sorting `res_not_sorted`.\n for arr, expected in zip(res_not_sorted, res_sorted):\n assert_array_equal(arr, expected)\n\n def test_unique_axis_errors(self):\n assert_raises(TypeError, self._run_axis_tests, object)\n assert_raises(TypeError, self._run_axis_tests,\n [('a', int), ('b', object)])\n\n assert_raises(AxisError, unique, np.arange(10), axis=2)\n assert_raises(AxisError, unique, np.arange(10), axis=-2)\n\n def test_unique_axis_list(self):\n msg = "Unique failed on list of lists"\n inp = [[0, 1, 0], [0, 1, 0]]\n inp_arr = np.asarray(inp)\n assert_array_equal(unique(inp, axis=0), unique(inp_arr, axis=0), msg)\n assert_array_equal(unique(inp, axis=1), unique(inp_arr, axis=1), msg)\n\n def test_unique_axis(self):\n types = []\n types.extend(np.typecodes['AllInteger'])\n types.extend(np.typecodes['AllFloat'])\n types.append('datetime64[D]')\n types.append('timedelta64[D]')\n types.append([('a', int), ('b', int)])\n types.append([('a', int), ('b', float)])\n\n for dtype in types:\n self._run_axis_tests(dtype)\n\n msg = 'Non-bitwise-equal booleans test failed'\n data = np.arange(10, dtype=np.uint8).reshape(-1, 2).view(bool)\n result = np.array([[False, True], [True, True]], dtype=bool)\n assert_array_equal(unique(data, axis=0), result, msg)\n\n msg = 'Negative zero equality test failed'\n data = np.array([[-0.0, 0.0], [0.0, -0.0], [-0.0, 0.0], [0.0, -0.0]])\n result = np.array([[-0.0, 0.0]])\n assert_array_equal(unique(data, axis=0), result, msg)\n\n @pytest.mark.parametrize("axis", [0, -1])\n def test_unique_1d_with_axis(self, axis):\n x = np.array([4, 3, 2, 
3, 2, 1, 2, 2])\n uniq = unique(x, axis=axis)\n assert_array_equal(uniq, [1, 2, 3, 4])\n\n @pytest.mark.parametrize("axis", [None, 0, -1])\n def test_unique_inverse_with_axis(self, axis):\n x = np.array([[4, 4, 3], [2, 2, 1], [2, 2, 1], [4, 4, 3]])\n uniq, inv = unique(x, return_inverse=True, axis=axis)\n assert_equal(inv.ndim, x.ndim if axis is None else 1)\n assert_array_equal(x, np.take(uniq, inv, axis=axis))\n\n def test_unique_axis_zeros(self):\n # issue 15559\n single_zero = np.empty(shape=(2, 0), dtype=np.int8)\n uniq, idx, inv, cnt = unique(single_zero, axis=0, return_index=True,\n return_inverse=True, return_counts=True)\n\n # there's 1 element of shape (0,) along axis 0\n assert_equal(uniq.dtype, single_zero.dtype)\n assert_array_equal(uniq, np.empty(shape=(1, 0)))\n assert_array_equal(idx, np.array([0]))\n assert_array_equal(inv, np.array([0, 0]))\n assert_array_equal(cnt, np.array([2]))\n\n # there's 0 elements of shape (2,) along axis 1\n uniq, idx, inv, cnt = unique(single_zero, axis=1, return_index=True,\n return_inverse=True, return_counts=True)\n\n assert_equal(uniq.dtype, single_zero.dtype)\n assert_array_equal(uniq, np.empty(shape=(2, 0)))\n assert_array_equal(idx, np.array([]))\n assert_array_equal(inv, np.array([]))\n assert_array_equal(cnt, np.array([]))\n\n # test a "complicated" shape\n shape = (0, 2, 0, 3, 0, 4, 0)\n multiple_zeros = np.empty(shape=shape)\n for axis in range(len(shape)):\n expected_shape = list(shape)\n if shape[axis] == 0:\n expected_shape[axis] = 0\n else:\n expected_shape[axis] = 1\n\n assert_array_equal(unique(multiple_zeros, axis=axis),\n np.empty(shape=expected_shape))\n\n def test_unique_masked(self):\n # issue 8664\n x = np.array([64, 0, 1, 2, 3, 63, 63, 0, 0, 0, 1, 2, 0, 63, 0],\n dtype='uint8')\n y = np.ma.masked_equal(x, 0)\n\n v = np.unique(y)\n v2, i, c = np.unique(y, return_index=True, return_counts=True)\n\n msg = 'Unique returned different results when asked for index'\n assert_array_equal(v.data, v2.data, 
msg)\n assert_array_equal(v.mask, v2.mask, msg)\n\n def test_unique_sort_order_with_axis(self):\n # These tests fail if sorting along axis is done by treating subarrays\n # as unsigned byte strings. See gh-10495.\n fmt = "sort order incorrect for integer type '%s'"\n for dt in 'bhilq':\n a = np.array([[-1], [0]], dt)\n b = np.unique(a, axis=0)\n assert_array_equal(a, b, fmt % dt)\n\n def _run_axis_tests(self, dtype):\n data = np.array([[0, 1, 0, 0],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [1, 0, 0, 0]]).astype(dtype)\n\n msg = 'Unique with 1d array and axis=0 failed'\n result = np.array([0, 1])\n assert_array_equal(unique(data), result.astype(dtype), msg)\n\n msg = 'Unique with 2d array and axis=0 failed'\n result = np.array([[0, 1, 0, 0], [1, 0, 0, 0]])\n assert_array_equal(unique(data, axis=0), result.astype(dtype), msg)\n\n msg = 'Unique with 2d array and axis=1 failed'\n result = np.array([[0, 0, 1], [0, 1, 0], [0, 0, 1], [0, 1, 0]])\n assert_array_equal(unique(data, axis=1), result.astype(dtype), msg)\n\n msg = 'Unique with 3d array and axis=2 failed'\n data3d = np.array([[[1, 1],\n [1, 0]],\n [[0, 1],\n [0, 0]]]).astype(dtype)\n result = np.take(data3d, [1, 0], axis=2)\n assert_array_equal(unique(data3d, axis=2), result, msg)\n\n uniq, idx, inv, cnt = unique(data, axis=0, return_index=True,\n return_inverse=True, return_counts=True)\n msg = "Unique's return_index=True failed with axis=0"\n assert_array_equal(data[idx], uniq, msg)\n msg = "Unique's return_inverse=True failed with axis=0"\n assert_array_equal(np.take(uniq, inv, axis=0), data)\n msg = "Unique's return_counts=True failed with axis=0"\n assert_array_equal(cnt, np.array([2, 2]), msg)\n\n uniq, idx, inv, cnt = unique(data, axis=1, return_index=True,\n return_inverse=True, return_counts=True)\n msg = "Unique's return_index=True failed with axis=1"\n assert_array_equal(data[:, idx], uniq)\n msg = "Unique's return_inverse=True failed with axis=1"\n assert_array_equal(np.take(uniq, inv, axis=1), data)\n msg = 
"Unique's return_counts=True failed with axis=1"\n assert_array_equal(cnt, np.array([2, 1, 1]), msg)\n\n def test_unique_nanequals(self):\n # issue 20326\n a = np.array([1, 1, np.nan, np.nan, np.nan])\n unq = np.unique(a)\n not_unq = np.unique(a, equal_nan=False)\n assert_array_equal(unq, np.array([1, np.nan]))\n assert_array_equal(not_unq, np.array([1, np.nan, np.nan, np.nan]))\n\n def test_unique_array_api_functions(self):\n arr = np.array([np.nan, 1, 4, 1, 3, 4, np.nan, 5, 1])\n\n for res_unique_array_api, res_unique in [\n (\n np.unique_values(arr),\n np.unique(arr, equal_nan=False)\n ),\n (\n np.unique_counts(arr),\n np.unique(arr, return_counts=True, equal_nan=False)\n ),\n (\n np.unique_inverse(arr),\n np.unique(arr, return_inverse=True, equal_nan=False)\n ),\n (\n np.unique_all(arr),\n np.unique(\n arr,\n return_index=True,\n return_inverse=True,\n return_counts=True,\n equal_nan=False\n )\n )\n ]:\n assert len(res_unique_array_api) == len(res_unique)\n for actual, expected in zip(res_unique_array_api, res_unique):\n assert_array_equal(actual, expected)\n\n def test_unique_inverse_shape(self):\n # Regression test for https://github.com/numpy/numpy/issues/25552\n arr = np.array([[1, 2, 3], [2, 3, 1]])\n expected_values, expected_inverse = np.unique(arr, return_inverse=True)\n expected_inverse = expected_inverse.reshape(arr.shape)\n for func in np.unique_inverse, np.unique_all:\n result = func(arr)\n assert_array_equal(expected_values, result.values)\n assert_array_equal(expected_inverse, result.inverse_indices)\n assert_array_equal(arr, result.values[result.inverse_indices])\n\n @pytest.mark.parametrize(\n 'data',\n [[[1, 1, 1],\n [1, 1, 1]],\n [1, 3, 2],\n 1],\n )\n @pytest.mark.parametrize('transpose', [False, True])\n @pytest.mark.parametrize('dtype', [np.int32, np.float64])\n def test_unique_with_matrix(self, data, transpose, dtype):\n mat = np.matrix(data).astype(dtype)\n if transpose:\n mat = mat.T\n u = np.unique(mat)\n expected = 
np.unique(np.asarray(mat))\n assert_array_equal(u, expected, strict=True)\n | .venv\Lib\site-packages\numpy\lib\tests\test_arraysetops.py | test_arraysetops.py | Python | 41,519 | 0.95 | 0.10987 | 0.091703 | python-kit | 167 | 2024-01-16T10:15:12.506368 | Apache-2.0 | true | 048501a9659b6293ac2e548d7a6b3b6d |
from functools import reduce\nfrom operator import mul\n\nimport numpy as np\nfrom numpy.lib import Arrayterator\nfrom numpy.random import randint\nfrom numpy.testing import assert_\n\n\ndef test():\n np.random.seed(np.arange(10))\n\n # Create a random array\n ndims = randint(5) + 1\n shape = tuple(randint(10) + 1 for dim in range(ndims))\n els = reduce(mul, shape)\n a = np.arange(els)\n a.shape = shape\n\n buf_size = randint(2 * els)\n b = Arrayterator(a, buf_size)\n\n # Check that each block has at most ``buf_size`` elements\n for block in b:\n assert_(len(block.flat) <= (buf_size or els))\n\n # Check that all elements are iterated correctly\n assert_(list(b.flat) == list(a.flat))\n\n # Slice arrayterator\n start = [randint(dim) for dim in shape]\n stop = [randint(dim) + 1 for dim in shape]\n step = [randint(dim) + 1 for dim in shape]\n slice_ = tuple(slice(*t) for t in zip(start, stop, step))\n c = b[slice_]\n d = a[slice_]\n\n # Check that each block has at most ``buf_size`` elements\n for block in c:\n assert_(len(block.flat) <= (buf_size or els))\n\n # Check that the arrayterator is sliced correctly\n assert_(np.all(c.__array__() == d))\n\n # Check that all elements are iterated correctly\n assert_(list(c.flat) == list(d.flat))\n | .venv\Lib\site-packages\numpy\lib\tests\test_arrayterator.py | test_arrayterator.py | Python | 1,347 | 0.95 | 0.173913 | 0.2 | awesome-app | 839 | 2024-04-07T19:47:36.484572 | GPL-3.0 | true | 300e63a788c3f4058752bcdbfc39cedc |
import numpy as np\nfrom numpy.lib import array_utils\nfrom numpy.testing import assert_equal\n\n\nclass TestByteBounds:\n def test_byte_bounds(self):\n # pointer difference matches size * itemsize\n # due to contiguity\n a = np.arange(12).reshape(3, 4)\n low, high = array_utils.byte_bounds(a)\n assert_equal(high - low, a.size * a.itemsize)\n\n def test_unusual_order_positive_stride(self):\n a = np.arange(12).reshape(3, 4)\n b = a.T\n low, high = array_utils.byte_bounds(b)\n assert_equal(high - low, b.size * b.itemsize)\n\n def test_unusual_order_negative_stride(self):\n a = np.arange(12).reshape(3, 4)\n b = a.T[::-1]\n low, high = array_utils.byte_bounds(b)\n assert_equal(high - low, b.size * b.itemsize)\n\n def test_strided(self):\n a = np.arange(12)\n b = a[::2]\n low, high = array_utils.byte_bounds(b)\n # the largest pointer address is lost (even numbers only in the\n # stride), and compensate addresses for striding by 2\n assert_equal(high - low, b.size * 2 * b.itemsize - b.itemsize)\n | .venv\Lib\site-packages\numpy\lib\tests\test_array_utils.py | test_array_utils.py | Python | 1,150 | 0.95 | 0.1875 | 0.148148 | node-utils | 248 | 2024-08-06T10:22:22.081342 | Apache-2.0 | true | 0f0a4c924874dd153c8799d44e35bf76 |
# doctest\nr''' Test the .npy file format.\n\nSet up:\n\n >>> import sys\n >>> from io import BytesIO\n >>> from numpy.lib import format\n >>>\n >>> scalars = [\n ... np.uint8,\n ... np.int8,\n ... np.uint16,\n ... np.int16,\n ... np.uint32,\n ... np.int32,\n ... np.uint64,\n ... np.int64,\n ... np.float32,\n ... np.float64,\n ... np.complex64,\n ... np.complex128,\n ... object,\n ... ]\n >>>\n >>> basic_arrays = []\n >>>\n >>> for scalar in scalars:\n ... for endian in '<>':\n ... dtype = np.dtype(scalar).newbyteorder(endian)\n ... basic = np.arange(15).astype(dtype)\n ... basic_arrays.extend([\n ... np.array([], dtype=dtype),\n ... np.array(10, dtype=dtype),\n ... basic,\n ... basic.reshape((3,5)),\n ... basic.reshape((3,5)).T,\n ... basic.reshape((3,5))[::-1,::2],\n ... ])\n ...\n >>>\n >>> Pdescr = [\n ... ('x', 'i4', (2,)),\n ... ('y', 'f8', (2, 2)),\n ... ('z', 'u1')]\n >>>\n >>>\n >>> PbufferT = [\n ... ([3,2], [[6.,4.],[6.,4.]], 8),\n ... ([4,3], [[7.,5.],[7.,5.]], 9),\n ... ]\n >>>\n >>>\n >>> Ndescr = [\n ... ('x', 'i4', (2,)),\n ... ('Info', [\n ... ('value', 'c16'),\n ... ('y2', 'f8'),\n ... ('Info2', [\n ... ('name', 'S2'),\n ... ('value', 'c16', (2,)),\n ... ('y3', 'f8', (2,)),\n ... ('z3', 'u4', (2,))]),\n ... ('name', 'S2'),\n ... ('z2', 'b1')]),\n ... ('color', 'S2'),\n ... ('info', [\n ... ('Name', 'U8'),\n ... ('Value', 'c16')]),\n ... ('y', 'f8', (2, 2)),\n ... ('z', 'u1')]\n >>>\n >>>\n >>> NbufferT = [\n ... ([3,2], (6j, 6., ('nn', [6j,4j], [6.,4.], [1,2]), 'NN', True), 'cc', ('NN', 6j), [[6.,4.],[6.,4.]], 8),\n ... ([4,3], (7j, 7., ('oo', [7j,5j], [7.,5.], [2,1]), 'OO', False), 'dd', ('OO', 7j), [[7.,5.],[7.,5.]], 9),\n ... ]\n >>>\n >>>\n >>> record_arrays = [\n ... np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('<')),\n ... np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('<')),\n ... np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('>')),\n ... np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('>')),\n ... 
]\n\nTest the magic string writing.\n\n >>> format.magic(1, 0)\n '\x93NUMPY\x01\x00'\n >>> format.magic(0, 0)\n '\x93NUMPY\x00\x00'\n >>> format.magic(255, 255)\n '\x93NUMPY\xff\xff'\n >>> format.magic(2, 5)\n '\x93NUMPY\x02\x05'\n\nTest the magic string reading.\n\n >>> format.read_magic(BytesIO(format.magic(1, 0)))\n (1, 0)\n >>> format.read_magic(BytesIO(format.magic(0, 0)))\n (0, 0)\n >>> format.read_magic(BytesIO(format.magic(255, 255)))\n (255, 255)\n >>> format.read_magic(BytesIO(format.magic(2, 5)))\n (2, 5)\n\nTest the header writing.\n\n >>> for arr in basic_arrays + record_arrays:\n ... f = BytesIO()\n ... format.write_array_header_1_0(f, arr) # XXX: arr is not a dict, items gets called on it\n ... print(repr(f.getvalue()))\n ...\n "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (0,)} \n"\n "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': ()} \n"\n "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (15,)} \n"\n "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 5)} \n"\n "F\x00{'descr': '|u1', 'fortran_order': True, 'shape': (5, 3)} \n"\n "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 3)} \n"\n "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (0,)} \n"\n "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': ()} \n"\n "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (15,)} \n"\n "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 5)} \n"\n "F\x00{'descr': '|u1', 'fortran_order': True, 'shape': (5, 3)} \n"\n "F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 3)} \n"\n "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (0,)} \n"\n "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': ()} \n"\n "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (15,)} \n"\n "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 5)} \n"\n "F\x00{'descr': '|i1', 'fortran_order': True, 'shape': (5, 3)} \n"\n "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 3)} \n"\n 
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (0,)} \n"\n "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': ()} \n"\n "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (15,)} \n"\n "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 5)} \n"\n "F\x00{'descr': '|i1', 'fortran_order': True, 'shape': (5, 3)} \n"\n "F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 3)} \n"\n "F\x00{'descr': '<u2', 'fortran_order': False, 'shape': (0,)} \n"\n "F\x00{'descr': '<u2', 'fortran_order': False, 'shape': ()} \n"\n "F\x00{'descr': '<u2', 'fortran_order': False, 'shape': (15,)} \n"\n "F\x00{'descr': '<u2', 'fortran_order': False, 'shape': (3, 5)} \n"\n "F\x00{'descr': '<u2', 'fortran_order': True, 'shape': (5, 3)} \n"\n "F\x00{'descr': '<u2', 'fortran_order': False, 'shape': (3, 3)} \n"\n "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (0,)} \n"\n "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': ()} \n"\n "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (15,)} \n"\n "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (3, 5)} \n"\n "F\x00{'descr': '>u2', 'fortran_order': True, 'shape': (5, 3)} \n"\n "F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (3, 3)} \n"\n "F\x00{'descr': '<i2', 'fortran_order': False, 'shape': (0,)} \n"\n "F\x00{'descr': '<i2', 'fortran_order': False, 'shape': ()} \n"\n "F\x00{'descr': '<i2', 'fortran_order': False, 'shape': (15,)} \n"\n "F\x00{'descr': '<i2', 'fortran_order': False, 'shape': (3, 5)} \n"\n "F\x00{'descr': '<i2', 'fortran_order': True, 'shape': (5, 3)} \n"\n "F\x00{'descr': '<i2', 'fortran_order': False, 'shape': (3, 3)} \n"\n "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (0,)} \n"\n "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': ()} \n"\n "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (15,)} \n"\n "F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (3, 5)} \n"\n "F\x00{'descr': '>i2', 'fortran_order': True, 'shape': (5, 3)} \n"\n 
"F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (3, 3)} \n"\n "F\x00{'descr': '<u4', 'fortran_order': False, 'shape': (0,)} \n"\n "F\x00{'descr': '<u4', 'fortran_order': False, 'shape': ()} \n"\n "F\x00{'descr': '<u4', 'fortran_order': False, 'shape': (15,)} \n"\n "F\x00{'descr': '<u4', 'fortran_order': False, 'shape': (3, 5)} \n"\n "F\x00{'descr': '<u4', 'fortran_order': True, 'shape': (5, 3)} \n"\n "F\x00{'descr': '<u4', 'fortran_order': False, 'shape': (3, 3)} \n"\n "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (0,)} \n"\n "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': ()} \n"\n "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (15,)} \n"\n "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (3, 5)} \n"\n "F\x00{'descr': '>u4', 'fortran_order': True, 'shape': (5, 3)} \n"\n "F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (3, 3)} \n"\n "F\x00{'descr': '<i4', 'fortran_order': False, 'shape': (0,)} \n"\n "F\x00{'descr': '<i4', 'fortran_order': False, 'shape': ()} \n"\n "F\x00{'descr': '<i4', 'fortran_order': False, 'shape': (15,)} \n"\n "F\x00{'descr': '<i4', 'fortran_order': False, 'shape': (3, 5)} \n"\n "F\x00{'descr': '<i4', 'fortran_order': True, 'shape': (5, 3)} \n"\n "F\x00{'descr': '<i4', 'fortran_order': False, 'shape': (3, 3)} \n"\n "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (0,)} \n"\n "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': ()} \n"\n "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (15,)} \n"\n "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (3, 5)} \n"\n "F\x00{'descr': '>i4', 'fortran_order': True, 'shape': (5, 3)} \n"\n "F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (3, 3)} \n"\n "F\x00{'descr': '<u8', 'fortran_order': False, 'shape': (0,)} \n"\n "F\x00{'descr': '<u8', 'fortran_order': False, 'shape': ()} \n"\n "F\x00{'descr': '<u8', 'fortran_order': False, 'shape': (15,)} \n"\n "F\x00{'descr': '<u8', 'fortran_order': False, 'shape': (3, 5)} \n"\n 
"F\x00{'descr': '<u8', 'fortran_order': True, 'shape': (5, 3)} \n"\n "F\x00{'descr': '<u8', 'fortran_order': False, 'shape': (3, 3)} \n"\n "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (0,)} \n"\n "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': ()} \n"\n "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (15,)} \n"\n "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (3, 5)} \n"\n "F\x00{'descr': '>u8', 'fortran_order': True, 'shape': (5, 3)} \n"\n "F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (3, 3)} \n"\n "F\x00{'descr': '<i8', 'fortran_order': False, 'shape': (0,)} \n"\n "F\x00{'descr': '<i8', 'fortran_order': False, 'shape': ()} \n"\n "F\x00{'descr': '<i8', 'fortran_order': False, 'shape': (15,)} \n"\n "F\x00{'descr': '<i8', 'fortran_order': False, 'shape': (3, 5)} \n"\n "F\x00{'descr': '<i8', 'fortran_order': True, 'shape': (5, 3)} \n"\n "F\x00{'descr': '<i8', 'fortran_order': False, 'shape': (3, 3)} \n"\n "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (0,)} \n"\n "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': ()} \n"\n "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (15,)} \n"\n "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (3, 5)} \n"\n "F\x00{'descr': '>i8', 'fortran_order': True, 'shape': (5, 3)} \n"\n "F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (3, 3)} \n"\n "F\x00{'descr': '<f4', 'fortran_order': False, 'shape': (0,)} \n"\n "F\x00{'descr': '<f4', 'fortran_order': False, 'shape': ()} \n"\n "F\x00{'descr': '<f4', 'fortran_order': False, 'shape': (15,)} \n"\n "F\x00{'descr': '<f4', 'fortran_order': False, 'shape': (3, 5)} \n"\n "F\x00{'descr': '<f4', 'fortran_order': True, 'shape': (5, 3)} \n"\n "F\x00{'descr': '<f4', 'fortran_order': False, 'shape': (3, 3)} \n"\n "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (0,)} \n"\n "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': ()} \n"\n "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (15,)} \n"\n 
"F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (3, 5)} \n"\n "F\x00{'descr': '>f4', 'fortran_order': True, 'shape': (5, 3)} \n"\n "F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (3, 3)} \n"\n "F\x00{'descr': '<f8', 'fortran_order': False, 'shape': (0,)} \n"\n "F\x00{'descr': '<f8', 'fortran_order': False, 'shape': ()} \n"\n "F\x00{'descr': '<f8', 'fortran_order': False, 'shape': (15,)} \n"\n "F\x00{'descr': '<f8', 'fortran_order': False, 'shape': (3, 5)} \n"\n "F\x00{'descr': '<f8', 'fortran_order': True, 'shape': (5, 3)} \n"\n "F\x00{'descr': '<f8', 'fortran_order': False, 'shape': (3, 3)} \n"\n "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (0,)} \n"\n "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': ()} \n"\n "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (15,)} \n"\n "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (3, 5)} \n"\n "F\x00{'descr': '>f8', 'fortran_order': True, 'shape': (5, 3)} \n"\n "F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (3, 3)} \n"\n "F\x00{'descr': '<c8', 'fortran_order': False, 'shape': (0,)} \n"\n "F\x00{'descr': '<c8', 'fortran_order': False, 'shape': ()} \n"\n "F\x00{'descr': '<c8', 'fortran_order': False, 'shape': (15,)} \n"\n "F\x00{'descr': '<c8', 'fortran_order': False, 'shape': (3, 5)} \n"\n "F\x00{'descr': '<c8', 'fortran_order': True, 'shape': (5, 3)} \n"\n "F\x00{'descr': '<c8', 'fortran_order': False, 'shape': (3, 3)} \n"\n "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (0,)} \n"\n "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': ()} \n"\n "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (15,)} \n"\n "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (3, 5)} \n"\n "F\x00{'descr': '>c8', 'fortran_order': True, 'shape': (5, 3)} \n"\n "F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (3, 3)} \n"\n "F\x00{'descr': '<c16', 'fortran_order': False, 'shape': (0,)} \n"\n "F\x00{'descr': '<c16', 'fortran_order': False, 'shape': ()} \n"\n 
"F\x00{'descr': '<c16', 'fortran_order': False, 'shape': (15,)} \n"\n "F\x00{'descr': '<c16', 'fortran_order': False, 'shape': (3, 5)} \n"\n "F\x00{'descr': '<c16', 'fortran_order': True, 'shape': (5, 3)} \n"\n "F\x00{'descr': '<c16', 'fortran_order': False, 'shape': (3, 3)} \n"\n "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (0,)} \n"\n "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': ()} \n"\n "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (15,)} \n"\n "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (3, 5)} \n"\n "F\x00{'descr': '>c16', 'fortran_order': True, 'shape': (5, 3)} \n"\n "F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (3, 3)} \n"\n "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (0,)} \n"\n "F\x00{'descr': 'O', 'fortran_order': False, 'shape': ()} \n"\n "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (15,)} \n"\n "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 5)} \n"\n "F\x00{'descr': 'O', 'fortran_order': True, 'shape': (5, 3)} \n"\n "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 3)} \n"\n "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (0,)} \n"\n "F\x00{'descr': 'O', 'fortran_order': False, 'shape': ()} \n"\n "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (15,)} \n"\n "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 5)} \n"\n "F\x00{'descr': 'O', 'fortran_order': True, 'shape': (5, 3)} \n"\n "F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 3)} \n"\n "v\x00{'descr': [('x', '<i4', (2,)), ('y', '<f8', (2, 2)), ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n"\n "\x16\x02{'descr': [('x', '<i4', (2,)),\n ('Info',\n [('value', '<c16'),\n ('y2', '<f8'),\n ('Info2',\n [('name', '|S2'),\n ('value', '<c16', (2,)),\n ('y3', '<f8', (2,)),\n ('z3', '<u4', (2,))]),\n ('name', '|S2'),\n ('z2', '|b1')]),\n ('color', '|S2'),\n ('info', [('Name', '<U8'), ('Value', '<c16')]),\n ('y', '<f8', (2, 2)),\n ('z', '|u1')],\n 'fortran_order': False,\n 
'shape': (2,)} \n"\n "v\x00{'descr': [('x', '>i4', (2,)), ('y', '>f8', (2, 2)), ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n"\n "\x16\x02{'descr': [('x', '>i4', (2,)),\n ('Info',\n [('value', '>c16'),\n ('y2', '>f8'),\n ('Info2',\n [('name', '|S2'),\n ('value', '>c16', (2,)),\n ('y3', '>f8', (2,)),\n ('z3', '>u4', (2,))]),\n ('name', '|S2'),\n ('z2', '|b1')]),\n ('color', '|S2'),\n ('info', [('Name', '>U8'), ('Value', '>c16')]),\n ('y', '>f8', (2, 2)),\n ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n"\n'''\nimport os\nimport sys\nimport warnings\nfrom io import BytesIO\n\nimport pytest\n\nimport numpy as np\nfrom numpy.lib import format\nfrom numpy.testing import (\n IS_64BIT,\n IS_PYPY,\n IS_WASM,\n assert_,\n assert_array_equal,\n assert_raises,\n assert_raises_regex,\n assert_warns,\n)\nfrom numpy.testing._private.utils import requires_memory\n\n# Generate some basic arrays to test with.\nscalars = [\n np.uint8,\n np.int8,\n np.uint16,\n np.int16,\n np.uint32,\n np.int32,\n np.uint64,\n np.int64,\n np.float32,\n np.float64,\n np.complex64,\n np.complex128,\n object,\n]\nbasic_arrays = []\nfor scalar in scalars:\n for endian in '<>':\n dtype = np.dtype(scalar).newbyteorder(endian)\n basic = np.arange(1500).astype(dtype)\n basic_arrays.extend([\n # Empty\n np.array([], dtype=dtype),\n # Rank-0\n np.array(10, dtype=dtype),\n # 1-D\n basic,\n # 2-D C-contiguous\n basic.reshape((30, 50)),\n # 2-D F-contiguous\n basic.reshape((30, 50)).T,\n # 2-D non-contiguous\n basic.reshape((30, 50))[::-1, ::2],\n ])\n\n# More complicated record arrays.\n# This is the structure of the table used for plain objects:\n#\n# +-+-+-+\n# |x|y|z|\n# +-+-+-+\n\n# Structure of a plain array description:\nPdescr = [\n ('x', 'i4', (2,)),\n ('y', 'f8', (2, 2)),\n ('z', 'u1')]\n\n# A plain list of tuples with values for testing:\nPbufferT = [\n # x y z\n ([3, 2], [[6., 4.], [6., 4.]], 8),\n ([4, 3], [[7., 5.], [7., 5.]], 9),\n ]\n\n\n# This is the structure of the 
table used for nested objects (DON'T PANIC!):\n#\n# +-+---------------------------------+-----+----------+-+-+\n# |x|Info |color|info |y|z|\n# | +-----+--+----------------+----+--+ +----+-----+ | |\n# | |value|y2|Info2 |name|z2| |Name|Value| | |\n# | | | +----+-----+--+--+ | | | | | | |\n# | | | |name|value|y3|z3| | | | | | | |\n# +-+-----+--+----+-----+--+--+----+--+-----+----+-----+-+-+\n#\n\n# The corresponding nested array description:\nNdescr = [\n ('x', 'i4', (2,)),\n ('Info', [\n ('value', 'c16'),\n ('y2', 'f8'),\n ('Info2', [\n ('name', 'S2'),\n ('value', 'c16', (2,)),\n ('y3', 'f8', (2,)),\n ('z3', 'u4', (2,))]),\n ('name', 'S2'),\n ('z2', 'b1')]),\n ('color', 'S2'),\n ('info', [\n ('Name', 'U8'),\n ('Value', 'c16')]),\n ('y', 'f8', (2, 2)),\n ('z', 'u1')]\n\nNbufferT = [\n # x Info color info y z\n # value y2 Info2 name z2 Name Value\n # name value y3 z3\n ([3, 2], (6j, 6., ('nn', [6j, 4j], [6., 4.], [1, 2]), 'NN', True),\n 'cc', ('NN', 6j), [[6., 4.], [6., 4.]], 8),\n ([4, 3], (7j, 7., ('oo', [7j, 5j], [7., 5.], [2, 1]), 'OO', False),\n 'dd', ('OO', 7j), [[7., 5.], [7., 5.]], 9),\n ]\n\nrecord_arrays = [\n np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('<')),\n np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('<')),\n np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('>')),\n np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('>')),\n np.zeros(1, dtype=[('c', ('<f8', (5,)), (2,))])\n]\n\n\n# BytesIO that reads a random number of bytes at a time\nclass BytesIOSRandomSize(BytesIO):\n def read(self, size=None):\n import random\n size = random.randint(1, size)\n return super().read(size)\n\n\ndef roundtrip(arr):\n f = BytesIO()\n format.write_array(f, arr)\n f2 = BytesIO(f.getvalue())\n arr2 = format.read_array(f2, allow_pickle=True)\n return arr2\n\n\ndef roundtrip_randsize(arr):\n f = BytesIO()\n format.write_array(f, arr)\n f2 = BytesIOSRandomSize(f.getvalue())\n arr2 = format.read_array(f2)\n return arr2\n\n\ndef 
roundtrip_truncated(arr):\n f = BytesIO()\n format.write_array(f, arr)\n # BytesIO is one byte short\n f2 = BytesIO(f.getvalue()[0:-1])\n arr2 = format.read_array(f2)\n return arr2\n\ndef assert_equal_(o1, o2):\n assert_(o1 == o2)\n\n\ndef test_roundtrip():\n for arr in basic_arrays + record_arrays:\n arr2 = roundtrip(arr)\n assert_array_equal(arr, arr2)\n\n\ndef test_roundtrip_randsize():\n for arr in basic_arrays + record_arrays:\n if arr.dtype != object:\n arr2 = roundtrip_randsize(arr)\n assert_array_equal(arr, arr2)\n\n\ndef test_roundtrip_truncated():\n for arr in basic_arrays:\n if arr.dtype != object:\n assert_raises(ValueError, roundtrip_truncated, arr)\n\ndef test_file_truncated(tmp_path):\n path = tmp_path / "a.npy"\n for arr in basic_arrays:\n if arr.dtype != object:\n with open(path, 'wb') as f:\n format.write_array(f, arr)\n # truncate the file by one byte\n with open(path, 'rb+') as f:\n f.seek(-1, os.SEEK_END)\n f.truncate()\n with open(path, 'rb') as f:\n with pytest.raises(\n ValueError,\n match=(\n r"EOF: reading array header, "\n r"expected (\d+) bytes got (\d+)"\n ) if arr.size == 0 else (\n r"Failed to read all data for array\. "\n r"Expected \(.*?\) = (\d+) elements, "\n r"could only read (\d+) elements\. 
"\n r"\(file seems not fully written\?\)"\n )\n ):\n _ = format.read_array(f)\n\ndef test_long_str():\n # check items larger than internal buffer size, gh-4027\n long_str_arr = np.ones(1, dtype=np.dtype((str, format.BUFFER_SIZE + 1)))\n long_str_arr2 = roundtrip(long_str_arr)\n assert_array_equal(long_str_arr, long_str_arr2)\n\n\n@pytest.mark.skipif(IS_WASM, reason="memmap doesn't work correctly")\n@pytest.mark.slow\ndef test_memmap_roundtrip(tmpdir):\n for i, arr in enumerate(basic_arrays + record_arrays):\n if arr.dtype.hasobject:\n # Skip these since they can't be mmap'ed.\n continue\n # Write it out normally and through mmap.\n nfn = os.path.join(tmpdir, f'normal{i}.npy')\n mfn = os.path.join(tmpdir, f'memmap{i}.npy')\n with open(nfn, 'wb') as fp:\n format.write_array(fp, arr)\n\n fortran_order = (\n arr.flags.f_contiguous and not arr.flags.c_contiguous)\n ma = format.open_memmap(mfn, mode='w+', dtype=arr.dtype,\n shape=arr.shape, fortran_order=fortran_order)\n ma[...] = arr\n ma.flush()\n\n # Check that both of these files' contents are the same.\n with open(nfn, 'rb') as fp:\n normal_bytes = fp.read()\n with open(mfn, 'rb') as fp:\n memmap_bytes = fp.read()\n assert_equal_(normal_bytes, memmap_bytes)\n\n # Check that reading the file using memmap works.\n ma = format.open_memmap(nfn, mode='r')\n ma.flush()\n\n\ndef test_compressed_roundtrip(tmpdir):\n arr = np.random.rand(200, 200)\n npz_file = os.path.join(tmpdir, 'compressed.npz')\n np.savez_compressed(npz_file, arr=arr)\n with np.load(npz_file) as npz:\n arr1 = npz['arr']\n assert_array_equal(arr, arr1)\n\n\n# aligned\ndt1 = np.dtype('i1, i4, i1', align=True)\n# non-aligned, explicit offsets\ndt2 = np.dtype({'names': ['a', 'b'], 'formats': ['i4', 'i4'],\n 'offsets': [1, 6]})\n# nested struct-in-struct\ndt3 = np.dtype({'names': ['c', 'd'], 'formats': ['i4', dt2]})\n# field with '' name\ndt4 = np.dtype({'names': ['a', '', 'b'], 'formats': ['i4'] * 3})\n# titles\ndt5 = np.dtype({'names': ['a', 'b'], 
'formats': ['i4', 'i4'],\n 'offsets': [1, 6], 'titles': ['aa', 'bb']})\n# empty\ndt6 = np.dtype({'names': [], 'formats': [], 'itemsize': 8})\n\n@pytest.mark.parametrize("dt", [dt1, dt2, dt3, dt4, dt5, dt6])\ndef test_load_padded_dtype(tmpdir, dt):\n arr = np.zeros(3, dt)\n for i in range(3):\n arr[i] = i + 5\n npz_file = os.path.join(tmpdir, 'aligned.npz')\n np.savez(npz_file, arr=arr)\n with np.load(npz_file) as npz:\n arr1 = npz['arr']\n assert_array_equal(arr, arr1)\n\n\n@pytest.mark.skipif(sys.version_info >= (3, 12), reason="see gh-23988")\n@pytest.mark.xfail(IS_WASM, reason="Emscripten NODEFS has a buggy dup")\ndef test_python2_python3_interoperability():\n fname = 'win64python2.npy'\n path = os.path.join(os.path.dirname(__file__), 'data', fname)\n with pytest.warns(UserWarning, match="Reading.*this warning\\."):\n data = np.load(path)\n assert_array_equal(data, np.ones(2))\n\n\ndef test_pickle_python2_python3():\n # Test that loading object arrays saved on Python 2 works both on\n # Python 2 and Python 3 and vice versa\n data_dir = os.path.join(os.path.dirname(__file__), 'data')\n\n expected = np.array([None, range, '\u512a\u826f',\n b'\xe4\xb8\x8d\xe8\x89\xaf'],\n dtype=object)\n\n for fname in ['py2-np0-objarr.npy', 'py2-objarr.npy', 'py2-objarr.npz',\n 'py3-objarr.npy', 'py3-objarr.npz']:\n path = os.path.join(data_dir, fname)\n\n for encoding in ['bytes', 'latin1']:\n data_f = np.load(path, allow_pickle=True, encoding=encoding)\n if fname.endswith('.npz'):\n data = data_f['x']\n data_f.close()\n else:\n data = data_f\n\n if encoding == 'latin1' and fname.startswith('py2'):\n assert_(isinstance(data[3], str))\n assert_array_equal(data[:-1], expected[:-1])\n # mojibake occurs\n assert_array_equal(data[-1].encode(encoding), expected[-1])\n else:\n assert_(isinstance(data[3], bytes))\n assert_array_equal(data, expected)\n\n if fname.startswith('py2'):\n if fname.endswith('.npz'):\n data = np.load(path, allow_pickle=True)\n assert_raises(UnicodeError, 
data.__getitem__, 'x')\n data.close()\n data = np.load(path, allow_pickle=True, fix_imports=False,\n encoding='latin1')\n assert_raises(ImportError, data.__getitem__, 'x')\n data.close()\n else:\n assert_raises(UnicodeError, np.load, path,\n allow_pickle=True)\n assert_raises(ImportError, np.load, path,\n allow_pickle=True, fix_imports=False,\n encoding='latin1')\n\n\ndef test_pickle_disallow(tmpdir):\n data_dir = os.path.join(os.path.dirname(__file__), 'data')\n\n path = os.path.join(data_dir, 'py2-objarr.npy')\n assert_raises(ValueError, np.load, path,\n allow_pickle=False, encoding='latin1')\n\n path = os.path.join(data_dir, 'py2-objarr.npz')\n with np.load(path, allow_pickle=False, encoding='latin1') as f:\n assert_raises(ValueError, f.__getitem__, 'x')\n\n path = os.path.join(tmpdir, 'pickle-disabled.npy')\n assert_raises(ValueError, np.save, path, np.array([None], dtype=object),\n allow_pickle=False)\n\n@pytest.mark.parametrize('dt', [\n np.dtype(np.dtype([('a', np.int8),\n ('b', np.int16),\n ('c', np.int32),\n ], align=True),\n (3,)),\n np.dtype([('x', np.dtype({'names': ['a', 'b'],\n 'formats': ['i1', 'i1'],\n 'offsets': [0, 4],\n 'itemsize': 8,\n },\n (3,)),\n (4,),\n )]),\n np.dtype([('x',\n ('<f8', (5,)),\n (2,),\n )]),\n np.dtype([('x', np.dtype((\n np.dtype((\n np.dtype({'names': ['a', 'b'],\n 'formats': ['i1', 'i1'],\n 'offsets': [0, 4],\n 'itemsize': 8}),\n (3,)\n )),\n (4,)\n )))\n ]),\n np.dtype([\n ('a', np.dtype((\n np.dtype((\n np.dtype((\n np.dtype([\n ('a', int),\n ('b', np.dtype({'names': ['a', 'b'],\n 'formats': ['i1', 'i1'],\n 'offsets': [0, 4],\n 'itemsize': 8})),\n ]),\n (3,),\n )),\n (4,),\n )),\n (5,),\n )))\n ]),\n ])\ndef test_descr_to_dtype(dt):\n dt1 = format.descr_to_dtype(dt.descr)\n assert_equal_(dt1, dt)\n arr1 = np.zeros(3, dt)\n arr2 = roundtrip(arr1)\n assert_array_equal(arr1, arr2)\n\ndef test_version_2_0():\n f = BytesIO()\n # requires more than 2 byte for header\n dt = [(("%d" % i) * 100, float) for i in range(500)]\n d = 
np.ones(1000, dtype=dt)\n\n format.write_array(f, d, version=(2, 0))\n with warnings.catch_warnings(record=True) as w:\n warnings.filterwarnings('always', '', UserWarning)\n format.write_array(f, d)\n assert_(w[0].category is UserWarning)\n\n # check alignment of data portion\n f.seek(0)\n header = f.readline()\n assert_(len(header) % format.ARRAY_ALIGN == 0)\n\n f.seek(0)\n n = format.read_array(f, max_header_size=200000)\n assert_array_equal(d, n)\n\n # 1.0 requested but data cannot be saved this way\n assert_raises(ValueError, format.write_array, f, d, (1, 0))\n\n\n@pytest.mark.skipif(IS_WASM, reason="memmap doesn't work correctly")\ndef test_version_2_0_memmap(tmpdir):\n # requires more than 2 byte for header\n dt = [(("%d" % i) * 100, float) for i in range(500)]\n d = np.ones(1000, dtype=dt)\n tf1 = os.path.join(tmpdir, 'version2_01.npy')\n tf2 = os.path.join(tmpdir, 'version2_02.npy')\n\n # 1.0 requested but data cannot be saved this way\n assert_raises(ValueError, format.open_memmap, tf1, mode='w+', dtype=d.dtype,\n shape=d.shape, version=(1, 0))\n\n ma = format.open_memmap(tf1, mode='w+', dtype=d.dtype,\n shape=d.shape, version=(2, 0))\n ma[...] = d\n ma.flush()\n ma = format.open_memmap(tf1, mode='r', max_header_size=200000)\n assert_array_equal(ma, d)\n\n with warnings.catch_warnings(record=True) as w:\n warnings.filterwarnings('always', '', UserWarning)\n ma = format.open_memmap(tf2, mode='w+', dtype=d.dtype,\n shape=d.shape, version=None)\n assert_(w[0].category is UserWarning)\n ma[...] 
= d\n ma.flush()\n\n ma = format.open_memmap(tf2, mode='r', max_header_size=200000)\n\n assert_array_equal(ma, d)\n\n@pytest.mark.parametrize("mmap_mode", ["r", None])\ndef test_huge_header(tmpdir, mmap_mode):\n f = os.path.join(tmpdir, 'large_header.npy')\n arr = np.array(1, dtype="i," * 10000 + "i")\n\n with pytest.warns(UserWarning, match=".*format 2.0"):\n np.save(f, arr)\n\n with pytest.raises(ValueError, match="Header.*large"):\n np.load(f, mmap_mode=mmap_mode)\n\n with pytest.raises(ValueError, match="Header.*large"):\n np.load(f, mmap_mode=mmap_mode, max_header_size=20000)\n\n res = np.load(f, mmap_mode=mmap_mode, allow_pickle=True)\n assert_array_equal(res, arr)\n\n res = np.load(f, mmap_mode=mmap_mode, max_header_size=180000)\n assert_array_equal(res, arr)\n\ndef test_huge_header_npz(tmpdir):\n f = os.path.join(tmpdir, 'large_header.npz')\n arr = np.array(1, dtype="i," * 10000 + "i")\n\n with pytest.warns(UserWarning, match=".*format 2.0"):\n np.savez(f, arr=arr)\n\n # Only getting the array from the file actually reads it\n with pytest.raises(ValueError, match="Header.*large"):\n np.load(f)["arr"]\n\n with pytest.raises(ValueError, match="Header.*large"):\n np.load(f, max_header_size=20000)["arr"]\n\n res = np.load(f, allow_pickle=True)["arr"]\n assert_array_equal(res, arr)\n\n res = np.load(f, max_header_size=180000)["arr"]\n assert_array_equal(res, arr)\n\ndef test_write_version():\n f = BytesIO()\n arr = np.arange(1)\n # These should pass.\n format.write_array(f, arr, version=(1, 0))\n format.write_array(f, arr)\n\n format.write_array(f, arr, version=None)\n format.write_array(f, arr)\n\n format.write_array(f, arr, version=(2, 0))\n format.write_array(f, arr)\n\n # These should all fail.\n bad_versions = [\n (1, 1),\n (0, 0),\n (0, 1),\n (2, 2),\n (255, 255),\n ]\n for version in bad_versions:\n with assert_raises_regex(ValueError,\n 'we only support format version.*'):\n format.write_array(f, arr, version=version)\n\n\nbad_version_magic = [\n 
b'\x93NUMPY\x01\x01',\n b'\x93NUMPY\x00\x00',\n b'\x93NUMPY\x00\x01',\n b'\x93NUMPY\x02\x00',\n b'\x93NUMPY\x02\x02',\n b'\x93NUMPY\xff\xff',\n]\nmalformed_magic = [\n b'\x92NUMPY\x01\x00',\n b'\x00NUMPY\x01\x00',\n b'\x93numpy\x01\x00',\n b'\x93MATLB\x01\x00',\n b'\x93NUMPY\x01',\n b'\x93NUMPY',\n b'',\n]\n\ndef test_read_magic():\n s1 = BytesIO()\n s2 = BytesIO()\n\n arr = np.ones((3, 6), dtype=float)\n\n format.write_array(s1, arr, version=(1, 0))\n format.write_array(s2, arr, version=(2, 0))\n\n s1.seek(0)\n s2.seek(0)\n\n version1 = format.read_magic(s1)\n version2 = format.read_magic(s2)\n\n assert_(version1 == (1, 0))\n assert_(version2 == (2, 0))\n\n assert_(s1.tell() == format.MAGIC_LEN)\n assert_(s2.tell() == format.MAGIC_LEN)\n\ndef test_read_magic_bad_magic():\n for magic in malformed_magic:\n f = BytesIO(magic)\n assert_raises(ValueError, format.read_array, f)\n\n\ndef test_read_version_1_0_bad_magic():\n for magic in bad_version_magic + malformed_magic:\n f = BytesIO(magic)\n assert_raises(ValueError, format.read_array, f)\n\n\ndef test_bad_magic_args():\n assert_raises(ValueError, format.magic, -1, 1)\n assert_raises(ValueError, format.magic, 256, 1)\n assert_raises(ValueError, format.magic, 1, -1)\n assert_raises(ValueError, format.magic, 1, 256)\n\n\ndef test_large_header():\n s = BytesIO()\n d = {'shape': (), 'fortran_order': False, 'descr': '<i8'}\n format.write_array_header_1_0(s, d)\n\n s = BytesIO()\n d['descr'] = [('x' * 256 * 256, '<i8')]\n assert_raises(ValueError, format.write_array_header_1_0, s, d)\n\n\ndef test_read_array_header_1_0():\n s = BytesIO()\n\n arr = np.ones((3, 6), dtype=float)\n format.write_array(s, arr, version=(1, 0))\n\n s.seek(format.MAGIC_LEN)\n shape, fortran, dtype = format.read_array_header_1_0(s)\n\n assert_(s.tell() % format.ARRAY_ALIGN == 0)\n assert_((shape, fortran, dtype) == ((3, 6), False, float))\n\n\ndef test_read_array_header_2_0():\n s = BytesIO()\n\n arr = np.ones((3, 6), dtype=float)\n 
format.write_array(s, arr, version=(2, 0))\n\n s.seek(format.MAGIC_LEN)\n shape, fortran, dtype = format.read_array_header_2_0(s)\n\n assert_(s.tell() % format.ARRAY_ALIGN == 0)\n assert_((shape, fortran, dtype) == ((3, 6), False, float))\n\n\ndef test_bad_header():\n # header of length less than 2 should fail\n s = BytesIO()\n assert_raises(ValueError, format.read_array_header_1_0, s)\n s = BytesIO(b'1')\n assert_raises(ValueError, format.read_array_header_1_0, s)\n\n # header shorter than indicated size should fail\n s = BytesIO(b'\x01\x00')\n assert_raises(ValueError, format.read_array_header_1_0, s)\n\n # headers without the exact keys required should fail\n # d = {"shape": (1, 2),\n # "descr": "x"}\n s = BytesIO(\n b"\x93NUMPY\x01\x006\x00{'descr': 'x', 'shape': (1, 2), }"\n b" \n"\n )\n assert_raises(ValueError, format.read_array_header_1_0, s)\n\n d = {"shape": (1, 2),\n "fortran_order": False,\n "descr": "x",\n "extrakey": -1}\n s = BytesIO()\n format.write_array_header_1_0(s, d)\n assert_raises(ValueError, format.read_array_header_1_0, s)\n\n\ndef test_large_file_support(tmpdir):\n if (sys.platform == 'win32' or sys.platform == 'cygwin'):\n pytest.skip("Unknown if Windows has sparse filesystems")\n # try creating a large sparse file\n tf_name = os.path.join(tmpdir, 'sparse_file')\n try:\n # seek past end would work too, but linux truncate somewhat\n # increases the chances that we have a sparse filesystem and can\n # avoid actually writing 5GB\n import subprocess as sp\n sp.check_call(["truncate", "-s", "5368709120", tf_name])\n except Exception:\n pytest.skip("Could not create 5GB large file")\n # write a small array to the end\n with open(tf_name, "wb") as f:\n f.seek(5368709120)\n d = np.arange(5)\n np.save(f, d)\n # read it back\n with open(tf_name, "rb") as f:\n f.seek(5368709120)\n r = np.load(f)\n assert_array_equal(r, d)\n\n\n@pytest.mark.skipif(IS_PYPY, reason="flaky on PyPy")\n@pytest.mark.skipif(not IS_64BIT, reason="test requires 64-bit 
system")\n@pytest.mark.slow\n@requires_memory(free_bytes=2 * 2**30)\ndef test_large_archive(tmpdir):\n # Regression test for product of saving arrays with dimensions of array\n # having a product that doesn't fit in int32. See gh-7598 for details.\n shape = (2**30, 2)\n try:\n a = np.empty(shape, dtype=np.uint8)\n except MemoryError:\n pytest.skip("Could not create large file")\n\n fname = os.path.join(tmpdir, "large_archive")\n\n with open(fname, "wb") as f:\n np.savez(f, arr=a)\n\n del a\n\n with open(fname, "rb") as f:\n new_a = np.load(f)["arr"]\n\n assert new_a.shape == shape\n\n\ndef test_empty_npz(tmpdir):\n # Test for gh-9989\n fname = os.path.join(tmpdir, "nothing.npz")\n np.savez(fname)\n with np.load(fname) as nps:\n pass\n\n\ndef test_unicode_field_names(tmpdir):\n # gh-7391\n arr = np.array([\n (1, 3),\n (1, 2),\n (1, 3),\n (1, 2)\n ], dtype=[\n ('int', int),\n ('\N{CJK UNIFIED IDEOGRAPH-6574}\N{CJK UNIFIED IDEOGRAPH-5F62}', int)\n ])\n fname = os.path.join(tmpdir, "unicode.npy")\n with open(fname, 'wb') as f:\n format.write_array(f, arr, version=(3, 0))\n with open(fname, 'rb') as f:\n arr2 = format.read_array(f)\n assert_array_equal(arr, arr2)\n\n # notifies the user that 3.0 is selected\n with open(fname, 'wb') as f:\n with assert_warns(UserWarning):\n format.write_array(f, arr, version=None)\n\ndef test_header_growth_axis():\n for is_fortran_array, dtype_space, expected_header_length in [\n [False, 22, 128], [False, 23, 192], [True, 23, 128], [True, 24, 192]\n ]:\n for size in [10**i for i in range(format.GROWTH_AXIS_MAX_DIGITS)]:\n fp = BytesIO()\n format.write_array_header_1_0(fp, {\n 'shape': (2, size) if is_fortran_array else (size, 2),\n 'fortran_order': is_fortran_array,\n 'descr': np.dtype([(' ' * dtype_space, int)])\n })\n\n assert len(fp.getvalue()) == expected_header_length\n\n@pytest.mark.parametrize('dt', [\n np.dtype({'names': ['a', 'b'], 'formats': [float, np.dtype('S3',\n metadata={'some': 'stuff'})]}),\n np.dtype(int, 
metadata={'some': 'stuff'}),\n np.dtype([('subarray', (int, (2,)))], metadata={'some': 'stuff'}),\n # recursive: metadata on the field of a dtype\n np.dtype({'names': ['a', 'b'], 'formats': [\n float, np.dtype({'names': ['c'], 'formats': [np.dtype(int, metadata={})]})\n ]}),\n ])\n@pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),\n reason="PyPy bug in error formatting")\ndef test_metadata_dtype(dt):\n # gh-14142\n arr = np.ones(10, dtype=dt)\n buf = BytesIO()\n with assert_warns(UserWarning):\n np.save(buf, arr)\n buf.seek(0)\n\n # Loading should work (metadata was stripped):\n arr2 = np.load(buf)\n # BUG: assert_array_equal does not check metadata\n from numpy.lib._utils_impl import drop_metadata\n assert_array_equal(arr, arr2)\n assert drop_metadata(arr.dtype) is not arr.dtype\n assert drop_metadata(arr2.dtype) is arr2.dtype\n | .venv\Lib\site-packages\numpy\lib\tests\test_format.py | test_format.py | Python | 43,010 | 0.95 | 0.077799 | 0.08306 | vue-tools | 850 | 2023-12-16T04:32:22.083783 | MIT | true | a530290e6c9811de9b71745e29c998f9 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.