Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- janus/lib/python3.10/site-packages/numpy/__pycache__/__config__.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/numpy/__pycache__/__init__.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/numpy/__pycache__/_distributor_init.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/numpy/__pycache__/_expired_attrs_2_0.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/numpy/__pycache__/conftest.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/numpy/__pycache__/exceptions.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/numpy/_core/__init__.py +180 -0
- janus/lib/python3.10/site-packages/numpy/_core/__init__.pyi +2 -0
- janus/lib/python3.10/site-packages/numpy/_core/_add_newdocs.py +0 -0
- janus/lib/python3.10/site-packages/numpy/_core/_asarray.pyi +41 -0
- janus/lib/python3.10/site-packages/numpy/_core/_dtype.py +374 -0
- janus/lib/python3.10/site-packages/numpy/_core/_internal.pyi +30 -0
- janus/lib/python3.10/site-packages/numpy/_core/_methods.py +256 -0
- janus/lib/python3.10/site-packages/numpy/_core/_operand_flag_tests.cpython-310-x86_64-linux-gnu.so +0 -0
- janus/lib/python3.10/site-packages/numpy/_core/_struct_ufunc_tests.cpython-310-x86_64-linux-gnu.so +0 -0
- janus/lib/python3.10/site-packages/numpy/_core/_type_aliases.pyi +96 -0
- janus/lib/python3.10/site-packages/numpy/_core/_ufunc_config.py +483 -0
- janus/lib/python3.10/site-packages/numpy/_core/_umath_tests.cpython-310-x86_64-linux-gnu.so +0 -0
- janus/lib/python3.10/site-packages/numpy/_core/arrayprint.pyi +137 -0
- janus/lib/python3.10/site-packages/numpy/_core/cversions.py +13 -0
- janus/lib/python3.10/site-packages/numpy/_core/defchararray.py +1414 -0
- janus/lib/python3.10/site-packages/numpy/_core/defchararray.pyi +1096 -0
- janus/lib/python3.10/site-packages/numpy/_core/einsumfunc.py +1499 -0
- janus/lib/python3.10/site-packages/numpy/_core/einsumfunc.pyi +184 -0
- janus/lib/python3.10/site-packages/numpy/_core/fromnumeric.py +0 -0
- janus/lib/python3.10/site-packages/numpy/_core/fromnumeric.pyi +1512 -0
- janus/lib/python3.10/site-packages/numpy/_core/function_base.pyi +202 -0
- janus/lib/python3.10/site-packages/numpy/_core/getlimits.py +747 -0
- janus/lib/python3.10/site-packages/numpy/_core/getlimits.pyi +3 -0
- janus/lib/python3.10/site-packages/numpy/_core/memmap.pyi +3 -0
- janus/lib/python3.10/site-packages/numpy/_core/multiarray.pyi +1348 -0
- janus/lib/python3.10/site-packages/numpy/_core/numeric.pyi +886 -0
- janus/lib/python3.10/site-packages/numpy/_core/numerictypes.py +629 -0
- janus/lib/python3.10/site-packages/numpy/_core/numerictypes.pyi +217 -0
- janus/lib/python3.10/site-packages/numpy/_core/printoptions.py +32 -0
- janus/lib/python3.10/site-packages/numpy/_core/records.pyi +347 -0
- janus/lib/python3.10/site-packages/numpy/_core/shape_base.py +1004 -0
- janus/lib/python3.10/site-packages/numpy/_core/shape_base.pyi +147 -0
- janus/lib/python3.10/site-packages/numpy/_core/strings.pyi +478 -0
- janus/lib/python3.10/site-packages/numpy/_core/tests/_locales.py +72 -0
- janus/lib/python3.10/site-packages/numpy/_core/tests/test__exceptions.py +89 -0
- janus/lib/python3.10/site-packages/numpy/_core/tests/test_api.py +616 -0
- janus/lib/python3.10/site-packages/numpy/_core/tests/test_array_api_info.py +112 -0
- janus/lib/python3.10/site-packages/numpy/_core/tests/test_arrayprint.py +1281 -0
- janus/lib/python3.10/site-packages/numpy/_core/tests/test_casting_floatingpoint_errors.py +154 -0
- janus/lib/python3.10/site-packages/numpy/_core/tests/test_conversion_utils.py +209 -0
- janus/lib/python3.10/site-packages/numpy/_core/tests/test_custom_dtypes.py +311 -0
- janus/lib/python3.10/site-packages/numpy/_core/tests/test_datetime.py +0 -0
- janus/lib/python3.10/site-packages/numpy/_core/tests/test_dlpack.py +178 -0
- janus/lib/python3.10/site-packages/numpy/_core/tests/test_errstate.py +129 -0
janus/lib/python3.10/site-packages/numpy/__pycache__/__config__.cpython-310.pyc
ADDED
|
Binary file (4.07 kB). View file
|
|
|
janus/lib/python3.10/site-packages/numpy/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (22.4 kB). View file
|
|
|
janus/lib/python3.10/site-packages/numpy/__pycache__/_distributor_init.cpython-310.pyc
ADDED
|
Binary file (601 Bytes). View file
|
|
|
janus/lib/python3.10/site-packages/numpy/__pycache__/_expired_attrs_2_0.cpython-310.pyc
ADDED
|
Binary file (3.76 kB). View file
|
|
|
janus/lib/python3.10/site-packages/numpy/__pycache__/conftest.cpython-310.pyc
ADDED
|
Binary file (6.26 kB). View file
|
|
|
janus/lib/python3.10/site-packages/numpy/__pycache__/exceptions.cpython-310.pyc
ADDED
|
Binary file (8.21 kB). View file
|
|
|
janus/lib/python3.10/site-packages/numpy/_core/__init__.py
ADDED
|
@@ -0,0 +1,180 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Contains the core of NumPy: ndarray, ufuncs, dtypes, etc.
|
| 3 |
+
|
| 4 |
+
Please note that this module is private. All functions and objects
|
| 5 |
+
are available in the main ``numpy`` namespace - use that instead.
|
| 6 |
+
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
import os
|
| 10 |
+
|
| 11 |
+
from numpy.version import version as __version__
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
# disables OpenBLAS affinity setting of the main thread that limits
|
| 15 |
+
# python threads or processes to one core
|
| 16 |
+
env_added = []
|
| 17 |
+
for envkey in ['OPENBLAS_MAIN_FREE', 'GOTOBLAS_MAIN_FREE']:
|
| 18 |
+
if envkey not in os.environ:
|
| 19 |
+
os.environ[envkey] = '1'
|
| 20 |
+
env_added.append(envkey)
|
| 21 |
+
|
| 22 |
+
try:
|
| 23 |
+
from . import multiarray
|
| 24 |
+
except ImportError as exc:
|
| 25 |
+
import sys
|
| 26 |
+
msg = """
|
| 27 |
+
|
| 28 |
+
IMPORTANT: PLEASE READ THIS FOR ADVICE ON HOW TO SOLVE THIS ISSUE!
|
| 29 |
+
|
| 30 |
+
Importing the numpy C-extensions failed. This error can happen for
|
| 31 |
+
many reasons, often due to issues with your setup or how NumPy was
|
| 32 |
+
installed.
|
| 33 |
+
|
| 34 |
+
We have compiled some common reasons and troubleshooting tips at:
|
| 35 |
+
|
| 36 |
+
https://numpy.org/devdocs/user/troubleshooting-importerror.html
|
| 37 |
+
|
| 38 |
+
Please note and check the following:
|
| 39 |
+
|
| 40 |
+
* The Python version is: Python%d.%d from "%s"
|
| 41 |
+
* The NumPy version is: "%s"
|
| 42 |
+
|
| 43 |
+
and make sure that they are the versions you expect.
|
| 44 |
+
Please carefully study the documentation linked above for further help.
|
| 45 |
+
|
| 46 |
+
Original error was: %s
|
| 47 |
+
""" % (sys.version_info[0], sys.version_info[1], sys.executable,
|
| 48 |
+
__version__, exc)
|
| 49 |
+
raise ImportError(msg)
|
| 50 |
+
finally:
|
| 51 |
+
for envkey in env_added:
|
| 52 |
+
del os.environ[envkey]
|
| 53 |
+
del envkey
|
| 54 |
+
del env_added
|
| 55 |
+
del os
|
| 56 |
+
|
| 57 |
+
from . import umath
|
| 58 |
+
|
| 59 |
+
# Check that multiarray,umath are pure python modules wrapping
|
| 60 |
+
# _multiarray_umath and not either of the old c-extension modules
|
| 61 |
+
if not (hasattr(multiarray, '_multiarray_umath') and
|
| 62 |
+
hasattr(umath, '_multiarray_umath')):
|
| 63 |
+
import sys
|
| 64 |
+
path = sys.modules['numpy'].__path__
|
| 65 |
+
msg = ("Something is wrong with the numpy installation. "
|
| 66 |
+
"While importing we detected an older version of "
|
| 67 |
+
"numpy in {}. One method of fixing this is to repeatedly uninstall "
|
| 68 |
+
"numpy until none is found, then reinstall this version.")
|
| 69 |
+
raise ImportError(msg.format(path))
|
| 70 |
+
|
| 71 |
+
from . import numerictypes as nt
|
| 72 |
+
from .numerictypes import sctypes, sctypeDict
|
| 73 |
+
multiarray.set_typeDict(nt.sctypeDict)
|
| 74 |
+
from . import numeric
|
| 75 |
+
from .numeric import *
|
| 76 |
+
from . import fromnumeric
|
| 77 |
+
from .fromnumeric import *
|
| 78 |
+
from .records import record, recarray
|
| 79 |
+
# Note: module name memmap is overwritten by a class with same name
|
| 80 |
+
from .memmap import *
|
| 81 |
+
from . import function_base
|
| 82 |
+
from .function_base import *
|
| 83 |
+
from . import _machar
|
| 84 |
+
from . import getlimits
|
| 85 |
+
from .getlimits import *
|
| 86 |
+
from . import shape_base
|
| 87 |
+
from .shape_base import *
|
| 88 |
+
from . import einsumfunc
|
| 89 |
+
from .einsumfunc import *
|
| 90 |
+
del nt
|
| 91 |
+
|
| 92 |
+
from .numeric import absolute as abs
|
| 93 |
+
|
| 94 |
+
# do this after everything else, to minimize the chance of this misleadingly
|
| 95 |
+
# appearing in an import-time traceback
|
| 96 |
+
from . import _add_newdocs
|
| 97 |
+
from . import _add_newdocs_scalars
|
| 98 |
+
# add these for module-freeze analysis (like PyInstaller)
|
| 99 |
+
from . import _dtype_ctypes
|
| 100 |
+
from . import _internal
|
| 101 |
+
from . import _dtype
|
| 102 |
+
from . import _methods
|
| 103 |
+
|
| 104 |
+
acos = numeric.arccos
|
| 105 |
+
acosh = numeric.arccosh
|
| 106 |
+
asin = numeric.arcsin
|
| 107 |
+
asinh = numeric.arcsinh
|
| 108 |
+
atan = numeric.arctan
|
| 109 |
+
atanh = numeric.arctanh
|
| 110 |
+
atan2 = numeric.arctan2
|
| 111 |
+
concat = numeric.concatenate
|
| 112 |
+
bitwise_left_shift = numeric.left_shift
|
| 113 |
+
bitwise_invert = numeric.invert
|
| 114 |
+
bitwise_right_shift = numeric.right_shift
|
| 115 |
+
permute_dims = numeric.transpose
|
| 116 |
+
pow = numeric.power
|
| 117 |
+
|
| 118 |
+
__all__ = [
|
| 119 |
+
"abs", "acos", "acosh", "asin", "asinh", "atan", "atanh", "atan2",
|
| 120 |
+
"bitwise_invert", "bitwise_left_shift", "bitwise_right_shift", "concat",
|
| 121 |
+
"pow", "permute_dims", "memmap", "sctypeDict", "record", "recarray"
|
| 122 |
+
]
|
| 123 |
+
__all__ += numeric.__all__
|
| 124 |
+
__all__ += function_base.__all__
|
| 125 |
+
__all__ += getlimits.__all__
|
| 126 |
+
__all__ += shape_base.__all__
|
| 127 |
+
__all__ += einsumfunc.__all__
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
def _ufunc_reduce(func):
|
| 131 |
+
# Report the `__name__`. pickle will try to find the module. Note that
|
| 132 |
+
# pickle supports for this `__name__` to be a `__qualname__`. It may
|
| 133 |
+
# make sense to add a `__qualname__` to ufuncs, to allow this more
|
| 134 |
+
# explicitly (Numba has ufuncs as attributes).
|
| 135 |
+
# See also: https://github.com/dask/distributed/issues/3450
|
| 136 |
+
return func.__name__
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
def _DType_reconstruct(scalar_type):
|
| 140 |
+
# This is a work-around to pickle type(np.dtype(np.float64)), etc.
|
| 141 |
+
# and it should eventually be replaced with a better solution, e.g. when
|
| 142 |
+
# DTypes become HeapTypes.
|
| 143 |
+
return type(dtype(scalar_type))
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
def _DType_reduce(DType):
|
| 147 |
+
# As types/classes, most DTypes can simply be pickled by their name:
|
| 148 |
+
if not DType._legacy or DType.__module__ == "numpy.dtypes":
|
| 149 |
+
return DType.__name__
|
| 150 |
+
|
| 151 |
+
# However, user defined legacy dtypes (like rational) do not end up in
|
| 152 |
+
# `numpy.dtypes` as module and do not have a public class at all.
|
| 153 |
+
# For these, we pickle them by reconstructing them from the scalar type:
|
| 154 |
+
scalar_type = DType.type
|
| 155 |
+
return _DType_reconstruct, (scalar_type,)
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
def __getattr__(name):
|
| 159 |
+
# Deprecated 2022-11-22, NumPy 1.25.
|
| 160 |
+
if name == "MachAr":
|
| 161 |
+
import warnings
|
| 162 |
+
warnings.warn(
|
| 163 |
+
"The `np._core.MachAr` is considered private API (NumPy 1.24)",
|
| 164 |
+
DeprecationWarning, stacklevel=2,
|
| 165 |
+
)
|
| 166 |
+
return _machar.MachAr
|
| 167 |
+
raise AttributeError(f"Module {__name__!r} has no attribute {name!r}")
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
import copyreg
|
| 171 |
+
|
| 172 |
+
copyreg.pickle(ufunc, _ufunc_reduce)
|
| 173 |
+
copyreg.pickle(type(dtype), _DType_reduce, _DType_reconstruct)
|
| 174 |
+
|
| 175 |
+
# Unclutter namespace (must keep _*_reconstruct for unpickling)
|
| 176 |
+
del copyreg, _ufunc_reduce, _DType_reduce
|
| 177 |
+
|
| 178 |
+
from numpy._pytesttester import PytestTester
|
| 179 |
+
test = PytestTester(__name__)
|
| 180 |
+
del PytestTester
|
janus/lib/python3.10/site-packages/numpy/_core/__init__.pyi
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# NOTE: The `np._core` namespace is deliberately kept empty due to it
|
| 2 |
+
# being private
|
janus/lib/python3.10/site-packages/numpy/_core/_add_newdocs.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
janus/lib/python3.10/site-packages/numpy/_core/_asarray.pyi
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from collections.abc import Iterable
|
| 2 |
+
from typing import Any, TypeAlias, TypeVar, overload, Literal
|
| 3 |
+
|
| 4 |
+
from numpy._typing import NDArray, DTypeLike, _SupportsArrayFunc
|
| 5 |
+
|
| 6 |
+
_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any])
|
| 7 |
+
|
| 8 |
+
_Requirements: TypeAlias = Literal[
|
| 9 |
+
"C", "C_CONTIGUOUS", "CONTIGUOUS",
|
| 10 |
+
"F", "F_CONTIGUOUS", "FORTRAN",
|
| 11 |
+
"A", "ALIGNED",
|
| 12 |
+
"W", "WRITEABLE",
|
| 13 |
+
"O", "OWNDATA"
|
| 14 |
+
]
|
| 15 |
+
_E: TypeAlias = Literal["E", "ENSUREARRAY"]
|
| 16 |
+
_RequirementsWithE: TypeAlias = _Requirements | _E
|
| 17 |
+
|
| 18 |
+
@overload
|
| 19 |
+
def require(
|
| 20 |
+
a: _ArrayType,
|
| 21 |
+
dtype: None = ...,
|
| 22 |
+
requirements: None | _Requirements | Iterable[_Requirements] = ...,
|
| 23 |
+
*,
|
| 24 |
+
like: _SupportsArrayFunc = ...
|
| 25 |
+
) -> _ArrayType: ...
|
| 26 |
+
@overload
|
| 27 |
+
def require(
|
| 28 |
+
a: object,
|
| 29 |
+
dtype: DTypeLike = ...,
|
| 30 |
+
requirements: _E | Iterable[_RequirementsWithE] = ...,
|
| 31 |
+
*,
|
| 32 |
+
like: _SupportsArrayFunc = ...
|
| 33 |
+
) -> NDArray[Any]: ...
|
| 34 |
+
@overload
|
| 35 |
+
def require(
|
| 36 |
+
a: object,
|
| 37 |
+
dtype: DTypeLike = ...,
|
| 38 |
+
requirements: None | _Requirements | Iterable[_Requirements] = ...,
|
| 39 |
+
*,
|
| 40 |
+
like: _SupportsArrayFunc = ...
|
| 41 |
+
) -> NDArray[Any]: ...
|
janus/lib/python3.10/site-packages/numpy/_core/_dtype.py
ADDED
|
@@ -0,0 +1,374 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
A place for code to be called from the implementation of np.dtype
|
| 3 |
+
|
| 4 |
+
String handling is much easier to do correctly in python.
|
| 5 |
+
"""
|
| 6 |
+
import numpy as np
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
_kind_to_stem = {
|
| 10 |
+
'u': 'uint',
|
| 11 |
+
'i': 'int',
|
| 12 |
+
'c': 'complex',
|
| 13 |
+
'f': 'float',
|
| 14 |
+
'b': 'bool',
|
| 15 |
+
'V': 'void',
|
| 16 |
+
'O': 'object',
|
| 17 |
+
'M': 'datetime',
|
| 18 |
+
'm': 'timedelta',
|
| 19 |
+
'S': 'bytes',
|
| 20 |
+
'U': 'str',
|
| 21 |
+
}
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def _kind_name(dtype):
|
| 25 |
+
try:
|
| 26 |
+
return _kind_to_stem[dtype.kind]
|
| 27 |
+
except KeyError as e:
|
| 28 |
+
raise RuntimeError(
|
| 29 |
+
"internal dtype error, unknown kind {!r}"
|
| 30 |
+
.format(dtype.kind)
|
| 31 |
+
) from None
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def __str__(dtype):
|
| 35 |
+
if dtype.fields is not None:
|
| 36 |
+
return _struct_str(dtype, include_align=True)
|
| 37 |
+
elif dtype.subdtype:
|
| 38 |
+
return _subarray_str(dtype)
|
| 39 |
+
elif issubclass(dtype.type, np.flexible) or not dtype.isnative:
|
| 40 |
+
return dtype.str
|
| 41 |
+
else:
|
| 42 |
+
return dtype.name
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
def __repr__(dtype):
|
| 46 |
+
arg_str = _construction_repr(dtype, include_align=False)
|
| 47 |
+
if dtype.isalignedstruct:
|
| 48 |
+
arg_str = arg_str + ", align=True"
|
| 49 |
+
return "dtype({})".format(arg_str)
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def _unpack_field(dtype, offset, title=None):
|
| 53 |
+
"""
|
| 54 |
+
Helper function to normalize the items in dtype.fields.
|
| 55 |
+
|
| 56 |
+
Call as:
|
| 57 |
+
|
| 58 |
+
dtype, offset, title = _unpack_field(*dtype.fields[name])
|
| 59 |
+
"""
|
| 60 |
+
return dtype, offset, title
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def _isunsized(dtype):
|
| 64 |
+
# PyDataType_ISUNSIZED
|
| 65 |
+
return dtype.itemsize == 0
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
def _construction_repr(dtype, include_align=False, short=False):
|
| 69 |
+
"""
|
| 70 |
+
Creates a string repr of the dtype, excluding the 'dtype()' part
|
| 71 |
+
surrounding the object. This object may be a string, a list, or
|
| 72 |
+
a dict depending on the nature of the dtype. This
|
| 73 |
+
is the object passed as the first parameter to the dtype
|
| 74 |
+
constructor, and if no additional constructor parameters are
|
| 75 |
+
given, will reproduce the exact memory layout.
|
| 76 |
+
|
| 77 |
+
Parameters
|
| 78 |
+
----------
|
| 79 |
+
short : bool
|
| 80 |
+
If true, this creates a shorter repr using 'kind' and 'itemsize',
|
| 81 |
+
instead of the longer type name.
|
| 82 |
+
|
| 83 |
+
include_align : bool
|
| 84 |
+
If true, this includes the 'align=True' parameter
|
| 85 |
+
inside the struct dtype construction dict when needed. Use this flag
|
| 86 |
+
if you want a proper repr string without the 'dtype()' part around it.
|
| 87 |
+
|
| 88 |
+
If false, this does not preserve the
|
| 89 |
+
'align=True' parameter or sticky NPY_ALIGNED_STRUCT flag for
|
| 90 |
+
struct arrays like the regular repr does, because the 'align'
|
| 91 |
+
flag is not part of first dtype constructor parameter. This
|
| 92 |
+
mode is intended for a full 'repr', where the 'align=True' is
|
| 93 |
+
provided as the second parameter.
|
| 94 |
+
"""
|
| 95 |
+
if dtype.fields is not None:
|
| 96 |
+
return _struct_str(dtype, include_align=include_align)
|
| 97 |
+
elif dtype.subdtype:
|
| 98 |
+
return _subarray_str(dtype)
|
| 99 |
+
else:
|
| 100 |
+
return _scalar_str(dtype, short=short)
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
def _scalar_str(dtype, short):
|
| 104 |
+
byteorder = _byte_order_str(dtype)
|
| 105 |
+
|
| 106 |
+
if dtype.type == np.bool:
|
| 107 |
+
if short:
|
| 108 |
+
return "'?'"
|
| 109 |
+
else:
|
| 110 |
+
return "'bool'"
|
| 111 |
+
|
| 112 |
+
elif dtype.type == np.object_:
|
| 113 |
+
# The object reference may be different sizes on different
|
| 114 |
+
# platforms, so it should never include the itemsize here.
|
| 115 |
+
return "'O'"
|
| 116 |
+
|
| 117 |
+
elif dtype.type == np.bytes_:
|
| 118 |
+
if _isunsized(dtype):
|
| 119 |
+
return "'S'"
|
| 120 |
+
else:
|
| 121 |
+
return "'S%d'" % dtype.itemsize
|
| 122 |
+
|
| 123 |
+
elif dtype.type == np.str_:
|
| 124 |
+
if _isunsized(dtype):
|
| 125 |
+
return "'%sU'" % byteorder
|
| 126 |
+
else:
|
| 127 |
+
return "'%sU%d'" % (byteorder, dtype.itemsize / 4)
|
| 128 |
+
|
| 129 |
+
elif dtype.type == str:
|
| 130 |
+
return "'T'"
|
| 131 |
+
|
| 132 |
+
elif not type(dtype)._legacy:
|
| 133 |
+
return f"'{byteorder}{type(dtype).__name__}{dtype.itemsize * 8}'"
|
| 134 |
+
|
| 135 |
+
# unlike the other types, subclasses of void are preserved - but
|
| 136 |
+
# historically the repr does not actually reveal the subclass
|
| 137 |
+
elif issubclass(dtype.type, np.void):
|
| 138 |
+
if _isunsized(dtype):
|
| 139 |
+
return "'V'"
|
| 140 |
+
else:
|
| 141 |
+
return "'V%d'" % dtype.itemsize
|
| 142 |
+
|
| 143 |
+
elif dtype.type == np.datetime64:
|
| 144 |
+
return "'%sM8%s'" % (byteorder, _datetime_metadata_str(dtype))
|
| 145 |
+
|
| 146 |
+
elif dtype.type == np.timedelta64:
|
| 147 |
+
return "'%sm8%s'" % (byteorder, _datetime_metadata_str(dtype))
|
| 148 |
+
|
| 149 |
+
elif np.issubdtype(dtype, np.number):
|
| 150 |
+
# Short repr with endianness, like '<f8'
|
| 151 |
+
if short or dtype.byteorder not in ('=', '|'):
|
| 152 |
+
return "'%s%c%d'" % (byteorder, dtype.kind, dtype.itemsize)
|
| 153 |
+
|
| 154 |
+
# Longer repr, like 'float64'
|
| 155 |
+
else:
|
| 156 |
+
return "'%s%d'" % (_kind_name(dtype), 8*dtype.itemsize)
|
| 157 |
+
|
| 158 |
+
elif dtype.isbuiltin == 2:
|
| 159 |
+
return dtype.type.__name__
|
| 160 |
+
|
| 161 |
+
else:
|
| 162 |
+
raise RuntimeError(
|
| 163 |
+
"Internal error: NumPy dtype unrecognized type number")
|
| 164 |
+
|
| 165 |
+
|
| 166 |
+
def _byte_order_str(dtype):
|
| 167 |
+
""" Normalize byteorder to '<' or '>' """
|
| 168 |
+
# hack to obtain the native and swapped byte order characters
|
| 169 |
+
swapped = np.dtype(int).newbyteorder('S')
|
| 170 |
+
native = swapped.newbyteorder('S')
|
| 171 |
+
|
| 172 |
+
byteorder = dtype.byteorder
|
| 173 |
+
if byteorder == '=':
|
| 174 |
+
return native.byteorder
|
| 175 |
+
if byteorder == 'S':
|
| 176 |
+
# TODO: this path can never be reached
|
| 177 |
+
return swapped.byteorder
|
| 178 |
+
elif byteorder == '|':
|
| 179 |
+
return ''
|
| 180 |
+
else:
|
| 181 |
+
return byteorder
|
| 182 |
+
|
| 183 |
+
|
| 184 |
+
def _datetime_metadata_str(dtype):
|
| 185 |
+
# TODO: this duplicates the C metastr_to_unicode functionality
|
| 186 |
+
unit, count = np.datetime_data(dtype)
|
| 187 |
+
if unit == 'generic':
|
| 188 |
+
return ''
|
| 189 |
+
elif count == 1:
|
| 190 |
+
return '[{}]'.format(unit)
|
| 191 |
+
else:
|
| 192 |
+
return '[{}{}]'.format(count, unit)
|
| 193 |
+
|
| 194 |
+
|
| 195 |
+
def _struct_dict_str(dtype, includealignedflag):
|
| 196 |
+
# unpack the fields dictionary into ls
|
| 197 |
+
names = dtype.names
|
| 198 |
+
fld_dtypes = []
|
| 199 |
+
offsets = []
|
| 200 |
+
titles = []
|
| 201 |
+
for name in names:
|
| 202 |
+
fld_dtype, offset, title = _unpack_field(*dtype.fields[name])
|
| 203 |
+
fld_dtypes.append(fld_dtype)
|
| 204 |
+
offsets.append(offset)
|
| 205 |
+
titles.append(title)
|
| 206 |
+
|
| 207 |
+
# Build up a string to make the dictionary
|
| 208 |
+
|
| 209 |
+
if np._core.arrayprint._get_legacy_print_mode() <= 121:
|
| 210 |
+
colon = ":"
|
| 211 |
+
fieldsep = ","
|
| 212 |
+
else:
|
| 213 |
+
colon = ": "
|
| 214 |
+
fieldsep = ", "
|
| 215 |
+
|
| 216 |
+
# First, the names
|
| 217 |
+
ret = "{'names'%s[" % colon
|
| 218 |
+
ret += fieldsep.join(repr(name) for name in names)
|
| 219 |
+
|
| 220 |
+
# Second, the formats
|
| 221 |
+
ret += "], 'formats'%s[" % colon
|
| 222 |
+
ret += fieldsep.join(
|
| 223 |
+
_construction_repr(fld_dtype, short=True) for fld_dtype in fld_dtypes)
|
| 224 |
+
|
| 225 |
+
# Third, the offsets
|
| 226 |
+
ret += "], 'offsets'%s[" % colon
|
| 227 |
+
ret += fieldsep.join("%d" % offset for offset in offsets)
|
| 228 |
+
|
| 229 |
+
# Fourth, the titles
|
| 230 |
+
if any(title is not None for title in titles):
|
| 231 |
+
ret += "], 'titles'%s[" % colon
|
| 232 |
+
ret += fieldsep.join(repr(title) for title in titles)
|
| 233 |
+
|
| 234 |
+
# Fifth, the itemsize
|
| 235 |
+
ret += "], 'itemsize'%s%d" % (colon, dtype.itemsize)
|
| 236 |
+
|
| 237 |
+
if (includealignedflag and dtype.isalignedstruct):
|
| 238 |
+
# Finally, the aligned flag
|
| 239 |
+
ret += ", 'aligned'%sTrue}" % colon
|
| 240 |
+
else:
|
| 241 |
+
ret += "}"
|
| 242 |
+
|
| 243 |
+
return ret
|
| 244 |
+
|
| 245 |
+
|
| 246 |
+
def _aligned_offset(offset, alignment):
|
| 247 |
+
# round up offset:
|
| 248 |
+
return - (-offset // alignment) * alignment
|
| 249 |
+
|
| 250 |
+
|
| 251 |
+
def _is_packed(dtype):
|
| 252 |
+
"""
|
| 253 |
+
Checks whether the structured data type in 'dtype'
|
| 254 |
+
has a simple layout, where all the fields are in order,
|
| 255 |
+
and follow each other with no alignment padding.
|
| 256 |
+
|
| 257 |
+
When this returns true, the dtype can be reconstructed
|
| 258 |
+
from a list of the field names and dtypes with no additional
|
| 259 |
+
dtype parameters.
|
| 260 |
+
|
| 261 |
+
Duplicates the C `is_dtype_struct_simple_unaligned_layout` function.
|
| 262 |
+
"""
|
| 263 |
+
align = dtype.isalignedstruct
|
| 264 |
+
max_alignment = 1
|
| 265 |
+
total_offset = 0
|
| 266 |
+
for name in dtype.names:
|
| 267 |
+
fld_dtype, fld_offset, title = _unpack_field(*dtype.fields[name])
|
| 268 |
+
|
| 269 |
+
if align:
|
| 270 |
+
total_offset = _aligned_offset(total_offset, fld_dtype.alignment)
|
| 271 |
+
max_alignment = max(max_alignment, fld_dtype.alignment)
|
| 272 |
+
|
| 273 |
+
if fld_offset != total_offset:
|
| 274 |
+
return False
|
| 275 |
+
total_offset += fld_dtype.itemsize
|
| 276 |
+
|
| 277 |
+
if align:
|
| 278 |
+
total_offset = _aligned_offset(total_offset, max_alignment)
|
| 279 |
+
|
| 280 |
+
return total_offset == dtype.itemsize
|
| 281 |
+
|
| 282 |
+
|
| 283 |
+
def _struct_list_str(dtype):
|
| 284 |
+
items = []
|
| 285 |
+
for name in dtype.names:
|
| 286 |
+
fld_dtype, fld_offset, title = _unpack_field(*dtype.fields[name])
|
| 287 |
+
|
| 288 |
+
item = "("
|
| 289 |
+
if title is not None:
|
| 290 |
+
item += "({!r}, {!r}), ".format(title, name)
|
| 291 |
+
else:
|
| 292 |
+
item += "{!r}, ".format(name)
|
| 293 |
+
# Special case subarray handling here
|
| 294 |
+
if fld_dtype.subdtype is not None:
|
| 295 |
+
base, shape = fld_dtype.subdtype
|
| 296 |
+
item += "{}, {}".format(
|
| 297 |
+
_construction_repr(base, short=True),
|
| 298 |
+
shape
|
| 299 |
+
)
|
| 300 |
+
else:
|
| 301 |
+
item += _construction_repr(fld_dtype, short=True)
|
| 302 |
+
|
| 303 |
+
item += ")"
|
| 304 |
+
items.append(item)
|
| 305 |
+
|
| 306 |
+
return "[" + ", ".join(items) + "]"
|
| 307 |
+
|
| 308 |
+
|
| 309 |
+
def _struct_str(dtype, include_align):
|
| 310 |
+
# The list str representation can't include the 'align=' flag,
|
| 311 |
+
# so if it is requested and the struct has the aligned flag set,
|
| 312 |
+
# we must use the dict str instead.
|
| 313 |
+
if not (include_align and dtype.isalignedstruct) and _is_packed(dtype):
|
| 314 |
+
sub = _struct_list_str(dtype)
|
| 315 |
+
|
| 316 |
+
else:
|
| 317 |
+
sub = _struct_dict_str(dtype, include_align)
|
| 318 |
+
|
| 319 |
+
# If the data type isn't the default, void, show it
|
| 320 |
+
if dtype.type != np.void:
|
| 321 |
+
return "({t.__module__}.{t.__name__}, {f})".format(t=dtype.type, f=sub)
|
| 322 |
+
else:
|
| 323 |
+
return sub
|
| 324 |
+
|
| 325 |
+
|
| 326 |
+
def _subarray_str(dtype):
|
| 327 |
+
base, shape = dtype.subdtype
|
| 328 |
+
return "({}, {})".format(
|
| 329 |
+
_construction_repr(base, short=True),
|
| 330 |
+
shape
|
| 331 |
+
)
|
| 332 |
+
|
| 333 |
+
|
| 334 |
+
def _name_includes_bit_suffix(dtype):
|
| 335 |
+
if dtype.type == np.object_:
|
| 336 |
+
# pointer size varies by system, best to omit it
|
| 337 |
+
return False
|
| 338 |
+
elif dtype.type == np.bool:
|
| 339 |
+
# implied
|
| 340 |
+
return False
|
| 341 |
+
elif dtype.type is None:
|
| 342 |
+
return True
|
| 343 |
+
elif np.issubdtype(dtype, np.flexible) and _isunsized(dtype):
|
| 344 |
+
# unspecified
|
| 345 |
+
return False
|
| 346 |
+
else:
|
| 347 |
+
return True
|
| 348 |
+
|
| 349 |
+
|
| 350 |
+
def _name_get(dtype):
|
| 351 |
+
# provides dtype.name.__get__, documented as returning a "bit name"
|
| 352 |
+
|
| 353 |
+
if dtype.isbuiltin == 2:
|
| 354 |
+
# user dtypes don't promise to do anything special
|
| 355 |
+
return dtype.type.__name__
|
| 356 |
+
|
| 357 |
+
if not type(dtype)._legacy:
|
| 358 |
+
name = type(dtype).__name__
|
| 359 |
+
|
| 360 |
+
elif issubclass(dtype.type, np.void):
|
| 361 |
+
# historically, void subclasses preserve their name, eg `record64`
|
| 362 |
+
name = dtype.type.__name__
|
| 363 |
+
else:
|
| 364 |
+
name = _kind_name(dtype)
|
| 365 |
+
|
| 366 |
+
# append bit counts
|
| 367 |
+
if _name_includes_bit_suffix(dtype):
|
| 368 |
+
name += "{}".format(dtype.itemsize * 8)
|
| 369 |
+
|
| 370 |
+
# append metadata to datetimes
|
| 371 |
+
if dtype.type in (np.datetime64, np.timedelta64):
|
| 372 |
+
name += _datetime_metadata_str(dtype)
|
| 373 |
+
|
| 374 |
+
return name
|
janus/lib/python3.10/site-packages/numpy/_core/_internal.pyi
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any, TypeVar, overload, Generic
|
| 2 |
+
import ctypes as ct
|
| 3 |
+
|
| 4 |
+
from numpy.typing import NDArray
|
| 5 |
+
from numpy.ctypeslib import c_intp
|
| 6 |
+
|
| 7 |
+
_CastT = TypeVar("_CastT", bound=ct._CanCastTo) # Copied from `ctypes.cast`
|
| 8 |
+
_CT = TypeVar("_CT", bound=ct._CData)
|
| 9 |
+
_PT = TypeVar("_PT", bound=int)
|
| 10 |
+
|
| 11 |
+
# TODO: Let the likes of `shape_as` and `strides_as` return `None`
|
| 12 |
+
# for 0D arrays once we've got shape-support
|
| 13 |
+
|
| 14 |
+
class _ctypes(Generic[_PT]):
    # Stub for the object returned by `ndarray.ctypes`.  `_PT` is the type
    # of the data pointer value (None when no pointer was supplied).
    @overload
    def __new__(cls, array: NDArray[Any], ptr: None = ...) -> _ctypes[None]: ...
    @overload
    def __new__(cls, array: NDArray[Any], ptr: _PT) -> _ctypes[_PT]: ...
    @property
    def data(self) -> _PT: ...
    @property
    def shape(self) -> ct.Array[c_intp]: ...
    @property
    def strides(self) -> ct.Array[c_intp]: ...
    @property
    def _as_parameter_(self) -> ct.c_void_p: ...

    # Reinterpret the data pointer / shape / strides as caller-chosen
    # ctypes types.
    def data_as(self, obj: type[_CastT]) -> _CastT: ...
    def shape_as(self, obj: type[_CT]) -> ct.Array[_CT]: ...
    def strides_as(self, obj: type[_CT]) -> ct.Array[_CT]: ...
|
janus/lib/python3.10/site-packages/numpy/_core/_methods.py
ADDED
|
@@ -0,0 +1,256 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Array methods which are called by both the C-code for the method
|
| 3 |
+
and the Python code for the NumPy-namespace function
|
| 4 |
+
|
| 5 |
+
"""
|
| 6 |
+
import os
|
| 7 |
+
import pickle
|
| 8 |
+
import warnings
|
| 9 |
+
from contextlib import nullcontext
|
| 10 |
+
|
| 11 |
+
import numpy as np
|
| 12 |
+
from numpy._core import multiarray as mu
|
| 13 |
+
from numpy._core import umath as um
|
| 14 |
+
from numpy._core.multiarray import asanyarray
|
| 15 |
+
from numpy._core import numerictypes as nt
|
| 16 |
+
from numpy._core import _exceptions
|
| 17 |
+
from numpy._globals import _NoValue
|
| 18 |
+
|
| 19 |
+
# save those O(100) nanoseconds!
# Pre-bound reduce methods and dtype avoid repeated attribute lookups in
# the hot reduction wrappers below.
bool_dt = mu.dtype("bool")
umr_maximum = um.maximum.reduce
umr_minimum = um.minimum.reduce
umr_sum = um.add.reduce
umr_prod = um.multiply.reduce
umr_bitwise_count = um.bitwise_count
umr_any = um.logical_or.reduce
umr_all = um.logical_and.reduce

# Complex types to -> (2,)float view for fast-path computation in _var()
_complex_to_float = {
    nt.dtype(nt.csingle) : nt.dtype(nt.single),
    nt.dtype(nt.cdouble) : nt.dtype(nt.double),
}
# Special case for windows: ensure double takes precedence
# (on platforms where longdouble aliases double, the mapping above
# already covers clongdouble)
if nt.dtype(nt.longdouble) != nt.dtype(nt.double):
    _complex_to_float.update({
        nt.dtype(nt.clongdouble) : nt.dtype(nt.longdouble),
    })
|
| 39 |
+
|
| 40 |
+
# avoid keyword arguments to speed up parsing, saves about 15%-20% for very
|
| 41 |
+
# small reductions
|
| 42 |
+
def _amax(a, axis=None, out=None, keepdims=False,
          initial=_NoValue, where=True):
    # ndarray.max implementation; positional args avoid slow kwarg parsing.
    return umr_maximum(a, axis, None, out, keepdims, initial, where)
|
| 45 |
+
|
| 46 |
+
def _amin(a, axis=None, out=None, keepdims=False,
          initial=_NoValue, where=True):
    # ndarray.min implementation; positional args avoid slow kwarg parsing.
    return umr_minimum(a, axis, None, out, keepdims, initial, where)
|
| 49 |
+
|
| 50 |
+
def _sum(a, axis=None, dtype=None, out=None, keepdims=False,
         initial=_NoValue, where=True):
    # ndarray.sum implementation; positional args avoid slow kwarg parsing.
    return umr_sum(a, axis, dtype, out, keepdims, initial, where)
|
| 53 |
+
|
| 54 |
+
def _prod(a, axis=None, dtype=None, out=None, keepdims=False,
          initial=_NoValue, where=True):
    # ndarray.prod implementation; positional args avoid slow kwarg parsing.
    return umr_prod(a, axis, dtype, out, keepdims, initial, where)
|
| 57 |
+
|
| 58 |
+
def _any(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
    """ndarray.any implementation; reduces with logical_or."""
    # By default any/all return booleans.
    out_dtype = bool_dt if dtype is None else dtype
    # Keyword parsing is comparatively slow, so the common where=True case
    # passes only positional arguments.
    if where is True:
        return umr_any(a, axis, out_dtype, out, keepdims)
    return umr_any(a, axis, out_dtype, out, keepdims, where=where)
|
| 66 |
+
|
| 67 |
+
def _all(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
    """ndarray.all implementation; reduces with logical_and."""
    # By default any/all return booleans.
    out_dtype = bool_dt if dtype is None else dtype
    # Keyword parsing is comparatively slow, so the common where=True case
    # passes only positional arguments.
    if where is True:
        return umr_all(a, axis, out_dtype, out, keepdims)
    return umr_all(a, axis, out_dtype, out, keepdims, where=where)
|
| 75 |
+
|
| 76 |
+
def _count_reduce_items(arr, axis, keepdims=False, where=True):
    """Count the number of elements participating in a reduction.

    With the default ``where=True`` this is the product of the reduced
    axes' lengths (an ``intp`` scalar); with a boolean mask it is the
    per-output-element count of True entries (an ``intp`` array).
    """
    # fast-path for the default case
    if where is True:
        # no boolean mask given, calculate items according to axis
        if axis is None:
            axis = tuple(range(arr.ndim))
        elif not isinstance(axis, tuple):
            axis = (axis,)
        items = 1
        for ax in axis:
            # normalize_axis_index also validates/handles negative axes
            items *= arr.shape[mu.normalize_axis_index(ax, arr.ndim)]
        items = nt.intp(items)
    else:
        # TODO: Optimize case when `where` is broadcast along a non-reduction
        # axis and full sum is more excessive than needed.

        # guarded to protect circular imports
        from numpy.lib._stride_tricks_impl import broadcast_to
        # count True values in (potentially broadcasted) boolean mask
        items = umr_sum(broadcast_to(where, arr.shape), axis, nt.intp, None,
                        keepdims)
    return items
|
| 98 |
+
|
| 99 |
+
def _clip(a, min=None, max=None, out=None, **kwargs):
|
| 100 |
+
if a.dtype.kind in "iu":
|
| 101 |
+
# If min/max is a Python integer, deal with out-of-bound values here.
|
| 102 |
+
# (This enforces NEP 50 rules as no value based promotion is done.)
|
| 103 |
+
if type(min) is int and min <= np.iinfo(a.dtype).min:
|
| 104 |
+
min = None
|
| 105 |
+
if type(max) is int and max >= np.iinfo(a.dtype).max:
|
| 106 |
+
max = None
|
| 107 |
+
|
| 108 |
+
if min is None and max is None:
|
| 109 |
+
# return identity
|
| 110 |
+
return um.positive(a, out=out, **kwargs)
|
| 111 |
+
elif min is None:
|
| 112 |
+
return um.minimum(a, max, out=out, **kwargs)
|
| 113 |
+
elif max is None:
|
| 114 |
+
return um.maximum(a, min, out=out, **kwargs)
|
| 115 |
+
else:
|
| 116 |
+
return um.clip(a, min, max, out=out, **kwargs)
|
| 117 |
+
|
| 118 |
+
def _mean(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
    """ndarray.mean implementation: sum over the reduction then divide by
    the participating-element count, warning on empty slices."""
    arr = asanyarray(a)

    is_float16_result = False

    rcount = _count_reduce_items(arr, axis, keepdims=keepdims, where=where)
    # With a mask, rcount is an array: warn if any slice is empty.
    if rcount == 0 if where is True else umr_any(rcount == 0, axis=None):
        warnings.warn("Mean of empty slice.", RuntimeWarning, stacklevel=2)

    # Cast bool, unsigned int, and int to float64 by default
    if dtype is None:
        if issubclass(arr.dtype.type, (nt.integer, nt.bool)):
            dtype = mu.dtype('f8')
        elif issubclass(arr.dtype.type, nt.float16):
            # accumulate in float32 for accuracy, cast back at the end
            dtype = mu.dtype('f4')
            is_float16_result = True

    ret = umr_sum(arr, axis, dtype, out, keepdims, where=where)
    if isinstance(ret, mu.ndarray):
        # in-place divide; 'unsafe' casting since rcount is an integer
        ret = um.true_divide(
                ret, rcount, out=ret, casting='unsafe', subok=False)
        if is_float16_result and out is None:
            ret = arr.dtype.type(ret)
    elif hasattr(ret, 'dtype'):
        # scalar result: keep its dtype (or restore float16)
        if is_float16_result:
            ret = arr.dtype.type(ret / rcount)
        else:
            ret = ret.dtype.type(ret / rcount)
    else:
        # plain Python scalar (e.g. object dtype)
        ret = ret / rcount

    return ret
|
| 150 |
+
|
| 151 |
+
def _var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *,
         where=True, mean=None):
    """ndarray.var implementation: mean of squared deviations from the
    (optionally precomputed) mean, divided by ``rcount - ddof``."""
    arr = asanyarray(a)

    rcount = _count_reduce_items(arr, axis, keepdims=keepdims, where=where)
    # Make this warning show up on top.
    if ddof >= rcount if where is True else umr_any(ddof >= rcount, axis=None):
        warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning,
                      stacklevel=2)

    # Cast bool, unsigned int, and int to float64 by default
    if dtype is None and issubclass(arr.dtype.type, (nt.integer, nt.bool)):
        dtype = mu.dtype('f8')

    if mean is not None:
        arrmean = mean
    else:
        # Compute the mean.
        # Note that if dtype is not of inexact type then arraymean will
        # not be either.
        arrmean = umr_sum(arr, axis, dtype, keepdims=True, where=where)
        # The shape of rcount has to match arrmean to not change the shape of
        # out in broadcasting. Otherwise, it cannot be stored back to arrmean.
        if rcount.ndim == 0:
            # fast-path for default case when where is True
            div = rcount
        else:
            # matching rcount to arrmean when where is specified as array
            div = rcount.reshape(arrmean.shape)
        if isinstance(arrmean, mu.ndarray):
            arrmean = um.true_divide(arrmean, div, out=arrmean,
                                     casting='unsafe', subok=False)
        elif hasattr(arrmean, "dtype"):
            arrmean = arrmean.dtype.type(arrmean / rcount)
        else:
            arrmean = arrmean / rcount

    # Compute sum of squared deviations from mean
    # Note that x may not be inexact and that we need it to be an array,
    # not a scalar.
    x = asanyarray(arr - arrmean)

    if issubclass(arr.dtype.type, (nt.floating, nt.integer)):
        # real types: square in place
        x = um.multiply(x, x, out=x)
    # Fast-paths for built-in complex types
    elif x.dtype in _complex_to_float:
        # view complex as (..., 2) floats, square, then sum re^2 + im^2
        xv = x.view(dtype=(_complex_to_float[x.dtype], (2,)))
        um.multiply(xv, xv, out=xv)
        x = um.add(xv[..., 0], xv[..., 1], out=x.real).real
    # Most general case; includes handling object arrays containing imaginary
    # numbers and complex types with non-native byteorder
    else:
        x = um.multiply(x, um.conjugate(x), out=x).real

    ret = umr_sum(x, axis, dtype, out, keepdims=keepdims, where=where)

    # Compute degrees of freedom and make sure it is not negative.
    rcount = um.maximum(rcount - ddof, 0)

    # divide by degrees of freedom
    if isinstance(ret, mu.ndarray):
        ret = um.true_divide(
                ret, rcount, out=ret, casting='unsafe', subok=False)
    elif hasattr(ret, 'dtype'):
        ret = ret.dtype.type(ret / rcount)
    else:
        ret = ret / rcount

    return ret
|
| 220 |
+
|
| 221 |
+
def _std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *,
         where=True, mean=None):
    """ndarray.std implementation: square root of the variance."""
    result = _var(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
                  keepdims=keepdims, where=where, mean=mean)

    # Take sqrt in place for arrays, preserve the scalar dtype otherwise.
    if isinstance(result, mu.ndarray):
        return um.sqrt(result, out=result)
    if hasattr(result, 'dtype'):
        return result.dtype.type(um.sqrt(result))
    return um.sqrt(result)
|
| 234 |
+
|
| 235 |
+
def _ptp(a, axis=None, out=None, keepdims=False):
    # Peak-to-peak: max - min over the reduction; only the final subtract
    # writes into `out`.
    return um.subtract(
        umr_maximum(a, axis, None, out, keepdims),
        umr_minimum(a, axis, None, None, keepdims),
        out
    )
|
| 241 |
+
|
| 242 |
+
def _dump(self, file, protocol=2):
|
| 243 |
+
if hasattr(file, 'write'):
|
| 244 |
+
ctx = nullcontext(file)
|
| 245 |
+
else:
|
| 246 |
+
ctx = open(os.fspath(file), "wb")
|
| 247 |
+
with ctx as f:
|
| 248 |
+
pickle.dump(self, f, protocol=protocol)
|
| 249 |
+
|
| 250 |
+
def _dumps(self, protocol=2):
    # Return the pickle of `self` as a bytes object.
    return pickle.dumps(self, protocol=protocol)
|
| 252 |
+
|
| 253 |
+
def _bitwise_count(a, out=None, *, where=True, casting='same_kind',
                   order='K', dtype=None, subok=True):
    # Thin wrapper forwarding to the bitwise_count ufunc with the standard
    # ufunc keyword set.
    return umr_bitwise_count(a, out, where=where, casting=casting,
                             order=order, dtype=dtype, subok=subok)
|
janus/lib/python3.10/site-packages/numpy/_core/_operand_flag_tests.cpython-310-x86_64-linux-gnu.so
ADDED
|
Binary file (17 kB). View file
|
|
|
janus/lib/python3.10/site-packages/numpy/_core/_struct_ufunc_tests.cpython-310-x86_64-linux-gnu.so
ADDED
|
Binary file (17.1 kB). View file
|
|
|
janus/lib/python3.10/site-packages/numpy/_core/_type_aliases.pyi
ADDED
|
@@ -0,0 +1,96 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from collections.abc import Collection
|
| 2 |
+
from typing import Any, Final, Literal as L, TypeAlias, TypedDict, type_check_only
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
|
| 6 |
+
__all__ = (
|
| 7 |
+
"_abstract_type_names",
|
| 8 |
+
"_aliases",
|
| 9 |
+
"_extra_aliases",
|
| 10 |
+
"allTypes",
|
| 11 |
+
"c_names_dict",
|
| 12 |
+
"sctypeDict",
|
| 13 |
+
"sctypes",
|
| 14 |
+
)
|
| 15 |
+
|
| 16 |
+
sctypeDict: Final[dict[str, type[np.generic]]]
|
| 17 |
+
allTypes: Final[dict[str, type[np.generic]]]
|
| 18 |
+
|
| 19 |
+
@type_check_only
class _CNamesDict(TypedDict):
    # Maps the C-level type-name constants to their numpy dtype instances
    # (the runtime value is `c_names_dict` below).
    BOOL: np.dtype[np.bool]
    HALF: np.dtype[np.half]
    FLOAT: np.dtype[np.single]
    DOUBLE: np.dtype[np.double]
    LONGDOUBLE: np.dtype[np.longdouble]
    CFLOAT: np.dtype[np.csingle]
    CDOUBLE: np.dtype[np.cdouble]
    CLONGDOUBLE: np.dtype[np.clongdouble]
    STRING: np.dtype[np.bytes_]
    UNICODE: np.dtype[np.str_]
    VOID: np.dtype[np.void]
    OBJECT: np.dtype[np.object_]
    DATETIME: np.dtype[np.datetime64]
    TIMEDELTA: np.dtype[np.timedelta64]
    BYTE: np.dtype[np.byte]
    UBYTE: np.dtype[np.ubyte]
    SHORT: np.dtype[np.short]
    USHORT: np.dtype[np.ushort]
    INT: np.dtype[np.intc]
    UINT: np.dtype[np.uintc]
    LONG: np.dtype[np.long]
    ULONG: np.dtype[np.ulong]
    LONGLONG: np.dtype[np.longlong]
    ULONGLONG: np.dtype[np.ulonglong]
|
| 45 |
+
|
| 46 |
+
c_names_dict: Final[_CNamesDict]
|
| 47 |
+
|
| 48 |
+
_AbstractTypeName: TypeAlias = L[
|
| 49 |
+
"generic",
|
| 50 |
+
"flexible",
|
| 51 |
+
"character",
|
| 52 |
+
"number",
|
| 53 |
+
"integer",
|
| 54 |
+
"inexact",
|
| 55 |
+
"unsignedinteger",
|
| 56 |
+
"signedinteger",
|
| 57 |
+
"floating",
|
| 58 |
+
"complexfloating",
|
| 59 |
+
]
|
| 60 |
+
_abstract_type_names: Final[set[_AbstractTypeName]]
|
| 61 |
+
|
| 62 |
+
@type_check_only
class _AliasesType(TypedDict):
    # Scalar-type alias names -> canonical sized-type name
    # (the runtime value is `_aliases` below).
    double: L["float64"]
    cdouble: L["complex128"]
    single: L["float32"]
    csingle: L["complex64"]
    half: L["float16"]
    bool_: L["bool"]
    int_: L["intp"]
    uint: L["intp"]
|
| 72 |
+
|
| 73 |
+
_aliases: Final[_AliasesType]
|
| 74 |
+
|
| 75 |
+
@type_check_only
class _ExtraAliasesType(TypedDict):
    # Python-builtin-style alias names -> numpy scalar-type name
    # (the runtime value is `_extra_aliases` below).
    float: L["float64"]
    complex: L["complex128"]
    object: L["object_"]
    bytes: L["bytes_"]
    a: L["bytes_"]
    int: L["int_"]
    str: L["str_"]
    unicode: L["str_"]
|
| 85 |
+
|
| 86 |
+
_extra_aliases: Final[_ExtraAliasesType]
|
| 87 |
+
|
| 88 |
+
@type_check_only
class _SCTypes(TypedDict):
    # Groups of concrete scalar types by category
    # (the runtime value is `sctypes` below).
    int: Collection[type[np.signedinteger[Any]]]
    uint: Collection[type[np.unsignedinteger[Any]]]
    float: Collection[type[np.floating[Any]]]
    complex: Collection[type[np.complexfloating[Any, Any]]]
    others: Collection[type[np.flexible | np.bool | np.object_]]
|
| 95 |
+
|
| 96 |
+
sctypes: Final[_SCTypes]
|
janus/lib/python3.10/site-packages/numpy/_core/_ufunc_config.py
ADDED
|
@@ -0,0 +1,483 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Functions for changing global ufunc configuration
|
| 3 |
+
|
| 4 |
+
This provides helpers which wrap `_get_extobj_dict` and `_make_extobj`, and
|
| 5 |
+
`_extobj_contextvar` from umath.
|
| 6 |
+
"""
|
| 7 |
+
import contextlib
|
| 8 |
+
import contextvars
|
| 9 |
+
import functools
|
| 10 |
+
|
| 11 |
+
from .._utils import set_module
|
| 12 |
+
from .umath import _make_extobj, _get_extobj_dict, _extobj_contextvar
|
| 13 |
+
|
| 14 |
+
__all__ = [
|
| 15 |
+
"seterr", "geterr", "setbufsize", "getbufsize", "seterrcall", "geterrcall",
|
| 16 |
+
"errstate"
|
| 17 |
+
]
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
@set_module('numpy')
def seterr(all=None, divide=None, over=None, under=None, invalid=None):
    """
    Set how floating-point errors are handled.

    Note that operations on integer scalar types (such as `int16`) are
    handled like floating point, and are affected by these settings.

    Parameters
    ----------
    all : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
        Treatment applied to every error category at once.  The default
        is not to change the current behavior.
    divide : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
        Treatment for division by zero.
    over : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
        Treatment for floating-point overflow.
    under : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
        Treatment for floating-point underflow.
    invalid : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
        Treatment for invalid floating-point operation.

    Returns
    -------
    old_settings : dict
        Dictionary containing the old settings.

    See Also
    --------
    seterrcall : Set a callback function for the 'call' mode.
    geterr, geterrcall, errstate
    """
    previous = _get_extobj_dict()
    # The seterr contract covers only the four error kinds; the extobj
    # dict also carries "call" and "bufsize", which must not leak out.
    for key in ("call", "bufsize"):
        previous.pop(key, None)

    new_extobj = _make_extobj(all=all, divide=divide, over=over,
                              under=under, invalid=invalid)
    _extobj_contextvar.set(new_extobj)
    return previous
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
@set_module('numpy')
def geterr():
    """
    Get the current way of handling floating-point errors.

    Returns
    -------
    res : dict
        Dictionary with keys "divide", "over", "under" and "invalid",
        each mapped to one of "ignore", "print", "log", "warn", "raise"
        or "call", describing how that category of floating-point
        exception is handled.

    See Also
    --------
    geterrcall, seterr, seterrcall

    Notes
    -----
    For complete documentation of the types of floating-point exceptions
    and treatment options, see `seterr`.
    """
    settings = _get_extobj_dict()
    # Only the four error modes belong to the geterr contract; strip the
    # extra extobj entries.
    for key in ("call", "bufsize"):
        settings.pop(key, None)
    return settings
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
@set_module('numpy')
def setbufsize(size):
    """
    Set the size of the buffer used in ufuncs.

    .. versionchanged:: 2.0
        The setting is scoped to the current `numpy.errstate` context;
        exiting a ``with errstate():`` block restores the previous bufsize.

    Parameters
    ----------
    size : int
        Size of buffer.

    Returns
    -------
    bufsize : int
        Previous size of ufunc buffer in bytes.
    """
    previous = _get_extobj_dict()["bufsize"]
    _extobj_contextvar.set(_make_extobj(bufsize=size))
    return previous
|
| 195 |
+
|
| 196 |
+
|
| 197 |
+
@set_module('numpy')
def getbufsize():
    """
    Return the size of the buffer used in ufuncs.

    Returns
    -------
    getbufsize : int
        Size of ufunc buffer in bytes.

    Examples
    --------
    >>> import numpy as np
    >>> np.getbufsize()
    8192

    """
    # The bufsize lives in the per-context extobj dict.
    return _get_extobj_dict()["bufsize"]
|
| 215 |
+
|
| 216 |
+
|
| 217 |
+
@set_module('numpy')
def seterrcall(func):
    """
    Set the floating-point error callback function or log object.

    There are two ways to capture floating-point error messages.  With
    the error handler set to 'call' (via `seterr`), a floating-point
    error invokes ``func(err, flag)``: ``err`` is a string describing
    the error (such as "divide by zero", "overflow", "underflow" or
    "invalid value") and ``flag`` is a status byte whose four
    least-significant bits encode the error type::

        [0 0 0 0 divide over under invalid]

    i.e. ``flags = divide + 2*over + 4*under + 8*invalid``.

    With the handler set to 'log', errors instead trigger a call to the
    ``write`` method of the provided object with the message string.

    Parameters
    ----------
    func : callable f(err, flag) or object with write method
        Function to call upon floating-point errors ('call'-mode) or
        object whose 'write' method is used to log such message
        ('log'-mode).

    Returns
    -------
    h : callable, log instance or None
        The old error handler.

    See Also
    --------
    seterr, geterr, geterrcall
    """
    previous = _get_extobj_dict()["call"]
    _extobj_contextvar.set(_make_extobj(call=func))
    return previous
|
| 305 |
+
|
| 306 |
+
|
| 307 |
+
@set_module('numpy')
|
| 308 |
+
def geterrcall():
|
| 309 |
+
"""
|
| 310 |
+
Return the current callback function used on floating-point errors.
|
| 311 |
+
|
| 312 |
+
When the error handling for a floating-point error (one of "divide",
|
| 313 |
+
"over", "under", or "invalid") is set to 'call' or 'log', the function
|
| 314 |
+
that is called or the log instance that is written to is returned by
|
| 315 |
+
`geterrcall`. This function or log instance has been set with
|
| 316 |
+
`seterrcall`.
|
| 317 |
+
|
| 318 |
+
Returns
|
| 319 |
+
-------
|
| 320 |
+
errobj : callable, log instance or None
|
| 321 |
+
The current error handler. If no handler was set through `seterrcall`,
|
| 322 |
+
``None`` is returned.
|
| 323 |
+
|
| 324 |
+
See Also
|
| 325 |
+
--------
|
| 326 |
+
seterrcall, seterr, geterr
|
| 327 |
+
|
| 328 |
+
Notes
|
| 329 |
+
-----
|
| 330 |
+
For complete documentation of the types of floating-point exceptions and
|
| 331 |
+
treatment options, see `seterr`.
|
| 332 |
+
|
| 333 |
+
Examples
|
| 334 |
+
--------
|
| 335 |
+
>>> import numpy as np
|
| 336 |
+
>>> np.geterrcall() # we did not yet set a handler, returns None
|
| 337 |
+
|
| 338 |
+
>>> orig_settings = np.seterr(all='call')
|
| 339 |
+
>>> def err_handler(type, flag):
|
| 340 |
+
... print("Floating point error (%s), with flag %s" % (type, flag))
|
| 341 |
+
>>> old_handler = np.seterrcall(err_handler)
|
| 342 |
+
>>> np.array([1, 2, 3]) / 0.0
|
| 343 |
+
Floating point error (divide by zero), with flag 1
|
| 344 |
+
array([inf, inf, inf])
|
| 345 |
+
|
| 346 |
+
>>> cur_handler = np.geterrcall()
|
| 347 |
+
>>> cur_handler is err_handler
|
| 348 |
+
True
|
| 349 |
+
>>> old_settings = np.seterr(**orig_settings) # restore original
|
| 350 |
+
>>> old_handler = np.seterrcall(None) # restore original
|
| 351 |
+
|
| 352 |
+
"""
|
| 353 |
+
return _get_extobj_dict()["call"]
|
| 354 |
+
|
| 355 |
+
|
| 356 |
+
class _unspecified:
|
| 357 |
+
pass
|
| 358 |
+
|
| 359 |
+
|
| 360 |
+
_Unspecified = _unspecified()
|
| 361 |
+
|
| 362 |
+
|
| 363 |
+
@set_module('numpy')
|
| 364 |
+
class errstate:
|
| 365 |
+
"""
|
| 366 |
+
errstate(**kwargs)
|
| 367 |
+
|
| 368 |
+
Context manager for floating-point error handling.
|
| 369 |
+
|
| 370 |
+
Using an instance of `errstate` as a context manager allows statements in
|
| 371 |
+
that context to execute with a known error handling behavior. Upon entering
|
| 372 |
+
the context the error handling is set with `seterr` and `seterrcall`, and
|
| 373 |
+
upon exiting it is reset to what it was before.
|
| 374 |
+
|
| 375 |
+
.. versionchanged:: 1.17.0
|
| 376 |
+
`errstate` is also usable as a function decorator, saving
|
| 377 |
+
a level of indentation if an entire function is wrapped.
|
| 378 |
+
|
| 379 |
+
.. versionchanged:: 2.0
|
| 380 |
+
`errstate` is now fully thread and asyncio safe, but may not be
|
| 381 |
+
entered more than once.
|
| 382 |
+
It is not safe to decorate async functions using ``errstate``.
|
| 383 |
+
|
| 384 |
+
Parameters
|
| 385 |
+
----------
|
| 386 |
+
kwargs : {divide, over, under, invalid}
|
| 387 |
+
Keyword arguments. The valid keywords are the possible floating-point
|
| 388 |
+
exceptions. Each keyword should have a string value that defines the
|
| 389 |
+
treatment for the particular error. Possible values are
|
| 390 |
+
{'ignore', 'warn', 'raise', 'call', 'print', 'log'}.
|
| 391 |
+
|
| 392 |
+
See Also
|
| 393 |
+
--------
|
| 394 |
+
seterr, geterr, seterrcall, geterrcall
|
| 395 |
+
|
| 396 |
+
Notes
|
| 397 |
+
-----
|
| 398 |
+
For complete documentation of the types of floating-point exceptions and
|
| 399 |
+
treatment options, see `seterr`.
|
| 400 |
+
|
| 401 |
+
Examples
|
| 402 |
+
--------
|
| 403 |
+
>>> import numpy as np
|
| 404 |
+
>>> olderr = np.seterr(all='ignore') # Set error handling to known state.
|
| 405 |
+
|
| 406 |
+
>>> np.arange(3) / 0.
|
| 407 |
+
array([nan, inf, inf])
|
| 408 |
+
>>> with np.errstate(divide='ignore'):
|
| 409 |
+
... np.arange(3) / 0.
|
| 410 |
+
array([nan, inf, inf])
|
| 411 |
+
|
| 412 |
+
>>> np.sqrt(-1)
|
| 413 |
+
np.float64(nan)
|
| 414 |
+
>>> with np.errstate(invalid='raise'):
|
| 415 |
+
... np.sqrt(-1)
|
| 416 |
+
Traceback (most recent call last):
|
| 417 |
+
File "<stdin>", line 2, in <module>
|
| 418 |
+
FloatingPointError: invalid value encountered in sqrt
|
| 419 |
+
|
| 420 |
+
Outside the context the error handling behavior has not changed:
|
| 421 |
+
|
| 422 |
+
>>> np.geterr()
|
| 423 |
+
{'divide': 'ignore', 'over': 'ignore', 'under': 'ignore', 'invalid': 'ignore'}
|
| 424 |
+
>>> olderr = np.seterr(**olderr) # restore original state
|
| 425 |
+
|
| 426 |
+
"""
|
| 427 |
+
__slots__ = (
|
| 428 |
+
"_call", "_all", "_divide", "_over", "_under", "_invalid", "_token")
|
| 429 |
+
|
| 430 |
+
def __init__(self, *, call=_Unspecified,
|
| 431 |
+
all=None, divide=None, over=None, under=None, invalid=None):
|
| 432 |
+
self._token = None
|
| 433 |
+
self._call = call
|
| 434 |
+
self._all = all
|
| 435 |
+
self._divide = divide
|
| 436 |
+
self._over = over
|
| 437 |
+
self._under = under
|
| 438 |
+
self._invalid = invalid
|
| 439 |
+
|
| 440 |
+
def __enter__(self):
|
| 441 |
+
# Note that __call__ duplicates much of this logic
|
| 442 |
+
if self._token is not None:
|
| 443 |
+
raise TypeError("Cannot enter `np.errstate` twice.")
|
| 444 |
+
if self._call is _Unspecified:
|
| 445 |
+
extobj = _make_extobj(
|
| 446 |
+
all=self._all, divide=self._divide, over=self._over,
|
| 447 |
+
under=self._under, invalid=self._invalid)
|
| 448 |
+
else:
|
| 449 |
+
extobj = _make_extobj(
|
| 450 |
+
call=self._call,
|
| 451 |
+
all=self._all, divide=self._divide, over=self._over,
|
| 452 |
+
under=self._under, invalid=self._invalid)
|
| 453 |
+
|
| 454 |
+
self._token = _extobj_contextvar.set(extobj)
|
| 455 |
+
|
| 456 |
+
def __exit__(self, *exc_info):
|
| 457 |
+
_extobj_contextvar.reset(self._token)
|
| 458 |
+
|
| 459 |
+
def __call__(self, func):
|
| 460 |
+
# We need to customize `__call__` compared to `ContextDecorator`
|
| 461 |
+
# because we must store the token per-thread so cannot store it on
|
| 462 |
+
# the instance (we could create a new instance for this).
|
| 463 |
+
# This duplicates the code from `__enter__`.
|
| 464 |
+
@functools.wraps(func)
|
| 465 |
+
def inner(*args, **kwargs):
|
| 466 |
+
if self._call is _Unspecified:
|
| 467 |
+
extobj = _make_extobj(
|
| 468 |
+
all=self._all, divide=self._divide, over=self._over,
|
| 469 |
+
under=self._under, invalid=self._invalid)
|
| 470 |
+
else:
|
| 471 |
+
extobj = _make_extobj(
|
| 472 |
+
call=self._call,
|
| 473 |
+
all=self._all, divide=self._divide, over=self._over,
|
| 474 |
+
under=self._under, invalid=self._invalid)
|
| 475 |
+
|
| 476 |
+
_token = _extobj_contextvar.set(extobj)
|
| 477 |
+
try:
|
| 478 |
+
# Call the original, decorated, function:
|
| 479 |
+
return func(*args, **kwargs)
|
| 480 |
+
finally:
|
| 481 |
+
_extobj_contextvar.reset(_token)
|
| 482 |
+
|
| 483 |
+
return inner
|
janus/lib/python3.10/site-packages/numpy/_core/_umath_tests.cpython-310-x86_64-linux-gnu.so
ADDED
|
Binary file (46.4 kB). View file
|
|
|
janus/lib/python3.10/site-packages/numpy/_core/arrayprint.pyi
ADDED
|
@@ -0,0 +1,137 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from collections.abc import Callable
|
| 2 |
+
from typing import Any, Literal, TypeAlias, TypedDict, SupportsIndex, type_check_only
|
| 3 |
+
|
| 4 |
+
# Using a private class is by no means ideal, but it is simply a consequence
|
| 5 |
+
# of a `contextlib.context` returning an instance of aforementioned class
|
| 6 |
+
from contextlib import _GeneratorContextManager
|
| 7 |
+
|
| 8 |
+
import numpy as np
|
| 9 |
+
from numpy import (
|
| 10 |
+
integer,
|
| 11 |
+
timedelta64,
|
| 12 |
+
datetime64,
|
| 13 |
+
floating,
|
| 14 |
+
complexfloating,
|
| 15 |
+
void,
|
| 16 |
+
longdouble,
|
| 17 |
+
clongdouble,
|
| 18 |
+
)
|
| 19 |
+
from numpy._typing import NDArray, _CharLike_co, _FloatLike_co
|
| 20 |
+
|
| 21 |
+
_FloatMode: TypeAlias = Literal["fixed", "unique", "maxprec", "maxprec_equal"]
|
| 22 |
+
|
| 23 |
+
@type_check_only
|
| 24 |
+
class _FormatDict(TypedDict, total=False):
|
| 25 |
+
bool: Callable[[np.bool], str]
|
| 26 |
+
int: Callable[[integer[Any]], str]
|
| 27 |
+
timedelta: Callable[[timedelta64], str]
|
| 28 |
+
datetime: Callable[[datetime64], str]
|
| 29 |
+
float: Callable[[floating[Any]], str]
|
| 30 |
+
longfloat: Callable[[longdouble], str]
|
| 31 |
+
complexfloat: Callable[[complexfloating[Any, Any]], str]
|
| 32 |
+
longcomplexfloat: Callable[[clongdouble], str]
|
| 33 |
+
void: Callable[[void], str]
|
| 34 |
+
numpystr: Callable[[_CharLike_co], str]
|
| 35 |
+
object: Callable[[object], str]
|
| 36 |
+
all: Callable[[object], str]
|
| 37 |
+
int_kind: Callable[[integer[Any]], str]
|
| 38 |
+
float_kind: Callable[[floating[Any]], str]
|
| 39 |
+
complex_kind: Callable[[complexfloating[Any, Any]], str]
|
| 40 |
+
str_kind: Callable[[_CharLike_co], str]
|
| 41 |
+
|
| 42 |
+
@type_check_only
|
| 43 |
+
class _FormatOptions(TypedDict):
|
| 44 |
+
precision: int
|
| 45 |
+
threshold: int
|
| 46 |
+
edgeitems: int
|
| 47 |
+
linewidth: int
|
| 48 |
+
suppress: bool
|
| 49 |
+
nanstr: str
|
| 50 |
+
infstr: str
|
| 51 |
+
formatter: None | _FormatDict
|
| 52 |
+
sign: Literal["-", "+", " "]
|
| 53 |
+
floatmode: _FloatMode
|
| 54 |
+
legacy: Literal[False, "1.13", "1.21"]
|
| 55 |
+
|
| 56 |
+
def set_printoptions(
|
| 57 |
+
precision: None | SupportsIndex = ...,
|
| 58 |
+
threshold: None | int = ...,
|
| 59 |
+
edgeitems: None | int = ...,
|
| 60 |
+
linewidth: None | int = ...,
|
| 61 |
+
suppress: None | bool = ...,
|
| 62 |
+
nanstr: None | str = ...,
|
| 63 |
+
infstr: None | str = ...,
|
| 64 |
+
formatter: None | _FormatDict = ...,
|
| 65 |
+
sign: Literal[None, "-", "+", " "] = ...,
|
| 66 |
+
floatmode: None | _FloatMode = ...,
|
| 67 |
+
*,
|
| 68 |
+
legacy: Literal[None, False, "1.13", "1.21"] = ...,
|
| 69 |
+
override_repr: None | Callable[[NDArray[Any]], str] = ...,
|
| 70 |
+
) -> None: ...
|
| 71 |
+
def get_printoptions() -> _FormatOptions: ...
|
| 72 |
+
def array2string(
|
| 73 |
+
a: NDArray[Any],
|
| 74 |
+
max_line_width: None | int = ...,
|
| 75 |
+
precision: None | SupportsIndex = ...,
|
| 76 |
+
suppress_small: None | bool = ...,
|
| 77 |
+
separator: str = ...,
|
| 78 |
+
prefix: str = ...,
|
| 79 |
+
# NOTE: With the `style` argument being deprecated,
|
| 80 |
+
# all arguments between `formatter` and `suffix` are de facto
|
| 81 |
+
# keyworld-only arguments
|
| 82 |
+
*,
|
| 83 |
+
formatter: None | _FormatDict = ...,
|
| 84 |
+
threshold: None | int = ...,
|
| 85 |
+
edgeitems: None | int = ...,
|
| 86 |
+
sign: Literal[None, "-", "+", " "] = ...,
|
| 87 |
+
floatmode: None | _FloatMode = ...,
|
| 88 |
+
suffix: str = ...,
|
| 89 |
+
legacy: Literal[None, False, "1.13", "1.21"] = ...,
|
| 90 |
+
) -> str: ...
|
| 91 |
+
def format_float_scientific(
|
| 92 |
+
x: _FloatLike_co,
|
| 93 |
+
precision: None | int = ...,
|
| 94 |
+
unique: bool = ...,
|
| 95 |
+
trim: Literal["k", ".", "0", "-"] = ...,
|
| 96 |
+
sign: bool = ...,
|
| 97 |
+
pad_left: None | int = ...,
|
| 98 |
+
exp_digits: None | int = ...,
|
| 99 |
+
min_digits: None | int = ...,
|
| 100 |
+
) -> str: ...
|
| 101 |
+
def format_float_positional(
|
| 102 |
+
x: _FloatLike_co,
|
| 103 |
+
precision: None | int = ...,
|
| 104 |
+
unique: bool = ...,
|
| 105 |
+
fractional: bool = ...,
|
| 106 |
+
trim: Literal["k", ".", "0", "-"] = ...,
|
| 107 |
+
sign: bool = ...,
|
| 108 |
+
pad_left: None | int = ...,
|
| 109 |
+
pad_right: None | int = ...,
|
| 110 |
+
min_digits: None | int = ...,
|
| 111 |
+
) -> str: ...
|
| 112 |
+
def array_repr(
|
| 113 |
+
arr: NDArray[Any],
|
| 114 |
+
max_line_width: None | int = ...,
|
| 115 |
+
precision: None | SupportsIndex = ...,
|
| 116 |
+
suppress_small: None | bool = ...,
|
| 117 |
+
) -> str: ...
|
| 118 |
+
def array_str(
|
| 119 |
+
a: NDArray[Any],
|
| 120 |
+
max_line_width: None | int = ...,
|
| 121 |
+
precision: None | SupportsIndex = ...,
|
| 122 |
+
suppress_small: None | bool = ...,
|
| 123 |
+
) -> str: ...
|
| 124 |
+
def printoptions(
|
| 125 |
+
precision: None | SupportsIndex = ...,
|
| 126 |
+
threshold: None | int = ...,
|
| 127 |
+
edgeitems: None | int = ...,
|
| 128 |
+
linewidth: None | int = ...,
|
| 129 |
+
suppress: None | bool = ...,
|
| 130 |
+
nanstr: None | str = ...,
|
| 131 |
+
infstr: None | str = ...,
|
| 132 |
+
formatter: None | _FormatDict = ...,
|
| 133 |
+
sign: Literal[None, "-", "+", " "] = ...,
|
| 134 |
+
floatmode: None | _FloatMode = ...,
|
| 135 |
+
*,
|
| 136 |
+
legacy: Literal[None, False, "1.13", "1.21"] = ...
|
| 137 |
+
) -> _GeneratorContextManager[_FormatOptions]: ...
|
janus/lib/python3.10/site-packages/numpy/_core/cversions.py
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Simple script to compute the api hash of the current API.
|
| 2 |
+
|
| 3 |
+
The API has is defined by numpy_api_order and ufunc_api_order.
|
| 4 |
+
|
| 5 |
+
"""
|
| 6 |
+
from os.path import dirname
|
| 7 |
+
|
| 8 |
+
from code_generators.genapi import fullapi_hash
|
| 9 |
+
from code_generators.numpy_api import full_api
|
| 10 |
+
|
| 11 |
+
if __name__ == '__main__':
|
| 12 |
+
curdir = dirname(__file__)
|
| 13 |
+
print(fullapi_hash(full_api))
|
janus/lib/python3.10/site-packages/numpy/_core/defchararray.py
ADDED
|
@@ -0,0 +1,1414 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
This module contains a set of functions for vectorized string
|
| 3 |
+
operations and methods.
|
| 4 |
+
|
| 5 |
+
.. note::
|
| 6 |
+
The `chararray` class exists for backwards compatibility with
|
| 7 |
+
Numarray, it is not recommended for new development. Starting from numpy
|
| 8 |
+
1.4, if one needs arrays of strings, it is recommended to use arrays of
|
| 9 |
+
`dtype` `object_`, `bytes_` or `str_`, and use the free functions
|
| 10 |
+
in the `numpy.char` module for fast vectorized string operations.
|
| 11 |
+
|
| 12 |
+
Some methods will only be available if the corresponding string method is
|
| 13 |
+
available in your version of Python.
|
| 14 |
+
|
| 15 |
+
The preferred alias for `defchararray` is `numpy.char`.
|
| 16 |
+
|
| 17 |
+
"""
|
| 18 |
+
import functools
|
| 19 |
+
|
| 20 |
+
import numpy as np
|
| 21 |
+
from .._utils import set_module
|
| 22 |
+
from .numerictypes import bytes_, str_, character
|
| 23 |
+
from .numeric import ndarray, array as narray, asarray as asnarray
|
| 24 |
+
from numpy._core.multiarray import compare_chararrays
|
| 25 |
+
from numpy._core import overrides
|
| 26 |
+
from numpy.strings import *
|
| 27 |
+
from numpy.strings import (
|
| 28 |
+
multiply as strings_multiply,
|
| 29 |
+
partition as strings_partition,
|
| 30 |
+
rpartition as strings_rpartition,
|
| 31 |
+
)
|
| 32 |
+
from numpy._core.strings import (
|
| 33 |
+
_split as split,
|
| 34 |
+
_rsplit as rsplit,
|
| 35 |
+
_splitlines as splitlines,
|
| 36 |
+
_join as join,
|
| 37 |
+
)
|
| 38 |
+
|
| 39 |
+
__all__ = [
|
| 40 |
+
'equal', 'not_equal', 'greater_equal', 'less_equal',
|
| 41 |
+
'greater', 'less', 'str_len', 'add', 'multiply', 'mod', 'capitalize',
|
| 42 |
+
'center', 'count', 'decode', 'encode', 'endswith', 'expandtabs',
|
| 43 |
+
'find', 'index', 'isalnum', 'isalpha', 'isdigit', 'islower', 'isspace',
|
| 44 |
+
'istitle', 'isupper', 'join', 'ljust', 'lower', 'lstrip', 'partition',
|
| 45 |
+
'replace', 'rfind', 'rindex', 'rjust', 'rpartition', 'rsplit',
|
| 46 |
+
'rstrip', 'split', 'splitlines', 'startswith', 'strip', 'swapcase',
|
| 47 |
+
'title', 'translate', 'upper', 'zfill', 'isnumeric', 'isdecimal',
|
| 48 |
+
'array', 'asarray', 'compare_chararrays', 'chararray'
|
| 49 |
+
]
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
array_function_dispatch = functools.partial(
|
| 53 |
+
overrides.array_function_dispatch, module='numpy.char')
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def _binary_op_dispatcher(x1, x2):
|
| 57 |
+
return (x1, x2)
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
@array_function_dispatch(_binary_op_dispatcher)
|
| 61 |
+
def equal(x1, x2):
|
| 62 |
+
"""
|
| 63 |
+
Return (x1 == x2) element-wise.
|
| 64 |
+
|
| 65 |
+
Unlike `numpy.equal`, this comparison is performed by first
|
| 66 |
+
stripping whitespace characters from the end of the string. This
|
| 67 |
+
behavior is provided for backward-compatibility with numarray.
|
| 68 |
+
|
| 69 |
+
Parameters
|
| 70 |
+
----------
|
| 71 |
+
x1, x2 : array_like of str or unicode
|
| 72 |
+
Input arrays of the same shape.
|
| 73 |
+
|
| 74 |
+
Returns
|
| 75 |
+
-------
|
| 76 |
+
out : ndarray
|
| 77 |
+
Output array of bools.
|
| 78 |
+
|
| 79 |
+
Examples
|
| 80 |
+
--------
|
| 81 |
+
>>> import numpy as np
|
| 82 |
+
>>> y = "aa "
|
| 83 |
+
>>> x = "aa"
|
| 84 |
+
>>> np.char.equal(x, y)
|
| 85 |
+
array(True)
|
| 86 |
+
|
| 87 |
+
See Also
|
| 88 |
+
--------
|
| 89 |
+
not_equal, greater_equal, less_equal, greater, less
|
| 90 |
+
"""
|
| 91 |
+
return compare_chararrays(x1, x2, '==', True)
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
@array_function_dispatch(_binary_op_dispatcher)
|
| 95 |
+
def not_equal(x1, x2):
|
| 96 |
+
"""
|
| 97 |
+
Return (x1 != x2) element-wise.
|
| 98 |
+
|
| 99 |
+
Unlike `numpy.not_equal`, this comparison is performed by first
|
| 100 |
+
stripping whitespace characters from the end of the string. This
|
| 101 |
+
behavior is provided for backward-compatibility with numarray.
|
| 102 |
+
|
| 103 |
+
Parameters
|
| 104 |
+
----------
|
| 105 |
+
x1, x2 : array_like of str or unicode
|
| 106 |
+
Input arrays of the same shape.
|
| 107 |
+
|
| 108 |
+
Returns
|
| 109 |
+
-------
|
| 110 |
+
out : ndarray
|
| 111 |
+
Output array of bools.
|
| 112 |
+
|
| 113 |
+
See Also
|
| 114 |
+
--------
|
| 115 |
+
equal, greater_equal, less_equal, greater, less
|
| 116 |
+
|
| 117 |
+
Examples
|
| 118 |
+
--------
|
| 119 |
+
>>> import numpy as np
|
| 120 |
+
>>> x1 = np.array(['a', 'b', 'c'])
|
| 121 |
+
>>> np.char.not_equal(x1, 'b')
|
| 122 |
+
array([ True, False, True])
|
| 123 |
+
|
| 124 |
+
"""
|
| 125 |
+
return compare_chararrays(x1, x2, '!=', True)
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
@array_function_dispatch(_binary_op_dispatcher)
|
| 129 |
+
def greater_equal(x1, x2):
|
| 130 |
+
"""
|
| 131 |
+
Return (x1 >= x2) element-wise.
|
| 132 |
+
|
| 133 |
+
Unlike `numpy.greater_equal`, this comparison is performed by
|
| 134 |
+
first stripping whitespace characters from the end of the string.
|
| 135 |
+
This behavior is provided for backward-compatibility with
|
| 136 |
+
numarray.
|
| 137 |
+
|
| 138 |
+
Parameters
|
| 139 |
+
----------
|
| 140 |
+
x1, x2 : array_like of str or unicode
|
| 141 |
+
Input arrays of the same shape.
|
| 142 |
+
|
| 143 |
+
Returns
|
| 144 |
+
-------
|
| 145 |
+
out : ndarray
|
| 146 |
+
Output array of bools.
|
| 147 |
+
|
| 148 |
+
See Also
|
| 149 |
+
--------
|
| 150 |
+
equal, not_equal, less_equal, greater, less
|
| 151 |
+
|
| 152 |
+
Examples
|
| 153 |
+
--------
|
| 154 |
+
>>> import numpy as np
|
| 155 |
+
>>> x1 = np.array(['a', 'b', 'c'])
|
| 156 |
+
>>> np.char.greater_equal(x1, 'b')
|
| 157 |
+
array([False, True, True])
|
| 158 |
+
|
| 159 |
+
"""
|
| 160 |
+
return compare_chararrays(x1, x2, '>=', True)
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
@array_function_dispatch(_binary_op_dispatcher)
|
| 164 |
+
def less_equal(x1, x2):
|
| 165 |
+
"""
|
| 166 |
+
Return (x1 <= x2) element-wise.
|
| 167 |
+
|
| 168 |
+
Unlike `numpy.less_equal`, this comparison is performed by first
|
| 169 |
+
stripping whitespace characters from the end of the string. This
|
| 170 |
+
behavior is provided for backward-compatibility with numarray.
|
| 171 |
+
|
| 172 |
+
Parameters
|
| 173 |
+
----------
|
| 174 |
+
x1, x2 : array_like of str or unicode
|
| 175 |
+
Input arrays of the same shape.
|
| 176 |
+
|
| 177 |
+
Returns
|
| 178 |
+
-------
|
| 179 |
+
out : ndarray
|
| 180 |
+
Output array of bools.
|
| 181 |
+
|
| 182 |
+
See Also
|
| 183 |
+
--------
|
| 184 |
+
equal, not_equal, greater_equal, greater, less
|
| 185 |
+
|
| 186 |
+
Examples
|
| 187 |
+
--------
|
| 188 |
+
>>> import numpy as np
|
| 189 |
+
>>> x1 = np.array(['a', 'b', 'c'])
|
| 190 |
+
>>> np.char.less_equal(x1, 'b')
|
| 191 |
+
array([ True, True, False])
|
| 192 |
+
|
| 193 |
+
"""
|
| 194 |
+
return compare_chararrays(x1, x2, '<=', True)
|
| 195 |
+
|
| 196 |
+
|
| 197 |
+
@array_function_dispatch(_binary_op_dispatcher)
|
| 198 |
+
def greater(x1, x2):
|
| 199 |
+
"""
|
| 200 |
+
Return (x1 > x2) element-wise.
|
| 201 |
+
|
| 202 |
+
Unlike `numpy.greater`, this comparison is performed by first
|
| 203 |
+
stripping whitespace characters from the end of the string. This
|
| 204 |
+
behavior is provided for backward-compatibility with numarray.
|
| 205 |
+
|
| 206 |
+
Parameters
|
| 207 |
+
----------
|
| 208 |
+
x1, x2 : array_like of str or unicode
|
| 209 |
+
Input arrays of the same shape.
|
| 210 |
+
|
| 211 |
+
Returns
|
| 212 |
+
-------
|
| 213 |
+
out : ndarray
|
| 214 |
+
Output array of bools.
|
| 215 |
+
|
| 216 |
+
See Also
|
| 217 |
+
--------
|
| 218 |
+
equal, not_equal, greater_equal, less_equal, less
|
| 219 |
+
|
| 220 |
+
Examples
|
| 221 |
+
--------
|
| 222 |
+
>>> import numpy as np
|
| 223 |
+
>>> x1 = np.array(['a', 'b', 'c'])
|
| 224 |
+
>>> np.char.greater(x1, 'b')
|
| 225 |
+
array([False, False, True])
|
| 226 |
+
|
| 227 |
+
"""
|
| 228 |
+
return compare_chararrays(x1, x2, '>', True)
|
| 229 |
+
|
| 230 |
+
|
| 231 |
+
@array_function_dispatch(_binary_op_dispatcher)
|
| 232 |
+
def less(x1, x2):
|
| 233 |
+
"""
|
| 234 |
+
Return (x1 < x2) element-wise.
|
| 235 |
+
|
| 236 |
+
Unlike `numpy.greater`, this comparison is performed by first
|
| 237 |
+
stripping whitespace characters from the end of the string. This
|
| 238 |
+
behavior is provided for backward-compatibility with numarray.
|
| 239 |
+
|
| 240 |
+
Parameters
|
| 241 |
+
----------
|
| 242 |
+
x1, x2 : array_like of str or unicode
|
| 243 |
+
Input arrays of the same shape.
|
| 244 |
+
|
| 245 |
+
Returns
|
| 246 |
+
-------
|
| 247 |
+
out : ndarray
|
| 248 |
+
Output array of bools.
|
| 249 |
+
|
| 250 |
+
See Also
|
| 251 |
+
--------
|
| 252 |
+
equal, not_equal, greater_equal, less_equal, greater
|
| 253 |
+
|
| 254 |
+
Examples
|
| 255 |
+
--------
|
| 256 |
+
>>> import numpy as np
|
| 257 |
+
>>> x1 = np.array(['a', 'b', 'c'])
|
| 258 |
+
>>> np.char.less(x1, 'b')
|
| 259 |
+
array([True, False, False])
|
| 260 |
+
|
| 261 |
+
"""
|
| 262 |
+
return compare_chararrays(x1, x2, '<', True)
|
| 263 |
+
|
| 264 |
+
|
| 265 |
+
@set_module("numpy.char")
|
| 266 |
+
def multiply(a, i):
|
| 267 |
+
"""
|
| 268 |
+
Return (a * i), that is string multiple concatenation,
|
| 269 |
+
element-wise.
|
| 270 |
+
|
| 271 |
+
Values in ``i`` of less than 0 are treated as 0 (which yields an
|
| 272 |
+
empty string).
|
| 273 |
+
|
| 274 |
+
Parameters
|
| 275 |
+
----------
|
| 276 |
+
a : array_like, with `np.bytes_` or `np.str_` dtype
|
| 277 |
+
|
| 278 |
+
i : array_like, with any integer dtype
|
| 279 |
+
|
| 280 |
+
Returns
|
| 281 |
+
-------
|
| 282 |
+
out : ndarray
|
| 283 |
+
Output array of str or unicode, depending on input types
|
| 284 |
+
|
| 285 |
+
Notes
|
| 286 |
+
-----
|
| 287 |
+
This is a thin wrapper around np.strings.multiply that raises
|
| 288 |
+
`ValueError` when ``i`` is not an integer. It only
|
| 289 |
+
exists for backwards-compatibility.
|
| 290 |
+
|
| 291 |
+
Examples
|
| 292 |
+
--------
|
| 293 |
+
>>> import numpy as np
|
| 294 |
+
>>> a = np.array(["a", "b", "c"])
|
| 295 |
+
>>> np.strings.multiply(a, 3)
|
| 296 |
+
array(['aaa', 'bbb', 'ccc'], dtype='<U3')
|
| 297 |
+
>>> i = np.array([1, 2, 3])
|
| 298 |
+
>>> np.strings.multiply(a, i)
|
| 299 |
+
array(['a', 'bb', 'ccc'], dtype='<U3')
|
| 300 |
+
>>> np.strings.multiply(np.array(['a']), i)
|
| 301 |
+
array(['a', 'aa', 'aaa'], dtype='<U3')
|
| 302 |
+
>>> a = np.array(['a', 'b', 'c', 'd', 'e', 'f']).reshape((2, 3))
|
| 303 |
+
>>> np.strings.multiply(a, 3)
|
| 304 |
+
array([['aaa', 'bbb', 'ccc'],
|
| 305 |
+
['ddd', 'eee', 'fff']], dtype='<U3')
|
| 306 |
+
>>> np.strings.multiply(a, i)
|
| 307 |
+
array([['a', 'bb', 'ccc'],
|
| 308 |
+
['d', 'ee', 'fff']], dtype='<U3')
|
| 309 |
+
|
| 310 |
+
"""
|
| 311 |
+
try:
|
| 312 |
+
return strings_multiply(a, i)
|
| 313 |
+
except TypeError:
|
| 314 |
+
raise ValueError("Can only multiply by integers")
|
| 315 |
+
|
| 316 |
+
|
| 317 |
+
@set_module("numpy.char")
|
| 318 |
+
def partition(a, sep):
|
| 319 |
+
"""
|
| 320 |
+
Partition each element in `a` around `sep`.
|
| 321 |
+
|
| 322 |
+
Calls :meth:`str.partition` element-wise.
|
| 323 |
+
|
| 324 |
+
For each element in `a`, split the element as the first
|
| 325 |
+
occurrence of `sep`, and return 3 strings containing the part
|
| 326 |
+
before the separator, the separator itself, and the part after
|
| 327 |
+
the separator. If the separator is not found, return 3 strings
|
| 328 |
+
containing the string itself, followed by two empty strings.
|
| 329 |
+
|
| 330 |
+
Parameters
|
| 331 |
+
----------
|
| 332 |
+
a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
|
| 333 |
+
Input array
|
| 334 |
+
sep : {str, unicode}
|
| 335 |
+
Separator to split each string element in `a`.
|
| 336 |
+
|
| 337 |
+
Returns
|
| 338 |
+
-------
|
| 339 |
+
out : ndarray
|
| 340 |
+
Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype,
|
| 341 |
+
depending on input types. The output array will have an extra
|
| 342 |
+
dimension with 3 elements per input element.
|
| 343 |
+
|
| 344 |
+
Examples
|
| 345 |
+
--------
|
| 346 |
+
>>> import numpy as np
|
| 347 |
+
>>> x = np.array(["Numpy is nice!"])
|
| 348 |
+
>>> np.char.partition(x, " ")
|
| 349 |
+
array([['Numpy', ' ', 'is nice!']], dtype='<U8')
|
| 350 |
+
|
| 351 |
+
See Also
|
| 352 |
+
--------
|
| 353 |
+
str.partition
|
| 354 |
+
|
| 355 |
+
"""
|
| 356 |
+
return np.stack(strings_partition(a, sep), axis=-1)
|
| 357 |
+
|
| 358 |
+
|
| 359 |
+
@set_module("numpy.char")
|
| 360 |
+
def rpartition(a, sep):
|
| 361 |
+
"""
|
| 362 |
+
Partition (split) each element around the right-most separator.
|
| 363 |
+
|
| 364 |
+
Calls :meth:`str.rpartition` element-wise.
|
| 365 |
+
|
| 366 |
+
For each element in `a`, split the element as the last
|
| 367 |
+
occurrence of `sep`, and return 3 strings containing the part
|
| 368 |
+
before the separator, the separator itself, and the part after
|
| 369 |
+
the separator. If the separator is not found, return 3 strings
|
| 370 |
+
containing the string itself, followed by two empty strings.
|
| 371 |
+
|
| 372 |
+
Parameters
|
| 373 |
+
----------
|
| 374 |
+
a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
|
| 375 |
+
Input array
|
| 376 |
+
sep : str or unicode
|
| 377 |
+
Right-most separator to split each element in array.
|
| 378 |
+
|
| 379 |
+
Returns
|
| 380 |
+
-------
|
| 381 |
+
out : ndarray
|
| 382 |
+
Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype,
|
| 383 |
+
depending on input types. The output array will have an extra
|
| 384 |
+
dimension with 3 elements per input element.
|
| 385 |
+
|
| 386 |
+
See Also
|
| 387 |
+
--------
|
| 388 |
+
str.rpartition
|
| 389 |
+
|
| 390 |
+
Examples
|
| 391 |
+
--------
|
| 392 |
+
>>> import numpy as np
|
| 393 |
+
>>> a = np.array(['aAaAaA', ' aA ', 'abBABba'])
|
| 394 |
+
>>> np.char.rpartition(a, 'A')
|
| 395 |
+
array([['aAaAa', 'A', ''],
|
| 396 |
+
[' a', 'A', ' '],
|
| 397 |
+
['abB', 'A', 'Bba']], dtype='<U5')
|
| 398 |
+
|
| 399 |
+
"""
|
| 400 |
+
return np.stack(strings_rpartition(a, sep), axis=-1)
|
| 401 |
+
|
| 402 |
+
|
| 403 |
+
@set_module("numpy.char")
class chararray(ndarray):
    """
    chararray(shape, itemsize=1, unicode=False, buffer=None, offset=0,
              strides=None, order=None)

    Provides a convenient view on arrays of string and unicode values.

    .. note::
       The `chararray` class exists for backwards compatibility with
       Numarray, it is not recommended for new development. Starting from numpy
       1.4, if one needs arrays of strings, it is recommended to use arrays of
       `dtype` `~numpy.object_`, `~numpy.bytes_` or `~numpy.str_`, and use
       the free functions in the `numpy.char` module for fast vectorized
       string operations.

    Versus a NumPy array of dtype `~numpy.bytes_` or `~numpy.str_`, this
    class adds the following functionality:

    1) values automatically have whitespace removed from the end
       when indexed

    2) comparison operators automatically remove whitespace from the
       end when comparing values

    3) vectorized string operations are provided as methods
       (e.g. `.endswith`) and infix operators (e.g. ``"+", "*", "%"``)

    chararrays should be created using `numpy.char.array` or
    `numpy.char.asarray`, rather than this constructor directly.

    This constructor creates the array, using `buffer` (with `offset`
    and `strides`) if it is not ``None``. If `buffer` is ``None``, then
    constructs a new array with `strides` in "C order", unless both
    ``len(shape) >= 2`` and ``order='F'``, in which case `strides`
    is in "Fortran order".

    Methods
    -------
    astype
    argsort
    copy
    count
    decode
    dump
    dumps
    encode
    endswith
    expandtabs
    fill
    find
    flatten
    getfield
    index
    isalnum
    isalpha
    isdecimal
    isdigit
    islower
    isnumeric
    isspace
    istitle
    isupper
    item
    join
    ljust
    lower
    lstrip
    nonzero
    put
    ravel
    repeat
    replace
    reshape
    resize
    rfind
    rindex
    rjust
    rsplit
    rstrip
    searchsorted
    setfield
    setflags
    sort
    split
    splitlines
    squeeze
    startswith
    strip
    swapaxes
    swapcase
    take
    title
    tofile
    tolist
    tostring
    translate
    transpose
    upper
    view
    zfill

    Parameters
    ----------
    shape : tuple
        Shape of the array.
    itemsize : int, optional
        Length of each array element, in number of characters. Default is 1.
    unicode : bool, optional
        Are the array elements of type unicode (True) or string (False).
        Default is False.
    buffer : object exposing the buffer interface or str, optional
        Memory address of the start of the array data. Default is None,
        in which case a new array is created.
    offset : int, optional
        Byte offset from the start of `buffer` at which the array data
        begins. Default is 0. Needs to be >=0.
    strides : array_like of ints, optional
        Strides for the array (see `~numpy.ndarray.strides` for
        full description). Default is None.
    order : {'C', 'F'}, optional
        The order in which the array data is stored in memory: 'C' ->
        "row major" order (the default), 'F' -> "column major"
        (Fortran) order.

    Examples
    --------
    >>> import numpy as np
    >>> charar = np.char.chararray((3, 3))
    >>> charar[:] = 'a'
    >>> charar
    chararray([[b'a', b'a', b'a'],
               [b'a', b'a', b'a'],
               [b'a', b'a', b'a']], dtype='|S1')

    >>> charar = np.char.chararray(charar.shape, itemsize=5)
    >>> charar[:] = 'abc'
    >>> charar
    chararray([[b'abc', b'abc', b'abc'],
               [b'abc', b'abc', b'abc'],
               [b'abc', b'abc', b'abc']], dtype='|S5')

    """
    def __new__(subtype, shape, itemsize=1, unicode=False, buffer=None,
                offset=0, strides=None, order='C'):
        # Choose the element scalar type; itemsize fixes the per-element
        # character count of the resulting fixed-width dtype.
        if unicode:
            dtype = str_
        else:
            dtype = bytes_

        # force itemsize to be a Python int, since using NumPy integer
        # types results in itemsize.itemsize being used as the size of
        # strings in the new array.
        itemsize = int(itemsize)

        if isinstance(buffer, str):
            # unicode objects do not have the buffer interface; treat the
            # string as a fill value and allocate a fresh array instead.
            filler = buffer
            buffer = None
        else:
            filler = None

        if buffer is None:
            self = ndarray.__new__(subtype, shape, (dtype, itemsize),
                                   order=order)
        else:
            self = ndarray.__new__(subtype, shape, (dtype, itemsize),
                                   buffer=buffer,
                                   offset=offset, strides=strides,
                                   order=order)
        if filler is not None:
            self[...] = filler

        return self

    def __array_wrap__(self, arr, context=None, return_scalar=False):
        # When calling a ufunc (and some other functions), we return a
        # chararray if the ufunc output is a string-like array,
        # or an ndarray otherwise
        if arr.dtype.char in "SUbc":
            return arr.view(type(self))
        return arr

    def __array_finalize__(self, obj):
        # Reject views/creations over non-string data so every chararray
        # holds string-like elements.
        # The b is a special case because it is used for reconstructing.
        if self.dtype.char not in 'VSUbc':
            raise ValueError("Can only create a chararray from string data.")

    def __getitem__(self, obj):
        # Scalar reads are stripped of trailing whitespace — this is the
        # numarray-compatible behavior advertised in the class docstring.
        val = ndarray.__getitem__(self, obj)
        if isinstance(val, character):
            return val.rstrip()
        return val

    # IMPLEMENTATION NOTE: Most of the methods of this class are
    # direct delegations to the free functions in this module.
    # However, those that return an array of strings should instead
    # return a chararray, so some extra wrapping is required.

    def __eq__(self, other):
        """
        Return (self == other) element-wise.

        See Also
        --------
        equal
        """
        return equal(self, other)

    def __ne__(self, other):
        """
        Return (self != other) element-wise.

        See Also
        --------
        not_equal
        """
        return not_equal(self, other)

    def __ge__(self, other):
        """
        Return (self >= other) element-wise.

        See Also
        --------
        greater_equal
        """
        return greater_equal(self, other)

    def __le__(self, other):
        """
        Return (self <= other) element-wise.

        See Also
        --------
        less_equal
        """
        return less_equal(self, other)

    def __gt__(self, other):
        """
        Return (self > other) element-wise.

        See Also
        --------
        greater
        """
        return greater(self, other)

    def __lt__(self, other):
        """
        Return (self < other) element-wise.

        See Also
        --------
        less
        """
        return less(self, other)

    def __add__(self, other):
        """
        Return (self + other), that is string concatenation,
        element-wise for a pair of array_likes of str or unicode.

        See Also
        --------
        add
        """
        return add(self, other)

    def __radd__(self, other):
        """
        Return (other + self), that is string concatenation,
        element-wise for a pair of array_likes of `bytes_` or `str_`.

        See Also
        --------
        add
        """
        return add(other, self)

    def __mul__(self, i):
        """
        Return (self * i), that is string multiple concatenation,
        element-wise.

        See Also
        --------
        multiply
        """
        return asarray(multiply(self, i))

    def __rmul__(self, i):
        """
        Return (self * i), that is string multiple concatenation,
        element-wise.

        See Also
        --------
        multiply
        """
        # Identical to __mul__: the repetition count may appear on
        # either side of the ``*`` operator.
        return asarray(multiply(self, i))

    def __mod__(self, i):
        """
        Return (self % i), that is pre-Python 2.6 string formatting
        (interpolation), element-wise for a pair of array_likes of `bytes_`
        or `str_`.

        See Also
        --------
        mod
        """
        return asarray(mod(self, i))

    def __rmod__(self, other):
        # Reflected % is not supported; Python falls back to the other
        # operand's __mod__ or raises TypeError.
        return NotImplemented

    def argsort(self, axis=-1, kind=None, order=None):
        """
        Return the indices that sort the array lexicographically.

        For full documentation see `numpy.argsort`, for which this method is
        in fact merely a "thin wrapper."

        Examples
        --------
        >>> c = np.array(['a1b c', '1b ca', 'b ca1', 'Ca1b'], 'S5')
        >>> c = c.view(np.char.chararray); c
        chararray(['a1b c', '1b ca', 'b ca1', 'Ca1b'],
              dtype='|S5')
        >>> c[c.argsort()]
        chararray(['1b ca', 'Ca1b', 'a1b c', 'b ca1'],
              dtype='|S5')

        """
        return self.__array__().argsort(axis, kind, order)
    argsort.__doc__ = ndarray.argsort.__doc__

    def capitalize(self):
        """
        Return a copy of `self` with only the first character of each element
        capitalized.

        See Also
        --------
        char.capitalize

        """
        return asarray(capitalize(self))

    def center(self, width, fillchar=' '):
        """
        Return a copy of `self` with its elements centered in a
        string of length `width`.

        See Also
        --------
        center
        """
        return asarray(center(self, width, fillchar))

    def count(self, sub, start=0, end=None):
        """
        Returns an array with the number of non-overlapping occurrences of
        substring `sub` in the range [`start`, `end`].

        See Also
        --------
        char.count

        """
        return count(self, sub, start, end)

    def decode(self, encoding=None, errors=None):
        """
        Calls ``bytes.decode`` element-wise.

        See Also
        --------
        char.decode

        """
        return decode(self, encoding, errors)

    def encode(self, encoding=None, errors=None):
        """
        Calls :meth:`str.encode` element-wise.

        See Also
        --------
        char.encode

        """
        return encode(self, encoding, errors)

    def endswith(self, suffix, start=0, end=None):
        """
        Returns a boolean array which is `True` where the string element
        in `self` ends with `suffix`, otherwise `False`.

        See Also
        --------
        char.endswith

        """
        return endswith(self, suffix, start, end)

    def expandtabs(self, tabsize=8):
        """
        Return a copy of each string element where all tab characters are
        replaced by one or more spaces.

        See Also
        --------
        char.expandtabs

        """
        return asarray(expandtabs(self, tabsize))

    def find(self, sub, start=0, end=None):
        """
        For each element, return the lowest index in the string where
        substring `sub` is found.

        See Also
        --------
        char.find

        """
        return find(self, sub, start, end)

    def index(self, sub, start=0, end=None):
        """
        Like `find`, but raises :exc:`ValueError` when the substring is not
        found.

        See Also
        --------
        char.index

        """
        return index(self, sub, start, end)

    def isalnum(self):
        """
        Returns true for each element if all characters in the string
        are alphanumeric and there is at least one character, false
        otherwise.

        See Also
        --------
        char.isalnum

        """
        return isalnum(self)

    def isalpha(self):
        """
        Returns true for each element if all characters in the string
        are alphabetic and there is at least one character, false
        otherwise.

        See Also
        --------
        char.isalpha

        """
        return isalpha(self)

    def isdigit(self):
        """
        Returns true for each element if all characters in the string are
        digits and there is at least one character, false otherwise.

        See Also
        --------
        char.isdigit

        """
        return isdigit(self)

    def islower(self):
        """
        Returns true for each element if all cased characters in the
        string are lowercase and there is at least one cased character,
        false otherwise.

        See Also
        --------
        char.islower

        """
        return islower(self)

    def isspace(self):
        """
        Returns true for each element if there are only whitespace
        characters in the string and there is at least one character,
        false otherwise.

        See Also
        --------
        char.isspace

        """
        return isspace(self)

    def istitle(self):
        """
        Returns true for each element if the element is a titlecased
        string and there is at least one character, false otherwise.

        See Also
        --------
        char.istitle

        """
        return istitle(self)

    def isupper(self):
        """
        Returns true for each element if all cased characters in the
        string are uppercase and there is at least one character, false
        otherwise.

        See Also
        --------
        char.isupper

        """
        return isupper(self)

    def join(self, seq):
        """
        Return a string which is the concatenation of the strings in the
        sequence `seq`.

        See Also
        --------
        char.join

        """
        return join(self, seq)

    def ljust(self, width, fillchar=' '):
        """
        Return an array with the elements of `self` left-justified in a
        string of length `width`.

        See Also
        --------
        char.ljust

        """
        return asarray(ljust(self, width, fillchar))

    def lower(self):
        """
        Return an array with the elements of `self` converted to
        lowercase.

        See Also
        --------
        char.lower

        """
        return asarray(lower(self))

    def lstrip(self, chars=None):
        """
        For each element in `self`, return a copy with the leading characters
        removed.

        See Also
        --------
        char.lstrip

        """
        return lstrip(self, chars)

    def partition(self, sep):
        """
        Partition each element in `self` around `sep`.

        See Also
        --------
        partition
        """
        # ``partition`` here resolves to the module-level function,
        # not this method (method names are class attributes only).
        return asarray(partition(self, sep))

    def replace(self, old, new, count=None):
        """
        For each element in `self`, return a copy of the string with all
        occurrences of substring `old` replaced by `new`.

        See Also
        --------
        char.replace

        """
        # The free function uses -1 to mean "replace all occurrences";
        # translate this method's ``None`` default accordingly.
        return replace(self, old, new, count if count is not None else -1)

    def rfind(self, sub, start=0, end=None):
        """
        For each element in `self`, return the highest index in the string
        where substring `sub` is found, such that `sub` is contained
        within [`start`, `end`].

        See Also
        --------
        char.rfind

        """
        return rfind(self, sub, start, end)

    def rindex(self, sub, start=0, end=None):
        """
        Like `rfind`, but raises :exc:`ValueError` when the substring `sub` is
        not found.

        See Also
        --------
        char.rindex

        """
        return rindex(self, sub, start, end)

    def rjust(self, width, fillchar=' '):
        """
        Return an array with the elements of `self`
        right-justified in a string of length `width`.

        See Also
        --------
        char.rjust

        """
        return asarray(rjust(self, width, fillchar))

    def rpartition(self, sep):
        """
        Partition each element in `self` around `sep`.

        See Also
        --------
        rpartition
        """
        return asarray(rpartition(self, sep))

    def rsplit(self, sep=None, maxsplit=None):
        """
        For each element in `self`, return a list of the words in
        the string, using `sep` as the delimiter string.

        See Also
        --------
        char.rsplit

        """
        return rsplit(self, sep, maxsplit)

    def rstrip(self, chars=None):
        """
        For each element in `self`, return a copy with the trailing
        characters removed.

        See Also
        --------
        char.rstrip

        """
        return rstrip(self, chars)

    def split(self, sep=None, maxsplit=None):
        """
        For each element in `self`, return a list of the words in the
        string, using `sep` as the delimiter string.

        See Also
        --------
        char.split

        """
        return split(self, sep, maxsplit)

    def splitlines(self, keepends=None):
        """
        For each element in `self`, return a list of the lines in the
        element, breaking at line boundaries.

        See Also
        --------
        char.splitlines

        """
        return splitlines(self, keepends)

    def startswith(self, prefix, start=0, end=None):
        """
        Returns a boolean array which is `True` where the string element
        in `self` starts with `prefix`, otherwise `False`.

        See Also
        --------
        char.startswith

        """
        return startswith(self, prefix, start, end)

    def strip(self, chars=None):
        """
        For each element in `self`, return a copy with the leading and
        trailing characters removed.

        See Also
        --------
        char.strip

        """
        return strip(self, chars)

    def swapcase(self):
        """
        For each element in `self`, return a copy of the string with
        uppercase characters converted to lowercase and vice versa.

        See Also
        --------
        char.swapcase

        """
        return asarray(swapcase(self))

    def title(self):
        """
        For each element in `self`, return a titlecased version of the
        string: words start with uppercase characters, all remaining cased
        characters are lowercase.

        See Also
        --------
        char.title

        """
        return asarray(title(self))

    def translate(self, table, deletechars=None):
        """
        For each element in `self`, return a copy of the string where
        all characters occurring in the optional argument
        `deletechars` are removed, and the remaining characters have
        been mapped through the given translation table.

        See Also
        --------
        char.translate

        """
        return asarray(translate(self, table, deletechars))

    def upper(self):
        """
        Return an array with the elements of `self` converted to
        uppercase.

        See Also
        --------
        char.upper

        """
        return asarray(upper(self))

    def zfill(self, width):
        """
        Return the numeric string left-filled with zeros in a string of
        length `width`.

        See Also
        --------
        char.zfill

        """
        return asarray(zfill(self, width))

    def isnumeric(self):
        """
        For each element in `self`, return True if there are only
        numeric characters in the element.

        See Also
        --------
        char.isnumeric

        """
        return isnumeric(self)

    def isdecimal(self):
        """
        For each element in `self`, return True if there are only
        decimal characters in the element.

        See Also
        --------
        char.isdecimal

        """
        return isdecimal(self)
|
| 1211 |
+
|
| 1212 |
+
|
| 1213 |
+
@set_module("numpy.char")
|
| 1214 |
+
def array(obj, itemsize=None, copy=True, unicode=None, order=None):
|
| 1215 |
+
"""
|
| 1216 |
+
Create a `~numpy.char.chararray`.
|
| 1217 |
+
|
| 1218 |
+
.. note::
|
| 1219 |
+
This class is provided for numarray backward-compatibility.
|
| 1220 |
+
New code (not concerned with numarray compatibility) should use
|
| 1221 |
+
arrays of type `bytes_` or `str_` and use the free functions
|
| 1222 |
+
in :mod:`numpy.char` for fast vectorized string operations instead.
|
| 1223 |
+
|
| 1224 |
+
Versus a NumPy array of dtype `bytes_` or `str_`, this
|
| 1225 |
+
class adds the following functionality:
|
| 1226 |
+
|
| 1227 |
+
1) values automatically have whitespace removed from the end
|
| 1228 |
+
when indexed
|
| 1229 |
+
|
| 1230 |
+
2) comparison operators automatically remove whitespace from the
|
| 1231 |
+
end when comparing values
|
| 1232 |
+
|
| 1233 |
+
3) vectorized string operations are provided as methods
|
| 1234 |
+
(e.g. `chararray.endswith <numpy.char.chararray.endswith>`)
|
| 1235 |
+
and infix operators (e.g. ``+, *, %``)
|
| 1236 |
+
|
| 1237 |
+
Parameters
|
| 1238 |
+
----------
|
| 1239 |
+
obj : array of str or unicode-like
|
| 1240 |
+
|
| 1241 |
+
itemsize : int, optional
|
| 1242 |
+
`itemsize` is the number of characters per scalar in the
|
| 1243 |
+
resulting array. If `itemsize` is None, and `obj` is an
|
| 1244 |
+
object array or a Python list, the `itemsize` will be
|
| 1245 |
+
automatically determined. If `itemsize` is provided and `obj`
|
| 1246 |
+
is of type str or unicode, then the `obj` string will be
|
| 1247 |
+
chunked into `itemsize` pieces.
|
| 1248 |
+
|
| 1249 |
+
copy : bool, optional
|
| 1250 |
+
If true (default), then the object is copied. Otherwise, a copy
|
| 1251 |
+
will only be made if ``__array__`` returns a copy, if obj is a
|
| 1252 |
+
nested sequence, or if a copy is needed to satisfy any of the other
|
| 1253 |
+
requirements (`itemsize`, unicode, `order`, etc.).
|
| 1254 |
+
|
| 1255 |
+
unicode : bool, optional
|
| 1256 |
+
When true, the resulting `~numpy.char.chararray` can contain Unicode
|
| 1257 |
+
characters, when false only 8-bit characters. If unicode is
|
| 1258 |
+
None and `obj` is one of the following:
|
| 1259 |
+
|
| 1260 |
+
- a `~numpy.char.chararray`,
|
| 1261 |
+
- an ndarray of type :class:`str_` or :class:`bytes_`
|
| 1262 |
+
- a Python :class:`str` or :class:`bytes` object,
|
| 1263 |
+
|
| 1264 |
+
then the unicode setting of the output array will be
|
| 1265 |
+
automatically determined.
|
| 1266 |
+
|
| 1267 |
+
order : {'C', 'F', 'A'}, optional
|
| 1268 |
+
Specify the order of the array. If order is 'C' (default), then the
|
| 1269 |
+
array will be in C-contiguous order (last-index varies the
|
| 1270 |
+
fastest). If order is 'F', then the returned array
|
| 1271 |
+
will be in Fortran-contiguous order (first-index varies the
|
| 1272 |
+
fastest). If order is 'A', then the returned array may
|
| 1273 |
+
be in any order (either C-, Fortran-contiguous, or even
|
| 1274 |
+
discontiguous).
|
| 1275 |
+
|
| 1276 |
+
Examples
|
| 1277 |
+
--------
|
| 1278 |
+
|
| 1279 |
+
>>> import numpy as np
|
| 1280 |
+
>>> char_array = np.char.array(['hello', 'world', 'numpy','array'])
|
| 1281 |
+
>>> char_array
|
| 1282 |
+
chararray(['hello', 'world', 'numpy', 'array'], dtype='<U5')
|
| 1283 |
+
|
| 1284 |
+
"""
|
| 1285 |
+
if isinstance(obj, (bytes, str)):
|
| 1286 |
+
if unicode is None:
|
| 1287 |
+
if isinstance(obj, str):
|
| 1288 |
+
unicode = True
|
| 1289 |
+
else:
|
| 1290 |
+
unicode = False
|
| 1291 |
+
|
| 1292 |
+
if itemsize is None:
|
| 1293 |
+
itemsize = len(obj)
|
| 1294 |
+
shape = len(obj) // itemsize
|
| 1295 |
+
|
| 1296 |
+
return chararray(shape, itemsize=itemsize, unicode=unicode,
|
| 1297 |
+
buffer=obj, order=order)
|
| 1298 |
+
|
| 1299 |
+
if isinstance(obj, (list, tuple)):
|
| 1300 |
+
obj = asnarray(obj)
|
| 1301 |
+
|
| 1302 |
+
if isinstance(obj, ndarray) and issubclass(obj.dtype.type, character):
|
| 1303 |
+
# If we just have a vanilla chararray, create a chararray
|
| 1304 |
+
# view around it.
|
| 1305 |
+
if not isinstance(obj, chararray):
|
| 1306 |
+
obj = obj.view(chararray)
|
| 1307 |
+
|
| 1308 |
+
if itemsize is None:
|
| 1309 |
+
itemsize = obj.itemsize
|
| 1310 |
+
# itemsize is in 8-bit chars, so for Unicode, we need
|
| 1311 |
+
# to divide by the size of a single Unicode character,
|
| 1312 |
+
# which for NumPy is always 4
|
| 1313 |
+
if issubclass(obj.dtype.type, str_):
|
| 1314 |
+
itemsize //= 4
|
| 1315 |
+
|
| 1316 |
+
if unicode is None:
|
| 1317 |
+
if issubclass(obj.dtype.type, str_):
|
| 1318 |
+
unicode = True
|
| 1319 |
+
else:
|
| 1320 |
+
unicode = False
|
| 1321 |
+
|
| 1322 |
+
if unicode:
|
| 1323 |
+
dtype = str_
|
| 1324 |
+
else:
|
| 1325 |
+
dtype = bytes_
|
| 1326 |
+
|
| 1327 |
+
if order is not None:
|
| 1328 |
+
obj = asnarray(obj, order=order)
|
| 1329 |
+
if (copy or
|
| 1330 |
+
(itemsize != obj.itemsize) or
|
| 1331 |
+
(not unicode and isinstance(obj, str_)) or
|
| 1332 |
+
(unicode and isinstance(obj, bytes_))):
|
| 1333 |
+
obj = obj.astype((dtype, int(itemsize)))
|
| 1334 |
+
return obj
|
| 1335 |
+
|
| 1336 |
+
if isinstance(obj, ndarray) and issubclass(obj.dtype.type, object):
|
| 1337 |
+
if itemsize is None:
|
| 1338 |
+
# Since no itemsize was specified, convert the input array to
|
| 1339 |
+
# a list so the ndarray constructor will automatically
|
| 1340 |
+
# determine the itemsize for us.
|
| 1341 |
+
obj = obj.tolist()
|
| 1342 |
+
# Fall through to the default case
|
| 1343 |
+
|
| 1344 |
+
if unicode:
|
| 1345 |
+
dtype = str_
|
| 1346 |
+
else:
|
| 1347 |
+
dtype = bytes_
|
| 1348 |
+
|
| 1349 |
+
if itemsize is None:
|
| 1350 |
+
val = narray(obj, dtype=dtype, order=order, subok=True)
|
| 1351 |
+
else:
|
| 1352 |
+
val = narray(obj, dtype=(dtype, itemsize), order=order, subok=True)
|
| 1353 |
+
return val.view(chararray)
|
| 1354 |
+
|
| 1355 |
+
|
| 1356 |
+
@set_module("numpy.char")
|
| 1357 |
+
def asarray(obj, itemsize=None, unicode=None, order=None):
|
| 1358 |
+
"""
|
| 1359 |
+
Convert the input to a `~numpy.char.chararray`, copying the data only if
|
| 1360 |
+
necessary.
|
| 1361 |
+
|
| 1362 |
+
Versus a NumPy array of dtype `bytes_` or `str_`, this
|
| 1363 |
+
class adds the following functionality:
|
| 1364 |
+
|
| 1365 |
+
1) values automatically have whitespace removed from the end
|
| 1366 |
+
when indexed
|
| 1367 |
+
|
| 1368 |
+
2) comparison operators automatically remove whitespace from the
|
| 1369 |
+
end when comparing values
|
| 1370 |
+
|
| 1371 |
+
3) vectorized string operations are provided as methods
|
| 1372 |
+
(e.g. `chararray.endswith <numpy.char.chararray.endswith>`)
|
| 1373 |
+
and infix operators (e.g. ``+``, ``*``, ``%``)
|
| 1374 |
+
|
| 1375 |
+
Parameters
|
| 1376 |
+
----------
|
| 1377 |
+
obj : array of str or unicode-like
|
| 1378 |
+
|
| 1379 |
+
itemsize : int, optional
|
| 1380 |
+
`itemsize` is the number of characters per scalar in the
|
| 1381 |
+
resulting array. If `itemsize` is None, and `obj` is an
|
| 1382 |
+
object array or a Python list, the `itemsize` will be
|
| 1383 |
+
automatically determined. If `itemsize` is provided and `obj`
|
| 1384 |
+
is of type str or unicode, then the `obj` string will be
|
| 1385 |
+
chunked into `itemsize` pieces.
|
| 1386 |
+
|
| 1387 |
+
unicode : bool, optional
|
| 1388 |
+
When true, the resulting `~numpy.char.chararray` can contain Unicode
|
| 1389 |
+
characters, when false only 8-bit characters. If unicode is
|
| 1390 |
+
None and `obj` is one of the following:
|
| 1391 |
+
|
| 1392 |
+
- a `~numpy.char.chararray`,
|
| 1393 |
+
- an ndarray of type `str_` or `unicode_`
|
| 1394 |
+
- a Python str or unicode object,
|
| 1395 |
+
|
| 1396 |
+
then the unicode setting of the output array will be
|
| 1397 |
+
automatically determined.
|
| 1398 |
+
|
| 1399 |
+
order : {'C', 'F'}, optional
|
| 1400 |
+
Specify the order of the array. If order is 'C' (default), then the
|
| 1401 |
+
array will be in C-contiguous order (last-index varies the
|
| 1402 |
+
fastest). If order is 'F', then the returned array
|
| 1403 |
+
will be in Fortran-contiguous order (first-index varies the
|
| 1404 |
+
fastest).
|
| 1405 |
+
|
| 1406 |
+
Examples
|
| 1407 |
+
--------
|
| 1408 |
+
>>> import numpy as np
|
| 1409 |
+
>>> np.char.asarray(['hello', 'world'])
|
| 1410 |
+
chararray(['hello', 'world'], dtype='<U5')
|
| 1411 |
+
|
| 1412 |
+
"""
|
| 1413 |
+
return array(obj, itemsize, copy=False,
|
| 1414 |
+
unicode=unicode, order=order)
|
janus/lib/python3.10/site-packages/numpy/_core/defchararray.pyi
ADDED
|
@@ -0,0 +1,1096 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import (
|
| 2 |
+
Literal as L,
|
| 3 |
+
overload,
|
| 4 |
+
TypeAlias,
|
| 5 |
+
TypeVar,
|
| 6 |
+
Any,
|
| 7 |
+
SupportsIndex,
|
| 8 |
+
SupportsInt,
|
| 9 |
+
)
|
| 10 |
+
|
| 11 |
+
import numpy as np
|
| 12 |
+
from numpy import (
|
| 13 |
+
ndarray,
|
| 14 |
+
dtype,
|
| 15 |
+
str_,
|
| 16 |
+
bytes_,
|
| 17 |
+
int_,
|
| 18 |
+
object_,
|
| 19 |
+
_OrderKACF,
|
| 20 |
+
_SupportsBuffer,
|
| 21 |
+
_SupportsArray
|
| 22 |
+
)
|
| 23 |
+
from numpy._typing import (
|
| 24 |
+
NDArray,
|
| 25 |
+
_Shape,
|
| 26 |
+
_ShapeLike,
|
| 27 |
+
_ArrayLikeStr_co as U_co,
|
| 28 |
+
_ArrayLikeBytes_co as S_co,
|
| 29 |
+
_ArrayLikeString_co as T_co,
|
| 30 |
+
_ArrayLikeAnyString_co as UST_co,
|
| 31 |
+
_ArrayLikeInt_co as i_co,
|
| 32 |
+
_ArrayLikeBool_co as b_co,
|
| 33 |
+
)
|
| 34 |
+
from numpy._core.multiarray import compare_chararrays
|
| 35 |
+
|
| 36 |
+
__all__ = [
|
| 37 |
+
"equal",
|
| 38 |
+
"not_equal",
|
| 39 |
+
"greater_equal",
|
| 40 |
+
"less_equal",
|
| 41 |
+
"greater",
|
| 42 |
+
"less",
|
| 43 |
+
"str_len",
|
| 44 |
+
"add",
|
| 45 |
+
"multiply",
|
| 46 |
+
"mod",
|
| 47 |
+
"capitalize",
|
| 48 |
+
"center",
|
| 49 |
+
"count",
|
| 50 |
+
"decode",
|
| 51 |
+
"encode",
|
| 52 |
+
"endswith",
|
| 53 |
+
"expandtabs",
|
| 54 |
+
"find",
|
| 55 |
+
"index",
|
| 56 |
+
"isalnum",
|
| 57 |
+
"isalpha",
|
| 58 |
+
"isdigit",
|
| 59 |
+
"islower",
|
| 60 |
+
"isspace",
|
| 61 |
+
"istitle",
|
| 62 |
+
"isupper",
|
| 63 |
+
"join",
|
| 64 |
+
"ljust",
|
| 65 |
+
"lower",
|
| 66 |
+
"lstrip",
|
| 67 |
+
"partition",
|
| 68 |
+
"replace",
|
| 69 |
+
"rfind",
|
| 70 |
+
"rindex",
|
| 71 |
+
"rjust",
|
| 72 |
+
"rpartition",
|
| 73 |
+
"rsplit",
|
| 74 |
+
"rstrip",
|
| 75 |
+
"split",
|
| 76 |
+
"splitlines",
|
| 77 |
+
"startswith",
|
| 78 |
+
"strip",
|
| 79 |
+
"swapcase",
|
| 80 |
+
"title",
|
| 81 |
+
"translate",
|
| 82 |
+
"upper",
|
| 83 |
+
"zfill",
|
| 84 |
+
"isnumeric",
|
| 85 |
+
"isdecimal",
|
| 86 |
+
"array",
|
| 87 |
+
"asarray",
|
| 88 |
+
"compare_chararrays",
|
| 89 |
+
"chararray",
|
| 90 |
+
]
|
| 91 |
+
|
| 92 |
+
_ShapeT_co = TypeVar("_ShapeT_co", bound=tuple[int, ...], covariant=True)
|
| 93 |
+
_SCT = TypeVar("_SCT", bound=np.character)
|
| 94 |
+
_CharDType_co = TypeVar("_CharDType_co", bound=dtype[np.character], covariant=True)
|
| 95 |
+
_CharArray: TypeAlias = chararray[tuple[int, ...], dtype[_SCT]]
|
| 96 |
+
|
| 97 |
+
_StringDTypeArray: TypeAlias = np.ndarray[_Shape, np.dtypes.StringDType]
|
| 98 |
+
_StringDTypeSupportsArray: TypeAlias = _SupportsArray[np.dtypes.StringDType]
|
| 99 |
+
_StringDTypeOrUnicodeArray: TypeAlias = np.ndarray[_Shape, np.dtype[np.str_]] | np.ndarray[_Shape, np.dtypes.StringDType]
|
| 100 |
+
|
| 101 |
+
class chararray(ndarray[_ShapeT_co, _CharDType_co]):
|
| 102 |
+
@overload
|
| 103 |
+
def __new__(
|
| 104 |
+
subtype,
|
| 105 |
+
shape: _ShapeLike,
|
| 106 |
+
itemsize: SupportsIndex | SupportsInt = ...,
|
| 107 |
+
unicode: L[False] = ...,
|
| 108 |
+
buffer: _SupportsBuffer = ...,
|
| 109 |
+
offset: SupportsIndex = ...,
|
| 110 |
+
strides: _ShapeLike = ...,
|
| 111 |
+
order: _OrderKACF = ...,
|
| 112 |
+
) -> chararray[_Shape, dtype[bytes_]]: ...
|
| 113 |
+
@overload
|
| 114 |
+
def __new__(
|
| 115 |
+
subtype,
|
| 116 |
+
shape: _ShapeLike,
|
| 117 |
+
itemsize: SupportsIndex | SupportsInt = ...,
|
| 118 |
+
unicode: L[True] = ...,
|
| 119 |
+
buffer: _SupportsBuffer = ...,
|
| 120 |
+
offset: SupportsIndex = ...,
|
| 121 |
+
strides: _ShapeLike = ...,
|
| 122 |
+
order: _OrderKACF = ...,
|
| 123 |
+
) -> chararray[_Shape, dtype[str_]]: ...
|
| 124 |
+
|
| 125 |
+
def __array_finalize__(self, obj: object) -> None: ...
|
| 126 |
+
def __mul__(self, other: i_co) -> chararray[_Shape, _CharDType_co]: ...
|
| 127 |
+
def __rmul__(self, other: i_co) -> chararray[_Shape, _CharDType_co]: ...
|
| 128 |
+
def __mod__(self, i: Any) -> chararray[_Shape, _CharDType_co]: ...
|
| 129 |
+
|
| 130 |
+
@overload
|
| 131 |
+
def __eq__(
|
| 132 |
+
self: _CharArray[str_],
|
| 133 |
+
other: U_co,
|
| 134 |
+
) -> NDArray[np.bool]: ...
|
| 135 |
+
@overload
|
| 136 |
+
def __eq__(
|
| 137 |
+
self: _CharArray[bytes_],
|
| 138 |
+
other: S_co,
|
| 139 |
+
) -> NDArray[np.bool]: ...
|
| 140 |
+
|
| 141 |
+
@overload
|
| 142 |
+
def __ne__(
|
| 143 |
+
self: _CharArray[str_],
|
| 144 |
+
other: U_co,
|
| 145 |
+
) -> NDArray[np.bool]: ...
|
| 146 |
+
@overload
|
| 147 |
+
def __ne__(
|
| 148 |
+
self: _CharArray[bytes_],
|
| 149 |
+
other: S_co,
|
| 150 |
+
) -> NDArray[np.bool]: ...
|
| 151 |
+
|
| 152 |
+
@overload
|
| 153 |
+
def __ge__(
|
| 154 |
+
self: _CharArray[str_],
|
| 155 |
+
other: U_co,
|
| 156 |
+
) -> NDArray[np.bool]: ...
|
| 157 |
+
@overload
|
| 158 |
+
def __ge__(
|
| 159 |
+
self: _CharArray[bytes_],
|
| 160 |
+
other: S_co,
|
| 161 |
+
) -> NDArray[np.bool]: ...
|
| 162 |
+
|
| 163 |
+
@overload
|
| 164 |
+
def __le__(
|
| 165 |
+
self: _CharArray[str_],
|
| 166 |
+
other: U_co,
|
| 167 |
+
) -> NDArray[np.bool]: ...
|
| 168 |
+
@overload
|
| 169 |
+
def __le__(
|
| 170 |
+
self: _CharArray[bytes_],
|
| 171 |
+
other: S_co,
|
| 172 |
+
) -> NDArray[np.bool]: ...
|
| 173 |
+
|
| 174 |
+
@overload
|
| 175 |
+
def __gt__(
|
| 176 |
+
self: _CharArray[str_],
|
| 177 |
+
other: U_co,
|
| 178 |
+
) -> NDArray[np.bool]: ...
|
| 179 |
+
@overload
|
| 180 |
+
def __gt__(
|
| 181 |
+
self: _CharArray[bytes_],
|
| 182 |
+
other: S_co,
|
| 183 |
+
) -> NDArray[np.bool]: ...
|
| 184 |
+
|
| 185 |
+
@overload
|
| 186 |
+
def __lt__(
|
| 187 |
+
self: _CharArray[str_],
|
| 188 |
+
other: U_co,
|
| 189 |
+
) -> NDArray[np.bool]: ...
|
| 190 |
+
@overload
|
| 191 |
+
def __lt__(
|
| 192 |
+
self: _CharArray[bytes_],
|
| 193 |
+
other: S_co,
|
| 194 |
+
) -> NDArray[np.bool]: ...
|
| 195 |
+
|
| 196 |
+
@overload
|
| 197 |
+
def __add__(
|
| 198 |
+
self: _CharArray[str_],
|
| 199 |
+
other: U_co,
|
| 200 |
+
) -> _CharArray[str_]: ...
|
| 201 |
+
@overload
|
| 202 |
+
def __add__(
|
| 203 |
+
self: _CharArray[bytes_],
|
| 204 |
+
other: S_co,
|
| 205 |
+
) -> _CharArray[bytes_]: ...
|
| 206 |
+
|
| 207 |
+
@overload
|
| 208 |
+
def __radd__(
|
| 209 |
+
self: _CharArray[str_],
|
| 210 |
+
other: U_co,
|
| 211 |
+
) -> _CharArray[str_]: ...
|
| 212 |
+
@overload
|
| 213 |
+
def __radd__(
|
| 214 |
+
self: _CharArray[bytes_],
|
| 215 |
+
other: S_co,
|
| 216 |
+
) -> _CharArray[bytes_]: ...
|
| 217 |
+
|
| 218 |
+
@overload
|
| 219 |
+
def center(
|
| 220 |
+
self: _CharArray[str_],
|
| 221 |
+
width: i_co,
|
| 222 |
+
fillchar: U_co = ...,
|
| 223 |
+
) -> _CharArray[str_]: ...
|
| 224 |
+
@overload
|
| 225 |
+
def center(
|
| 226 |
+
self: _CharArray[bytes_],
|
| 227 |
+
width: i_co,
|
| 228 |
+
fillchar: S_co = ...,
|
| 229 |
+
) -> _CharArray[bytes_]: ...
|
| 230 |
+
|
| 231 |
+
@overload
|
| 232 |
+
def count(
|
| 233 |
+
self: _CharArray[str_],
|
| 234 |
+
sub: U_co,
|
| 235 |
+
start: i_co = ...,
|
| 236 |
+
end: None | i_co = ...,
|
| 237 |
+
) -> NDArray[int_]: ...
|
| 238 |
+
@overload
|
| 239 |
+
def count(
|
| 240 |
+
self: _CharArray[bytes_],
|
| 241 |
+
sub: S_co,
|
| 242 |
+
start: i_co = ...,
|
| 243 |
+
end: None | i_co = ...,
|
| 244 |
+
) -> NDArray[int_]: ...
|
| 245 |
+
|
| 246 |
+
def decode(
|
| 247 |
+
self: _CharArray[bytes_],
|
| 248 |
+
encoding: None | str = ...,
|
| 249 |
+
errors: None | str = ...,
|
| 250 |
+
) -> _CharArray[str_]: ...
|
| 251 |
+
|
| 252 |
+
def encode(
|
| 253 |
+
self: _CharArray[str_],
|
| 254 |
+
encoding: None | str = ...,
|
| 255 |
+
errors: None | str = ...,
|
| 256 |
+
) -> _CharArray[bytes_]: ...
|
| 257 |
+
|
| 258 |
+
@overload
|
| 259 |
+
def endswith(
|
| 260 |
+
self: _CharArray[str_],
|
| 261 |
+
suffix: U_co,
|
| 262 |
+
start: i_co = ...,
|
| 263 |
+
end: None | i_co = ...,
|
| 264 |
+
) -> NDArray[np.bool]: ...
|
| 265 |
+
@overload
|
| 266 |
+
def endswith(
|
| 267 |
+
self: _CharArray[bytes_],
|
| 268 |
+
suffix: S_co,
|
| 269 |
+
start: i_co = ...,
|
| 270 |
+
end: None | i_co = ...,
|
| 271 |
+
) -> NDArray[np.bool]: ...
|
| 272 |
+
|
| 273 |
+
def expandtabs(
|
| 274 |
+
self,
|
| 275 |
+
tabsize: i_co = ...,
|
| 276 |
+
) -> chararray[_Shape, _CharDType_co]: ...
|
| 277 |
+
|
| 278 |
+
@overload
|
| 279 |
+
def find(
|
| 280 |
+
self: _CharArray[str_],
|
| 281 |
+
sub: U_co,
|
| 282 |
+
start: i_co = ...,
|
| 283 |
+
end: None | i_co = ...,
|
| 284 |
+
) -> NDArray[int_]: ...
|
| 285 |
+
@overload
|
| 286 |
+
def find(
|
| 287 |
+
self: _CharArray[bytes_],
|
| 288 |
+
sub: S_co,
|
| 289 |
+
start: i_co = ...,
|
| 290 |
+
end: None | i_co = ...,
|
| 291 |
+
) -> NDArray[int_]: ...
|
| 292 |
+
|
| 293 |
+
@overload
|
| 294 |
+
def index(
|
| 295 |
+
self: _CharArray[str_],
|
| 296 |
+
sub: U_co,
|
| 297 |
+
start: i_co = ...,
|
| 298 |
+
end: None | i_co = ...,
|
| 299 |
+
) -> NDArray[int_]: ...
|
| 300 |
+
@overload
|
| 301 |
+
def index(
|
| 302 |
+
self: _CharArray[bytes_],
|
| 303 |
+
sub: S_co,
|
| 304 |
+
start: i_co = ...,
|
| 305 |
+
end: None | i_co = ...,
|
| 306 |
+
) -> NDArray[int_]: ...
|
| 307 |
+
|
| 308 |
+
@overload
|
| 309 |
+
def join(
|
| 310 |
+
self: _CharArray[str_],
|
| 311 |
+
seq: U_co,
|
| 312 |
+
) -> _CharArray[str_]: ...
|
| 313 |
+
@overload
|
| 314 |
+
def join(
|
| 315 |
+
self: _CharArray[bytes_],
|
| 316 |
+
seq: S_co,
|
| 317 |
+
) -> _CharArray[bytes_]: ...
|
| 318 |
+
|
| 319 |
+
@overload
|
| 320 |
+
def ljust(
|
| 321 |
+
self: _CharArray[str_],
|
| 322 |
+
width: i_co,
|
| 323 |
+
fillchar: U_co = ...,
|
| 324 |
+
) -> _CharArray[str_]: ...
|
| 325 |
+
@overload
|
| 326 |
+
def ljust(
|
| 327 |
+
self: _CharArray[bytes_],
|
| 328 |
+
width: i_co,
|
| 329 |
+
fillchar: S_co = ...,
|
| 330 |
+
) -> _CharArray[bytes_]: ...
|
| 331 |
+
|
| 332 |
+
@overload
|
| 333 |
+
def lstrip(
|
| 334 |
+
self: _CharArray[str_],
|
| 335 |
+
chars: None | U_co = ...,
|
| 336 |
+
) -> _CharArray[str_]: ...
|
| 337 |
+
@overload
|
| 338 |
+
def lstrip(
|
| 339 |
+
self: _CharArray[bytes_],
|
| 340 |
+
chars: None | S_co = ...,
|
| 341 |
+
) -> _CharArray[bytes_]: ...
|
| 342 |
+
|
| 343 |
+
@overload
|
| 344 |
+
def partition(
|
| 345 |
+
self: _CharArray[str_],
|
| 346 |
+
sep: U_co,
|
| 347 |
+
) -> _CharArray[str_]: ...
|
| 348 |
+
@overload
|
| 349 |
+
def partition(
|
| 350 |
+
self: _CharArray[bytes_],
|
| 351 |
+
sep: S_co,
|
| 352 |
+
) -> _CharArray[bytes_]: ...
|
| 353 |
+
|
| 354 |
+
@overload
|
| 355 |
+
def replace(
|
| 356 |
+
self: _CharArray[str_],
|
| 357 |
+
old: U_co,
|
| 358 |
+
new: U_co,
|
| 359 |
+
count: None | i_co = ...,
|
| 360 |
+
) -> _CharArray[str_]: ...
|
| 361 |
+
@overload
|
| 362 |
+
def replace(
|
| 363 |
+
self: _CharArray[bytes_],
|
| 364 |
+
old: S_co,
|
| 365 |
+
new: S_co,
|
| 366 |
+
count: None | i_co = ...,
|
| 367 |
+
) -> _CharArray[bytes_]: ...
|
| 368 |
+
|
| 369 |
+
@overload
|
| 370 |
+
def rfind(
|
| 371 |
+
self: _CharArray[str_],
|
| 372 |
+
sub: U_co,
|
| 373 |
+
start: i_co = ...,
|
| 374 |
+
end: None | i_co = ...,
|
| 375 |
+
) -> NDArray[int_]: ...
|
| 376 |
+
@overload
|
| 377 |
+
def rfind(
|
| 378 |
+
self: _CharArray[bytes_],
|
| 379 |
+
sub: S_co,
|
| 380 |
+
start: i_co = ...,
|
| 381 |
+
end: None | i_co = ...,
|
| 382 |
+
) -> NDArray[int_]: ...
|
| 383 |
+
|
| 384 |
+
@overload
|
| 385 |
+
def rindex(
|
| 386 |
+
self: _CharArray[str_],
|
| 387 |
+
sub: U_co,
|
| 388 |
+
start: i_co = ...,
|
| 389 |
+
end: None | i_co = ...,
|
| 390 |
+
) -> NDArray[int_]: ...
|
| 391 |
+
@overload
|
| 392 |
+
def rindex(
|
| 393 |
+
self: _CharArray[bytes_],
|
| 394 |
+
sub: S_co,
|
| 395 |
+
start: i_co = ...,
|
| 396 |
+
end: None | i_co = ...,
|
| 397 |
+
) -> NDArray[int_]: ...
|
| 398 |
+
|
| 399 |
+
@overload
|
| 400 |
+
def rjust(
|
| 401 |
+
self: _CharArray[str_],
|
| 402 |
+
width: i_co,
|
| 403 |
+
fillchar: U_co = ...,
|
| 404 |
+
) -> _CharArray[str_]: ...
|
| 405 |
+
@overload
|
| 406 |
+
def rjust(
|
| 407 |
+
self: _CharArray[bytes_],
|
| 408 |
+
width: i_co,
|
| 409 |
+
fillchar: S_co = ...,
|
| 410 |
+
) -> _CharArray[bytes_]: ...
|
| 411 |
+
|
| 412 |
+
@overload
|
| 413 |
+
def rpartition(
|
| 414 |
+
self: _CharArray[str_],
|
| 415 |
+
sep: U_co,
|
| 416 |
+
) -> _CharArray[str_]: ...
|
| 417 |
+
@overload
|
| 418 |
+
def rpartition(
|
| 419 |
+
self: _CharArray[bytes_],
|
| 420 |
+
sep: S_co,
|
| 421 |
+
) -> _CharArray[bytes_]: ...
|
| 422 |
+
|
| 423 |
+
@overload
|
| 424 |
+
def rsplit(
|
| 425 |
+
self: _CharArray[str_],
|
| 426 |
+
sep: None | U_co = ...,
|
| 427 |
+
maxsplit: None | i_co = ...,
|
| 428 |
+
) -> NDArray[object_]: ...
|
| 429 |
+
@overload
|
| 430 |
+
def rsplit(
|
| 431 |
+
self: _CharArray[bytes_],
|
| 432 |
+
sep: None | S_co = ...,
|
| 433 |
+
maxsplit: None | i_co = ...,
|
| 434 |
+
) -> NDArray[object_]: ...
|
| 435 |
+
|
| 436 |
+
@overload
|
| 437 |
+
def rstrip(
|
| 438 |
+
self: _CharArray[str_],
|
| 439 |
+
chars: None | U_co = ...,
|
| 440 |
+
) -> _CharArray[str_]: ...
|
| 441 |
+
@overload
|
| 442 |
+
def rstrip(
|
| 443 |
+
self: _CharArray[bytes_],
|
| 444 |
+
chars: None | S_co = ...,
|
| 445 |
+
) -> _CharArray[bytes_]: ...
|
| 446 |
+
|
| 447 |
+
@overload
|
| 448 |
+
def split(
|
| 449 |
+
self: _CharArray[str_],
|
| 450 |
+
sep: None | U_co = ...,
|
| 451 |
+
maxsplit: None | i_co = ...,
|
| 452 |
+
) -> NDArray[object_]: ...
|
| 453 |
+
@overload
|
| 454 |
+
def split(
|
| 455 |
+
self: _CharArray[bytes_],
|
| 456 |
+
sep: None | S_co = ...,
|
| 457 |
+
maxsplit: None | i_co = ...,
|
| 458 |
+
) -> NDArray[object_]: ...
|
| 459 |
+
|
| 460 |
+
def splitlines(self, keepends: None | b_co = ...) -> NDArray[object_]: ...
|
| 461 |
+
|
| 462 |
+
@overload
|
| 463 |
+
def startswith(
|
| 464 |
+
self: _CharArray[str_],
|
| 465 |
+
prefix: U_co,
|
| 466 |
+
start: i_co = ...,
|
| 467 |
+
end: None | i_co = ...,
|
| 468 |
+
) -> NDArray[np.bool]: ...
|
| 469 |
+
@overload
|
| 470 |
+
def startswith(
|
| 471 |
+
self: _CharArray[bytes_],
|
| 472 |
+
prefix: S_co,
|
| 473 |
+
start: i_co = ...,
|
| 474 |
+
end: None | i_co = ...,
|
| 475 |
+
) -> NDArray[np.bool]: ...
|
| 476 |
+
|
| 477 |
+
@overload
|
| 478 |
+
def strip(
|
| 479 |
+
self: _CharArray[str_],
|
| 480 |
+
chars: None | U_co = ...,
|
| 481 |
+
) -> _CharArray[str_]: ...
|
| 482 |
+
@overload
|
| 483 |
+
def strip(
|
| 484 |
+
self: _CharArray[bytes_],
|
| 485 |
+
chars: None | S_co = ...,
|
| 486 |
+
) -> _CharArray[bytes_]: ...
|
| 487 |
+
|
| 488 |
+
@overload
|
| 489 |
+
def translate(
|
| 490 |
+
self: _CharArray[str_],
|
| 491 |
+
table: U_co,
|
| 492 |
+
deletechars: None | U_co = ...,
|
| 493 |
+
) -> _CharArray[str_]: ...
|
| 494 |
+
@overload
|
| 495 |
+
def translate(
|
| 496 |
+
self: _CharArray[bytes_],
|
| 497 |
+
table: S_co,
|
| 498 |
+
deletechars: None | S_co = ...,
|
| 499 |
+
) -> _CharArray[bytes_]: ...
|
| 500 |
+
|
| 501 |
+
def zfill(self, width: i_co) -> chararray[_Shape, _CharDType_co]: ...
|
| 502 |
+
def capitalize(self) -> chararray[_ShapeT_co, _CharDType_co]: ...
|
| 503 |
+
def title(self) -> chararray[_ShapeT_co, _CharDType_co]: ...
|
| 504 |
+
def swapcase(self) -> chararray[_ShapeT_co, _CharDType_co]: ...
|
| 505 |
+
def lower(self) -> chararray[_ShapeT_co, _CharDType_co]: ...
|
| 506 |
+
def upper(self) -> chararray[_ShapeT_co, _CharDType_co]: ...
|
| 507 |
+
def isalnum(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ...
|
| 508 |
+
def isalpha(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ...
|
| 509 |
+
def isdigit(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ...
|
| 510 |
+
def islower(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ...
|
| 511 |
+
def isspace(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ...
|
| 512 |
+
def istitle(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ...
|
| 513 |
+
def isupper(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ...
|
| 514 |
+
def isnumeric(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ...
|
| 515 |
+
def isdecimal(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ...
|
| 516 |
+
|
| 517 |
+
|
| 518 |
+
# Comparison
|
| 519 |
+
@overload
|
| 520 |
+
def equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ...
|
| 521 |
+
@overload
|
| 522 |
+
def equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ...
|
| 523 |
+
@overload
|
| 524 |
+
def equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ...
|
| 525 |
+
|
| 526 |
+
@overload
|
| 527 |
+
def not_equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ...
|
| 528 |
+
@overload
|
| 529 |
+
def not_equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ...
|
| 530 |
+
@overload
|
| 531 |
+
def not_equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ...
|
| 532 |
+
|
| 533 |
+
@overload
|
| 534 |
+
def greater_equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ...
|
| 535 |
+
@overload
|
| 536 |
+
def greater_equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ...
|
| 537 |
+
@overload
|
| 538 |
+
def greater_equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ...
|
| 539 |
+
|
| 540 |
+
@overload
|
| 541 |
+
def less_equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ...
|
| 542 |
+
@overload
|
| 543 |
+
def less_equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ...
|
| 544 |
+
@overload
|
| 545 |
+
def less_equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ...
|
| 546 |
+
|
| 547 |
+
@overload
|
| 548 |
+
def greater(x1: U_co, x2: U_co) -> NDArray[np.bool]: ...
|
| 549 |
+
@overload
|
| 550 |
+
def greater(x1: S_co, x2: S_co) -> NDArray[np.bool]: ...
|
| 551 |
+
@overload
|
| 552 |
+
def greater(x1: T_co, x2: T_co) -> NDArray[np.bool]: ...
|
| 553 |
+
|
| 554 |
+
@overload
|
| 555 |
+
def less(x1: U_co, x2: U_co) -> NDArray[np.bool]: ...
|
| 556 |
+
@overload
|
| 557 |
+
def less(x1: S_co, x2: S_co) -> NDArray[np.bool]: ...
|
| 558 |
+
@overload
|
| 559 |
+
def less(x1: T_co, x2: T_co) -> NDArray[np.bool]: ...
|
| 560 |
+
|
| 561 |
+
@overload
|
| 562 |
+
def add(x1: U_co, x2: U_co) -> NDArray[np.str_]: ...
|
| 563 |
+
@overload
|
| 564 |
+
def add(x1: S_co, x2: S_co) -> NDArray[np.bytes_]: ...
|
| 565 |
+
@overload
|
| 566 |
+
def add(x1: _StringDTypeSupportsArray, x2: _StringDTypeSupportsArray) -> _StringDTypeArray: ...
|
| 567 |
+
@overload
|
| 568 |
+
def add(x1: T_co, T_co) -> _StringDTypeOrUnicodeArray: ...
|
| 569 |
+
|
| 570 |
+
@overload
|
| 571 |
+
def multiply(a: U_co, i: i_co) -> NDArray[np.str_]: ...
|
| 572 |
+
@overload
|
| 573 |
+
def multiply(a: S_co, i: i_co) -> NDArray[np.bytes_]: ...
|
| 574 |
+
@overload
|
| 575 |
+
def multiply(a: _StringDTypeSupportsArray, i: i_co) -> _StringDTypeArray: ...
|
| 576 |
+
@overload
|
| 577 |
+
def multiply(a: T_co, i: i_co) -> _StringDTypeOrUnicodeArray: ...
|
| 578 |
+
|
| 579 |
+
|
| 580 |
+
@overload
|
| 581 |
+
def mod(a: U_co, value: Any) -> NDArray[np.str_]: ...
|
| 582 |
+
@overload
|
| 583 |
+
def mod(a: S_co, value: Any) -> NDArray[np.bytes_]: ...
|
| 584 |
+
@overload
|
| 585 |
+
def mod(a: _StringDTypeSupportsArray, value: Any) -> _StringDTypeArray: ...
|
| 586 |
+
@overload
|
| 587 |
+
def mod(a: T_co, value: Any) -> _StringDTypeOrUnicodeArray: ...
|
| 588 |
+
|
| 589 |
+
@overload
|
| 590 |
+
def capitalize(a: U_co) -> NDArray[str_]: ...
|
| 591 |
+
@overload
|
| 592 |
+
def capitalize(a: S_co) -> NDArray[bytes_]: ...
|
| 593 |
+
@overload
|
| 594 |
+
def capitalize(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ...
|
| 595 |
+
@overload
|
| 596 |
+
def capitalize(a: T_co) -> _StringDTypeOrUnicodeArray: ...
|
| 597 |
+
|
| 598 |
+
@overload
|
| 599 |
+
def center(a: U_co, width: i_co, fillchar: U_co = ...) -> NDArray[str_]: ...
|
| 600 |
+
@overload
|
| 601 |
+
def center(a: S_co, width: i_co, fillchar: S_co = ...) -> NDArray[bytes_]: ...
|
| 602 |
+
@overload
|
| 603 |
+
def center(a: _StringDTypeSupportsArray, width: i_co, fillchar: _StringDTypeSupportsArray = ...) -> _StringDTypeArray: ...
|
| 604 |
+
@overload
|
| 605 |
+
def center(a: T_co, width: i_co, fillchar: T_co = ...) -> _StringDTypeOrUnicodeArray: ...
|
| 606 |
+
|
| 607 |
+
def decode(
|
| 608 |
+
a: S_co,
|
| 609 |
+
encoding: None | str = ...,
|
| 610 |
+
errors: None | str = ...,
|
| 611 |
+
) -> NDArray[str_]: ...
|
| 612 |
+
def encode(
|
| 613 |
+
a: U_co | T_co,
|
| 614 |
+
encoding: None | str = ...,
|
| 615 |
+
errors: None | str = ...,
|
| 616 |
+
) -> NDArray[bytes_]: ...
|
| 617 |
+
|
| 618 |
+
@overload
|
| 619 |
+
def expandtabs(a: U_co, tabsize: i_co = ...) -> NDArray[str_]: ...
|
| 620 |
+
@overload
|
| 621 |
+
def expandtabs(a: S_co, tabsize: i_co = ...) -> NDArray[bytes_]: ...
|
| 622 |
+
@overload
|
| 623 |
+
def expandtabs(a: _StringDTypeSupportsArray, tabsize: i_co = ...) -> _StringDTypeArray: ...
|
| 624 |
+
@overload
|
| 625 |
+
def expandtabs(a: T_co, tabsize: i_co = ...) -> _StringDTypeOrUnicodeArray: ...
|
| 626 |
+
|
| 627 |
+
@overload
|
| 628 |
+
def join(sep: U_co, seq: U_co) -> NDArray[str_]: ...
|
| 629 |
+
@overload
|
| 630 |
+
def join(sep: S_co, seq: S_co) -> NDArray[bytes_]: ...
|
| 631 |
+
@overload
|
| 632 |
+
def join(sep: _StringDTypeSupportsArray, seq: _StringDTypeSupportsArray) -> _StringDTypeArray: ...
|
| 633 |
+
@overload
|
| 634 |
+
def join(sep: T_co, seq: T_co) -> _StringDTypeOrUnicodeArray: ...
|
| 635 |
+
|
| 636 |
+
@overload
|
| 637 |
+
def ljust(a: U_co, width: i_co, fillchar: U_co = ...) -> NDArray[str_]: ...
|
| 638 |
+
@overload
|
| 639 |
+
def ljust(a: S_co, width: i_co, fillchar: S_co = ...) -> NDArray[bytes_]: ...
|
| 640 |
+
@overload
|
| 641 |
+
def ljust(a: _StringDTypeSupportsArray, width: i_co, fillchar: _StringDTypeSupportsArray = ...) -> _StringDTypeArray: ...
|
| 642 |
+
@overload
|
| 643 |
+
def ljust(a: T_co, width: i_co, fillchar: T_co = ...) -> _StringDTypeOrUnicodeArray: ...
|
| 644 |
+
|
| 645 |
+
@overload
|
| 646 |
+
def lower(a: U_co) -> NDArray[str_]: ...
|
| 647 |
+
@overload
|
| 648 |
+
def lower(a: S_co) -> NDArray[bytes_]: ...
|
| 649 |
+
@overload
|
| 650 |
+
def lower(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ...
|
| 651 |
+
@overload
|
| 652 |
+
def lower(a: T_co) -> _StringDTypeOrUnicodeArray: ...
|
| 653 |
+
|
| 654 |
+
@overload
|
| 655 |
+
def lstrip(a: U_co, chars: None | U_co = ...) -> NDArray[str_]: ...
|
| 656 |
+
@overload
|
| 657 |
+
def lstrip(a: S_co, chars: None | S_co = ...) -> NDArray[bytes_]: ...
|
| 658 |
+
@overload
|
| 659 |
+
def lstrip(a: _StringDTypeSupportsArray, chars: None | _StringDTypeSupportsArray = ...) -> _StringDTypeArray: ...
|
| 660 |
+
@overload
|
| 661 |
+
def lstrip(a: T_co, chars: None | T_co = ...) -> _StringDTypeOrUnicodeArray: ...
|
| 662 |
+
|
| 663 |
+
@overload
|
| 664 |
+
def partition(a: U_co, sep: U_co) -> NDArray[str_]: ...
|
| 665 |
+
@overload
|
| 666 |
+
def partition(a: S_co, sep: S_co) -> NDArray[bytes_]: ...
|
| 667 |
+
@overload
|
| 668 |
+
def partition(a: _StringDTypeSupportsArray, sep: _StringDTypeSupportsArray) -> _StringDTypeArray: ...
|
| 669 |
+
@overload
|
| 670 |
+
def partition(a: T_co, sep: T_co) -> _StringDTypeOrUnicodeArray: ...
|
| 671 |
+
|
| 672 |
+
@overload
|
| 673 |
+
def replace(
|
| 674 |
+
a: U_co,
|
| 675 |
+
old: U_co,
|
| 676 |
+
new: U_co,
|
| 677 |
+
count: None | i_co = ...,
|
| 678 |
+
) -> NDArray[str_]: ...
|
| 679 |
+
@overload
|
| 680 |
+
def replace(
|
| 681 |
+
a: S_co,
|
| 682 |
+
old: S_co,
|
| 683 |
+
new: S_co,
|
| 684 |
+
count: None | i_co = ...,
|
| 685 |
+
) -> NDArray[bytes_]: ...
|
| 686 |
+
@overload
|
| 687 |
+
def replace(
|
| 688 |
+
a: _StringDTypeSupportsArray,
|
| 689 |
+
old: _StringDTypeSupportsArray,
|
| 690 |
+
new: _StringDTypeSupportsArray,
|
| 691 |
+
count: i_co = ...,
|
| 692 |
+
) -> _StringDTypeArray: ...
|
| 693 |
+
@overload
|
| 694 |
+
def replace(
|
| 695 |
+
a: T_co,
|
| 696 |
+
old: T_co,
|
| 697 |
+
new: T_co,
|
| 698 |
+
count: i_co = ...,
|
| 699 |
+
) -> _StringDTypeOrUnicodeArray: ...
|
| 700 |
+
|
| 701 |
+
@overload
|
| 702 |
+
def rjust(
|
| 703 |
+
a: U_co,
|
| 704 |
+
width: i_co,
|
| 705 |
+
fillchar: U_co = ...,
|
| 706 |
+
) -> NDArray[str_]: ...
|
| 707 |
+
@overload
|
| 708 |
+
def rjust(
|
| 709 |
+
a: S_co,
|
| 710 |
+
width: i_co,
|
| 711 |
+
fillchar: S_co = ...,
|
| 712 |
+
) -> NDArray[bytes_]: ...
|
| 713 |
+
@overload
|
| 714 |
+
def rjust(
|
| 715 |
+
a: _StringDTypeSupportsArray,
|
| 716 |
+
width: i_co,
|
| 717 |
+
fillchar: _StringDTypeSupportsArray = ...,
|
| 718 |
+
) -> _StringDTypeArray: ...
|
| 719 |
+
@overload
|
| 720 |
+
def rjust(
|
| 721 |
+
a: T_co,
|
| 722 |
+
width: i_co,
|
| 723 |
+
fillchar: T_co = ...,
|
| 724 |
+
) -> _StringDTypeOrUnicodeArray: ...
|
| 725 |
+
|
| 726 |
+
@overload
|
| 727 |
+
def rpartition(a: U_co, sep: U_co) -> NDArray[str_]: ...
|
| 728 |
+
@overload
|
| 729 |
+
def rpartition(a: S_co, sep: S_co) -> NDArray[bytes_]: ...
|
| 730 |
+
@overload
|
| 731 |
+
def rpartition(a: _StringDTypeSupportsArray, sep: _StringDTypeSupportsArray) -> _StringDTypeArray: ...
|
| 732 |
+
@overload
|
| 733 |
+
def rpartition(a: T_co, sep: T_co) -> _StringDTypeOrUnicodeArray: ...
|
| 734 |
+
|
| 735 |
+
@overload
|
| 736 |
+
def rsplit(
|
| 737 |
+
a: U_co,
|
| 738 |
+
sep: None | U_co = ...,
|
| 739 |
+
maxsplit: None | i_co = ...,
|
| 740 |
+
) -> NDArray[object_]: ...
|
| 741 |
+
@overload
|
| 742 |
+
def rsplit(
|
| 743 |
+
a: S_co,
|
| 744 |
+
sep: None | S_co = ...,
|
| 745 |
+
maxsplit: None | i_co = ...,
|
| 746 |
+
) -> NDArray[object_]: ...
|
| 747 |
+
@overload
|
| 748 |
+
def rsplit(
|
| 749 |
+
a: _StringDTypeSupportsArray,
|
| 750 |
+
sep: None | _StringDTypeSupportsArray = ...,
|
| 751 |
+
maxsplit: None | i_co = ...,
|
| 752 |
+
) -> NDArray[object_]: ...
|
| 753 |
+
@overload
|
| 754 |
+
def rsplit(
|
| 755 |
+
a: T_co,
|
| 756 |
+
sep: None | T_co = ...,
|
| 757 |
+
maxsplit: None | i_co = ...,
|
| 758 |
+
) -> NDArray[object_]: ...
|
| 759 |
+
|
| 760 |
+
@overload
|
| 761 |
+
def rstrip(a: U_co, chars: None | U_co = ...) -> NDArray[str_]: ...
|
| 762 |
+
@overload
|
| 763 |
+
def rstrip(a: S_co, chars: None | S_co = ...) -> NDArray[bytes_]: ...
|
| 764 |
+
@overload
|
| 765 |
+
def rstrip(a: _StringDTypeSupportsArray, chars: None | _StringDTypeSupportsArray = ...) -> _StringDTypeArray: ...
|
| 766 |
+
@overload
|
| 767 |
+
def rstrip(a: T_co, chars: None | T_co = ...) -> _StringDTypeOrUnicodeArray: ...
|
| 768 |
+
|
| 769 |
+
@overload
|
| 770 |
+
def split(
|
| 771 |
+
a: U_co,
|
| 772 |
+
sep: None | U_co = ...,
|
| 773 |
+
maxsplit: None | i_co = ...,
|
| 774 |
+
) -> NDArray[object_]: ...
|
| 775 |
+
@overload
|
| 776 |
+
def split(
|
| 777 |
+
a: S_co,
|
| 778 |
+
sep: None | S_co = ...,
|
| 779 |
+
maxsplit: None | i_co = ...,
|
| 780 |
+
) -> NDArray[object_]: ...
|
| 781 |
+
@overload
|
| 782 |
+
def split(
|
| 783 |
+
a: _StringDTypeSupportsArray,
|
| 784 |
+
sep: None | _StringDTypeSupportsArray = ...,
|
| 785 |
+
maxsplit: None | i_co = ...,
|
| 786 |
+
) -> NDArray[object_]: ...
|
| 787 |
+
@overload
|
| 788 |
+
def split(
|
| 789 |
+
a: T_co,
|
| 790 |
+
sep: None | T_co = ...,
|
| 791 |
+
maxsplit: None | i_co = ...,
|
| 792 |
+
) -> NDArray[object_]: ...
|
| 793 |
+
|
| 794 |
+
def splitlines(a: UST_co, keepends: None | b_co = ...) -> NDArray[np.object_]: ...
|
| 795 |
+
|
| 796 |
+
@overload
|
| 797 |
+
def strip(a: U_co, chars: None | U_co = ...) -> NDArray[str_]: ...
|
| 798 |
+
@overload
|
| 799 |
+
def strip(a: S_co, chars: None | S_co = ...) -> NDArray[bytes_]: ...
|
| 800 |
+
@overload
|
| 801 |
+
def strip(a: _StringDTypeSupportsArray, chars: None | _StringDTypeSupportsArray = ...) -> _StringDTypeArray: ...
|
| 802 |
+
@overload
|
| 803 |
+
def strip(a: T_co, chars: None | T_co = ...) -> _StringDTypeOrUnicodeArray: ...
|
| 804 |
+
|
| 805 |
+
@overload
|
| 806 |
+
def swapcase(a: U_co) -> NDArray[str_]: ...
|
| 807 |
+
@overload
|
| 808 |
+
def swapcase(a: S_co) -> NDArray[bytes_]: ...
|
| 809 |
+
@overload
|
| 810 |
+
def swapcase(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ...
|
| 811 |
+
@overload
|
| 812 |
+
def swapcase(a: T_co) -> _StringDTypeOrUnicodeArray: ...
|
| 813 |
+
|
| 814 |
+
@overload
|
| 815 |
+
def title(a: U_co) -> NDArray[str_]: ...
|
| 816 |
+
@overload
|
| 817 |
+
def title(a: S_co) -> NDArray[bytes_]: ...
|
| 818 |
+
@overload
|
| 819 |
+
def title(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ...
|
| 820 |
+
@overload
|
| 821 |
+
def title(a: T_co) -> _StringDTypeOrUnicodeArray: ...
|
| 822 |
+
|
| 823 |
+
@overload
|
| 824 |
+
def translate(
|
| 825 |
+
a: U_co,
|
| 826 |
+
table: str,
|
| 827 |
+
deletechars: None | str = ...,
|
| 828 |
+
) -> NDArray[str_]: ...
|
| 829 |
+
@overload
|
| 830 |
+
def translate(
|
| 831 |
+
a: S_co,
|
| 832 |
+
table: str,
|
| 833 |
+
deletechars: None | str = ...,
|
| 834 |
+
) -> NDArray[bytes_]: ...
|
| 835 |
+
@overload
|
| 836 |
+
def translate(
|
| 837 |
+
a: _StringDTypeSupportsArray,
|
| 838 |
+
table: str,
|
| 839 |
+
deletechars: None | str = ...,
|
| 840 |
+
) -> _StringDTypeArray: ...
|
| 841 |
+
@overload
|
| 842 |
+
def translate(
|
| 843 |
+
a: T_co,
|
| 844 |
+
table: str,
|
| 845 |
+
deletechars: None | str = ...,
|
| 846 |
+
) -> _StringDTypeOrUnicodeArray: ...
|
| 847 |
+
|
| 848 |
+
@overload
|
| 849 |
+
def upper(a: U_co) -> NDArray[str_]: ...
|
| 850 |
+
@overload
|
| 851 |
+
def upper(a: S_co) -> NDArray[bytes_]: ...
|
| 852 |
+
@overload
|
| 853 |
+
def upper(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ...
|
| 854 |
+
@overload
|
| 855 |
+
def upper(a: T_co) -> _StringDTypeOrUnicodeArray: ...
|
| 856 |
+
|
| 857 |
+
@overload
|
| 858 |
+
def zfill(a: U_co, width: i_co) -> NDArray[str_]: ...
|
| 859 |
+
@overload
|
| 860 |
+
def zfill(a: S_co, width: i_co) -> NDArray[bytes_]: ...
|
| 861 |
+
@overload
|
| 862 |
+
def zfill(a: _StringDTypeSupportsArray, width: i_co) -> _StringDTypeArray: ...
|
| 863 |
+
@overload
|
| 864 |
+
def zfill(a: T_co, width: i_co) -> _StringDTypeOrUnicodeArray: ...
|
| 865 |
+
|
| 866 |
+
# String information
|
| 867 |
+
@overload
|
| 868 |
+
def count(
|
| 869 |
+
a: U_co,
|
| 870 |
+
sub: U_co,
|
| 871 |
+
start: i_co = ...,
|
| 872 |
+
end: None | i_co = ...,
|
| 873 |
+
) -> NDArray[int_]: ...
|
| 874 |
+
@overload
|
| 875 |
+
def count(
|
| 876 |
+
a: S_co,
|
| 877 |
+
sub: S_co,
|
| 878 |
+
start: i_co = ...,
|
| 879 |
+
end: None | i_co = ...,
|
| 880 |
+
) -> NDArray[int_]: ...
|
| 881 |
+
@overload
|
| 882 |
+
def count(
|
| 883 |
+
a: T_co,
|
| 884 |
+
sub: T_co,
|
| 885 |
+
start: i_co = ...,
|
| 886 |
+
end: i_co | None = ...,
|
| 887 |
+
) -> NDArray[np.int_]: ...
|
| 888 |
+
|
| 889 |
+
@overload
|
| 890 |
+
def endswith(
|
| 891 |
+
a: U_co,
|
| 892 |
+
suffix: U_co,
|
| 893 |
+
start: i_co = ...,
|
| 894 |
+
end: None | i_co = ...,
|
| 895 |
+
) -> NDArray[np.bool]: ...
|
| 896 |
+
@overload
|
| 897 |
+
def endswith(
|
| 898 |
+
a: S_co,
|
| 899 |
+
suffix: S_co,
|
| 900 |
+
start: i_co = ...,
|
| 901 |
+
end: None | i_co = ...,
|
| 902 |
+
) -> NDArray[np.bool]: ...
|
| 903 |
+
@overload
|
| 904 |
+
def endswith(
|
| 905 |
+
a: T_co,
|
| 906 |
+
suffix: T_co,
|
| 907 |
+
start: i_co = ...,
|
| 908 |
+
end: i_co | None = ...,
|
| 909 |
+
) -> NDArray[np.bool]: ...
|
| 910 |
+
|
| 911 |
+
@overload
|
| 912 |
+
def find(
|
| 913 |
+
a: U_co,
|
| 914 |
+
sub: U_co,
|
| 915 |
+
start: i_co = ...,
|
| 916 |
+
end: None | i_co = ...,
|
| 917 |
+
) -> NDArray[int_]: ...
|
| 918 |
+
@overload
|
| 919 |
+
def find(
|
| 920 |
+
a: S_co,
|
| 921 |
+
sub: S_co,
|
| 922 |
+
start: i_co = ...,
|
| 923 |
+
end: None | i_co = ...,
|
| 924 |
+
) -> NDArray[int_]: ...
|
| 925 |
+
@overload
|
| 926 |
+
def find(
|
| 927 |
+
a: T_co,
|
| 928 |
+
sub: T_co,
|
| 929 |
+
start: i_co = ...,
|
| 930 |
+
end: i_co | None = ...,
|
| 931 |
+
) -> NDArray[np.int_]: ...
|
| 932 |
+
|
| 933 |
+
@overload
|
| 934 |
+
def index(
|
| 935 |
+
a: U_co,
|
| 936 |
+
sub: U_co,
|
| 937 |
+
start: i_co = ...,
|
| 938 |
+
end: None | i_co = ...,
|
| 939 |
+
) -> NDArray[int_]: ...
|
| 940 |
+
@overload
|
| 941 |
+
def index(
|
| 942 |
+
a: S_co,
|
| 943 |
+
sub: S_co,
|
| 944 |
+
start: i_co = ...,
|
| 945 |
+
end: None | i_co = ...,
|
| 946 |
+
) -> NDArray[int_]: ...
|
| 947 |
+
@overload
|
| 948 |
+
def index(
|
| 949 |
+
a: T_co,
|
| 950 |
+
sub: T_co,
|
| 951 |
+
start: i_co = ...,
|
| 952 |
+
end: i_co | None = ...,
|
| 953 |
+
) -> NDArray[np.int_]: ...
|
| 954 |
+
|
| 955 |
+
def isalpha(a: UST_co) -> NDArray[np.bool]: ...
|
| 956 |
+
def isalnum(a: UST_co) -> NDArray[np.bool]: ...
|
| 957 |
+
def isdecimal(a: U_co | T_co) -> NDArray[np.bool]: ...
|
| 958 |
+
def isdigit(a: UST_co) -> NDArray[np.bool]: ...
|
| 959 |
+
def islower(a: UST_co) -> NDArray[np.bool]: ...
|
| 960 |
+
def isnumeric(a: U_co | T_co) -> NDArray[np.bool]: ...
|
| 961 |
+
def isspace(a: UST_co) -> NDArray[np.bool]: ...
|
| 962 |
+
def istitle(a: UST_co) -> NDArray[np.bool]: ...
|
| 963 |
+
def isupper(a: UST_co) -> NDArray[np.bool]: ...
|
| 964 |
+
|
| 965 |
+
@overload
|
| 966 |
+
def rfind(
|
| 967 |
+
a: U_co,
|
| 968 |
+
sub: U_co,
|
| 969 |
+
start: i_co = ...,
|
| 970 |
+
end: None | i_co = ...,
|
| 971 |
+
) -> NDArray[int_]: ...
|
| 972 |
+
@overload
|
| 973 |
+
def rfind(
|
| 974 |
+
a: S_co,
|
| 975 |
+
sub: S_co,
|
| 976 |
+
start: i_co = ...,
|
| 977 |
+
end: None | i_co = ...,
|
| 978 |
+
) -> NDArray[int_]: ...
|
| 979 |
+
@overload
|
| 980 |
+
def rfind(
|
| 981 |
+
a: T_co,
|
| 982 |
+
sub: T_co,
|
| 983 |
+
start: i_co = ...,
|
| 984 |
+
end: i_co | None = ...,
|
| 985 |
+
) -> NDArray[np.int_]: ...
|
| 986 |
+
|
| 987 |
+
@overload
|
| 988 |
+
def rindex(
|
| 989 |
+
a: U_co,
|
| 990 |
+
sub: U_co,
|
| 991 |
+
start: i_co = ...,
|
| 992 |
+
end: None | i_co = ...,
|
| 993 |
+
) -> NDArray[int_]: ...
|
| 994 |
+
@overload
|
| 995 |
+
def rindex(
|
| 996 |
+
a: S_co,
|
| 997 |
+
sub: S_co,
|
| 998 |
+
start: i_co = ...,
|
| 999 |
+
end: None | i_co = ...,
|
| 1000 |
+
) -> NDArray[int_]: ...
|
| 1001 |
+
@overload
|
| 1002 |
+
def rindex(
|
| 1003 |
+
a: T_co,
|
| 1004 |
+
sub: T_co,
|
| 1005 |
+
start: i_co = ...,
|
| 1006 |
+
end: i_co | None = ...,
|
| 1007 |
+
) -> NDArray[np.int_]: ...
|
| 1008 |
+
|
| 1009 |
+
@overload
|
| 1010 |
+
def startswith(
|
| 1011 |
+
a: U_co,
|
| 1012 |
+
prefix: U_co,
|
| 1013 |
+
start: i_co = ...,
|
| 1014 |
+
end: None | i_co = ...,
|
| 1015 |
+
) -> NDArray[np.bool]: ...
|
| 1016 |
+
@overload
|
| 1017 |
+
def startswith(
|
| 1018 |
+
a: S_co,
|
| 1019 |
+
prefix: S_co,
|
| 1020 |
+
start: i_co = ...,
|
| 1021 |
+
end: None | i_co = ...,
|
| 1022 |
+
) -> NDArray[np.bool]: ...
|
| 1023 |
+
@overload
|
| 1024 |
+
def startswith(
|
| 1025 |
+
a: T_co,
|
| 1026 |
+
suffix: T_co,
|
| 1027 |
+
start: i_co = ...,
|
| 1028 |
+
end: i_co | None = ...,
|
| 1029 |
+
) -> NDArray[np.bool]: ...
|
| 1030 |
+
|
| 1031 |
+
def str_len(A: UST_co) -> NDArray[int_]: ...
|
| 1032 |
+
|
| 1033 |
+
# Overload 1 and 2: str- or bytes-based array-likes
|
| 1034 |
+
# overload 3: arbitrary object with unicode=False (-> bytes_)
|
| 1035 |
+
# overload 4: arbitrary object with unicode=True (-> str_)
|
| 1036 |
+
@overload
|
| 1037 |
+
def array(
|
| 1038 |
+
obj: U_co,
|
| 1039 |
+
itemsize: None | int = ...,
|
| 1040 |
+
copy: bool = ...,
|
| 1041 |
+
unicode: L[False] = ...,
|
| 1042 |
+
order: _OrderKACF = ...,
|
| 1043 |
+
) -> _CharArray[str_]: ...
|
| 1044 |
+
@overload
|
| 1045 |
+
def array(
|
| 1046 |
+
obj: S_co,
|
| 1047 |
+
itemsize: None | int = ...,
|
| 1048 |
+
copy: bool = ...,
|
| 1049 |
+
unicode: L[False] = ...,
|
| 1050 |
+
order: _OrderKACF = ...,
|
| 1051 |
+
) -> _CharArray[bytes_]: ...
|
| 1052 |
+
@overload
|
| 1053 |
+
def array(
|
| 1054 |
+
obj: object,
|
| 1055 |
+
itemsize: None | int = ...,
|
| 1056 |
+
copy: bool = ...,
|
| 1057 |
+
unicode: L[False] = ...,
|
| 1058 |
+
order: _OrderKACF = ...,
|
| 1059 |
+
) -> _CharArray[bytes_]: ...
|
| 1060 |
+
@overload
|
| 1061 |
+
def array(
|
| 1062 |
+
obj: object,
|
| 1063 |
+
itemsize: None | int = ...,
|
| 1064 |
+
copy: bool = ...,
|
| 1065 |
+
unicode: L[True] = ...,
|
| 1066 |
+
order: _OrderKACF = ...,
|
| 1067 |
+
) -> _CharArray[str_]: ...
|
| 1068 |
+
|
| 1069 |
+
@overload
|
| 1070 |
+
def asarray(
|
| 1071 |
+
obj: U_co,
|
| 1072 |
+
itemsize: None | int = ...,
|
| 1073 |
+
unicode: L[False] = ...,
|
| 1074 |
+
order: _OrderKACF = ...,
|
| 1075 |
+
) -> _CharArray[str_]: ...
|
| 1076 |
+
@overload
|
| 1077 |
+
def asarray(
|
| 1078 |
+
obj: S_co,
|
| 1079 |
+
itemsize: None | int = ...,
|
| 1080 |
+
unicode: L[False] = ...,
|
| 1081 |
+
order: _OrderKACF = ...,
|
| 1082 |
+
) -> _CharArray[bytes_]: ...
|
| 1083 |
+
@overload
|
| 1084 |
+
def asarray(
|
| 1085 |
+
obj: object,
|
| 1086 |
+
itemsize: None | int = ...,
|
| 1087 |
+
unicode: L[False] = ...,
|
| 1088 |
+
order: _OrderKACF = ...,
|
| 1089 |
+
) -> _CharArray[bytes_]: ...
|
| 1090 |
+
@overload
|
| 1091 |
+
def asarray(
|
| 1092 |
+
obj: object,
|
| 1093 |
+
itemsize: None | int = ...,
|
| 1094 |
+
unicode: L[True] = ...,
|
| 1095 |
+
order: _OrderKACF = ...,
|
| 1096 |
+
) -> _CharArray[str_]: ...
|
janus/lib/python3.10/site-packages/numpy/_core/einsumfunc.py
ADDED
|
@@ -0,0 +1,1499 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Implementation of optimized einsum.
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
import itertools
|
| 6 |
+
import operator
|
| 7 |
+
|
| 8 |
+
from numpy._core.multiarray import c_einsum
|
| 9 |
+
from numpy._core.numeric import asanyarray, tensordot
|
| 10 |
+
from numpy._core.overrides import array_function_dispatch
|
| 11 |
+
|
| 12 |
+
__all__ = ['einsum', 'einsum_path']
|
| 13 |
+
|
| 14 |
+
# importing string for string.ascii_letters would be too slow
|
| 15 |
+
# the first import before caching has been measured to take 800 µs (#23777)
|
| 16 |
+
einsum_symbols = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
|
| 17 |
+
einsum_symbols_set = set(einsum_symbols)
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def _flop_count(idx_contraction, inner, num_terms, size_dictionary):
|
| 21 |
+
"""
|
| 22 |
+
Computes the number of FLOPS in the contraction.
|
| 23 |
+
|
| 24 |
+
Parameters
|
| 25 |
+
----------
|
| 26 |
+
idx_contraction : iterable
|
| 27 |
+
The indices involved in the contraction
|
| 28 |
+
inner : bool
|
| 29 |
+
Does this contraction require an inner product?
|
| 30 |
+
num_terms : int
|
| 31 |
+
The number of terms in a contraction
|
| 32 |
+
size_dictionary : dict
|
| 33 |
+
The size of each of the indices in idx_contraction
|
| 34 |
+
|
| 35 |
+
Returns
|
| 36 |
+
-------
|
| 37 |
+
flop_count : int
|
| 38 |
+
The total number of FLOPS required for the contraction.
|
| 39 |
+
|
| 40 |
+
Examples
|
| 41 |
+
--------
|
| 42 |
+
|
| 43 |
+
>>> _flop_count('abc', False, 1, {'a': 2, 'b':3, 'c':5})
|
| 44 |
+
30
|
| 45 |
+
|
| 46 |
+
>>> _flop_count('abc', True, 2, {'a': 2, 'b':3, 'c':5})
|
| 47 |
+
60
|
| 48 |
+
|
| 49 |
+
"""
|
| 50 |
+
|
| 51 |
+
overall_size = _compute_size_by_dict(idx_contraction, size_dictionary)
|
| 52 |
+
op_factor = max(1, num_terms - 1)
|
| 53 |
+
if inner:
|
| 54 |
+
op_factor += 1
|
| 55 |
+
|
| 56 |
+
return overall_size * op_factor
|
| 57 |
+
|
| 58 |
+
def _compute_size_by_dict(indices, idx_dict):
|
| 59 |
+
"""
|
| 60 |
+
Computes the product of the elements in indices based on the dictionary
|
| 61 |
+
idx_dict.
|
| 62 |
+
|
| 63 |
+
Parameters
|
| 64 |
+
----------
|
| 65 |
+
indices : iterable
|
| 66 |
+
Indices to base the product on.
|
| 67 |
+
idx_dict : dictionary
|
| 68 |
+
Dictionary of index sizes
|
| 69 |
+
|
| 70 |
+
Returns
|
| 71 |
+
-------
|
| 72 |
+
ret : int
|
| 73 |
+
The resulting product.
|
| 74 |
+
|
| 75 |
+
Examples
|
| 76 |
+
--------
|
| 77 |
+
>>> _compute_size_by_dict('abbc', {'a': 2, 'b':3, 'c':5})
|
| 78 |
+
90
|
| 79 |
+
|
| 80 |
+
"""
|
| 81 |
+
ret = 1
|
| 82 |
+
for i in indices:
|
| 83 |
+
ret *= idx_dict[i]
|
| 84 |
+
return ret
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
def _find_contraction(positions, input_sets, output_set):
|
| 88 |
+
"""
|
| 89 |
+
Finds the contraction for a given set of input and output sets.
|
| 90 |
+
|
| 91 |
+
Parameters
|
| 92 |
+
----------
|
| 93 |
+
positions : iterable
|
| 94 |
+
Integer positions of terms used in the contraction.
|
| 95 |
+
input_sets : list
|
| 96 |
+
List of sets that represent the lhs side of the einsum subscript
|
| 97 |
+
output_set : set
|
| 98 |
+
Set that represents the rhs side of the overall einsum subscript
|
| 99 |
+
|
| 100 |
+
Returns
|
| 101 |
+
-------
|
| 102 |
+
new_result : set
|
| 103 |
+
The indices of the resulting contraction
|
| 104 |
+
remaining : list
|
| 105 |
+
List of sets that have not been contracted, the new set is appended to
|
| 106 |
+
the end of this list
|
| 107 |
+
idx_removed : set
|
| 108 |
+
Indices removed from the entire contraction
|
| 109 |
+
idx_contraction : set
|
| 110 |
+
The indices used in the current contraction
|
| 111 |
+
|
| 112 |
+
Examples
|
| 113 |
+
--------
|
| 114 |
+
|
| 115 |
+
# A simple dot product test case
|
| 116 |
+
>>> pos = (0, 1)
|
| 117 |
+
>>> isets = [set('ab'), set('bc')]
|
| 118 |
+
>>> oset = set('ac')
|
| 119 |
+
>>> _find_contraction(pos, isets, oset)
|
| 120 |
+
({'a', 'c'}, [{'a', 'c'}], {'b'}, {'a', 'b', 'c'})
|
| 121 |
+
|
| 122 |
+
# A more complex case with additional terms in the contraction
|
| 123 |
+
>>> pos = (0, 2)
|
| 124 |
+
>>> isets = [set('abd'), set('ac'), set('bdc')]
|
| 125 |
+
>>> oset = set('ac')
|
| 126 |
+
>>> _find_contraction(pos, isets, oset)
|
| 127 |
+
({'a', 'c'}, [{'a', 'c'}, {'a', 'c'}], {'b', 'd'}, {'a', 'b', 'c', 'd'})
|
| 128 |
+
"""
|
| 129 |
+
|
| 130 |
+
idx_contract = set()
|
| 131 |
+
idx_remain = output_set.copy()
|
| 132 |
+
remaining = []
|
| 133 |
+
for ind, value in enumerate(input_sets):
|
| 134 |
+
if ind in positions:
|
| 135 |
+
idx_contract |= value
|
| 136 |
+
else:
|
| 137 |
+
remaining.append(value)
|
| 138 |
+
idx_remain |= value
|
| 139 |
+
|
| 140 |
+
new_result = idx_remain & idx_contract
|
| 141 |
+
idx_removed = (idx_contract - new_result)
|
| 142 |
+
remaining.append(new_result)
|
| 143 |
+
|
| 144 |
+
return (new_result, remaining, idx_removed, idx_contract)
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
def _optimal_path(input_sets, output_set, idx_dict, memory_limit):
    """
    Computes all possible pair contractions, sieves the results based
    on ``memory_limit`` and returns the lowest cost path. This algorithm
    scales factorial with respect to the elements in the list ``input_sets``.

    Parameters
    ----------
    input_sets : list
        List of sets that represent the lhs side of the einsum subscript
    output_set : set
        Set that represents the rhs side of the overall einsum subscript
    idx_dict : dictionary
        Dictionary of index sizes
    memory_limit : int
        The maximum number of elements in a temporary array

    Returns
    -------
    path : list
        The optimal contraction order within the memory limit constraint.

    Examples
    --------
    >>> isets = [set('abd'), set('ac'), set('bdc')]
    >>> oset = set()
    >>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4}
    >>> _optimal_path(isets, oset, idx_sizes, 5000)
    [(0, 2), (0, 1)]
    """
    # Each candidate is (cost so far, pair positions taken, operand sets left)
    candidates = [(0, [], input_sets)]
    for step in range(len(input_sets) - 1):
        next_candidates = []

        # Expand every candidate by every possible pair contraction
        for cost, taken, operand_sets in candidates:
            n_operands = len(input_sets) - step
            for pair in itertools.combinations(range(n_operands), 2):

                contraction = _find_contraction(pair, operand_sets, output_set)
                new_result, new_operand_sets, idx_removed, idx_contract = (
                    contraction
                )

                # Discard intermediates exceeding the memory limit
                if _compute_size_by_dict(new_result, idx_dict) > memory_limit:
                    continue

                new_cost = cost + _flop_count(
                    idx_contract, idx_removed, len(pair), idx_dict
                )
                next_candidates.append(
                    (new_cost, taken + [pair], new_operand_sets)
                )

        if next_candidates:
            candidates = next_candidates
        else:
            # Every pair blew the memory limit: take the cheapest path so far
            # and finish with a single contraction over the remaining terms.
            path = min(candidates, key=lambda c: c[0])[1]
            path += [tuple(range(len(input_sets) - step))]
            return path

    # If we have not found anything return single einsum contraction
    if len(candidates) == 0:
        return [tuple(range(len(input_sets)))]

    return min(candidates, key=lambda c: c[0])[1]


def _parse_possible_contraction(
    positions, input_sets, output_set, idx_dict,
    memory_limit, path_cost, naive_cost
):
    """Compute the cost (removed size + flops) and resultant indices for
    performing the contraction specified by ``positions``.

    Parameters
    ----------
    positions : tuple of int
        The locations of the proposed tensors to contract.
    input_sets : list of sets
        The indices found on each tensors.
    output_set : set
        The output indices of the expression.
    idx_dict : dict
        Mapping of each index to its size.
    memory_limit : int
        The total allowed size for an intermediary tensor.
    path_cost : int
        The contraction cost so far.
    naive_cost : int
        The cost of the unoptimized expression.

    Returns
    -------
    cost : (int, int)
        A tuple containing the size of any indices removed, and the flop cost.
    positions : tuple of int
        The locations of the proposed tensors to contract.
    new_input_sets : list of sets
        The resulting new list of indices if this proposed contraction
        is performed.

    """
    # Resolve the proposed contraction
    idx_result, new_input_sets, idx_removed, idx_contract = _find_contraction(
        positions, input_sets, output_set
    )

    # Reject intermediates larger than the memory limit
    result_size = _compute_size_by_dict(idx_result, idx_dict)
    if result_size > memory_limit:
        return None

    # Primary sort key: how much total operand size the contraction frees.
    # NB: this used to be just the size of any removed indices, i.e.
    # helpers.compute_size_by_dict(idx_removed, idx_dict)
    freed_size = sum(
        _compute_size_by_dict(input_sets[p], idx_dict) for p in positions
    ) - result_size

    flops = _flop_count(idx_contract, idx_removed, len(positions), idx_dict)

    # Also give up if this step alone exceeds the unoptimized total cost
    if (path_cost + flops) > naive_cost:
        return None

    # Candidate: (sort key, positions, resulting operand sets)
    return [(-freed_size, flops), positions, new_input_sets]


def _update_other_results(results, best):
|
| 286 |
+
"""Update the positions and provisional input_sets of ``results``
|
| 287 |
+
based on performing the contraction result ``best``. Remove any
|
| 288 |
+
involving the tensors contracted.
|
| 289 |
+
|
| 290 |
+
Parameters
|
| 291 |
+
----------
|
| 292 |
+
results : list
|
| 293 |
+
List of contraction results produced by
|
| 294 |
+
``_parse_possible_contraction``.
|
| 295 |
+
best : list
|
| 296 |
+
The best contraction of ``results`` i.e. the one that
|
| 297 |
+
will be performed.
|
| 298 |
+
|
| 299 |
+
Returns
|
| 300 |
+
-------
|
| 301 |
+
mod_results : list
|
| 302 |
+
The list of modified results, updated with outcome of
|
| 303 |
+
``best`` contraction.
|
| 304 |
+
"""
|
| 305 |
+
|
| 306 |
+
best_con = best[1]
|
| 307 |
+
bx, by = best_con
|
| 308 |
+
mod_results = []
|
| 309 |
+
|
| 310 |
+
for cost, (x, y), con_sets in results:
|
| 311 |
+
|
| 312 |
+
# Ignore results involving tensors just contracted
|
| 313 |
+
if x in best_con or y in best_con:
|
| 314 |
+
continue
|
| 315 |
+
|
| 316 |
+
# Update the input_sets
|
| 317 |
+
del con_sets[by - int(by > x) - int(by > y)]
|
| 318 |
+
del con_sets[bx - int(bx > x) - int(bx > y)]
|
| 319 |
+
con_sets.insert(-1, best[2][-1])
|
| 320 |
+
|
| 321 |
+
# Update the position indices
|
| 322 |
+
mod_con = x - int(x > bx) - int(x > by), y - int(y > bx) - int(y > by)
|
| 323 |
+
mod_results.append((cost, mod_con, con_sets))
|
| 324 |
+
|
| 325 |
+
return mod_results
|
| 326 |
+
|
| 327 |
+
def _greedy_path(input_sets, output_set, idx_dict, memory_limit):
    """
    Finds the path by contracting the best pair until the input list is
    exhausted. The best pair is found by minimizing the tuple
    ``(-prod(indices_removed), cost)``. What this amounts to is prioritizing
    matrix multiplication or inner product operations, then Hadamard like
    operations, and finally outer operations. Outer products are limited by
    ``memory_limit``. This algorithm scales cubically with respect to the
    number of elements in the list ``input_sets``.

    Parameters
    ----------
    input_sets : list
        List of sets that represent the lhs side of the einsum subscript
    output_set : set
        Set that represents the rhs side of the overall einsum subscript
    idx_dict : dictionary
        Dictionary of index sizes
    memory_limit : int
        The maximum number of elements in a temporary array

    Returns
    -------
    path : list
        The greedy contraction order within the memory limit constraint.

    Examples
    --------
    >>> isets = [set('abd'), set('ac'), set('bdc')]
    >>> oset = set()
    >>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4}
    >>> _greedy_path(isets, oset, idx_sizes, 5000)
    [(0, 2), (0, 1)]
    """

    # Handle trivial cases that leaked through
    if len(input_sets) == 1:
        return [(0,)]
    elif len(input_sets) == 2:
        return [(0, 1)]

    # Build up a naive cost
    # (cost of contracting everything in one shot; used as an upper bound
    # for sieving candidates inside _parse_possible_contraction)
    contract = _find_contraction(
        range(len(input_sets)), input_sets, output_set
    )
    idx_result, new_input_sets, idx_removed, idx_contract = contract
    naive_cost = _flop_count(
        idx_contract, idx_removed, len(input_sets), idx_dict
    )

    # Initially iterate over all pairs
    # NOTE: comb_iter is a one-shot iterator; it is consumed on the first
    # pass and rebuilt at the end of each iteration restricted to pairs
    # involving the newly created tensor.
    comb_iter = itertools.combinations(range(len(input_sets)), 2)
    known_contractions = []

    path_cost = 0
    path = []

    for iteration in range(len(input_sets) - 1):

        # Iterate over all pairs on the first step, only previously
        # found pairs on subsequent steps
        for positions in comb_iter:

            # Always initially ignore outer products
            # (pairs with no shared index remove nothing)
            if input_sets[positions[0]].isdisjoint(input_sets[positions[1]]):
                continue

            result = _parse_possible_contraction(
                positions, input_sets, output_set, idx_dict,
                memory_limit, path_cost, naive_cost
            )
            if result is not None:
                known_contractions.append(result)

        # If we do not have a inner contraction, rescan pairs
        # including outer products
        if len(known_contractions) == 0:

            # Then check the outer products
            for positions in itertools.combinations(
                range(len(input_sets)), 2
            ):
                result = _parse_possible_contraction(
                    positions, input_sets, output_set, idx_dict,
                    memory_limit, path_cost, naive_cost
                )
                if result is not None:
                    known_contractions.append(result)

            # If we still did not find any remaining contractions,
            # default back to einsum like behavior
            if len(known_contractions) == 0:
                path.append(tuple(range(len(input_sets))))
                break

        # Sort based on first index
        # (candidates are [(-removed_size, flops), positions, new_sets];
        # min therefore prefers the largest size reduction, then fewest flops)
        best = min(known_contractions, key=lambda x: x[0])

        # Now propagate as many unused contractions as possible
        # to the next iteration
        known_contractions = _update_other_results(known_contractions, best)

        # Next iteration only compute contractions with the new tensor
        # All other contractions have been accounted for
        input_sets = best[2]
        new_tensor_pos = len(input_sets) - 1
        comb_iter = ((i, new_tensor_pos) for i in range(new_tensor_pos))

        # Update path and total cost
        # (best[0][1] is the flop component of the sort tuple)
        path.append(best[1])
        path_cost += best[0][1]

    return path


def _can_dot(inputs, result, idx_removed):
|
| 443 |
+
"""
|
| 444 |
+
Checks if we can use BLAS (np.tensordot) call and its beneficial to do so.
|
| 445 |
+
|
| 446 |
+
Parameters
|
| 447 |
+
----------
|
| 448 |
+
inputs : list of str
|
| 449 |
+
Specifies the subscripts for summation.
|
| 450 |
+
result : str
|
| 451 |
+
Resulting summation.
|
| 452 |
+
idx_removed : set
|
| 453 |
+
Indices that are removed in the summation
|
| 454 |
+
|
| 455 |
+
|
| 456 |
+
Returns
|
| 457 |
+
-------
|
| 458 |
+
type : bool
|
| 459 |
+
Returns true if BLAS should and can be used, else False
|
| 460 |
+
|
| 461 |
+
Notes
|
| 462 |
+
-----
|
| 463 |
+
If the operations is BLAS level 1 or 2 and is not already aligned
|
| 464 |
+
we default back to einsum as the memory movement to copy is more
|
| 465 |
+
costly than the operation itself.
|
| 466 |
+
|
| 467 |
+
|
| 468 |
+
Examples
|
| 469 |
+
--------
|
| 470 |
+
|
| 471 |
+
# Standard GEMM operation
|
| 472 |
+
>>> _can_dot(['ij', 'jk'], 'ik', set('j'))
|
| 473 |
+
True
|
| 474 |
+
|
| 475 |
+
# Can use the standard BLAS, but requires odd data movement
|
| 476 |
+
>>> _can_dot(['ijj', 'jk'], 'ik', set('j'))
|
| 477 |
+
False
|
| 478 |
+
|
| 479 |
+
# DDOT where the memory is not aligned
|
| 480 |
+
>>> _can_dot(['ijk', 'ikj'], '', set('ijk'))
|
| 481 |
+
False
|
| 482 |
+
|
| 483 |
+
"""
|
| 484 |
+
|
| 485 |
+
# All `dot` calls remove indices
|
| 486 |
+
if len(idx_removed) == 0:
|
| 487 |
+
return False
|
| 488 |
+
|
| 489 |
+
# BLAS can only handle two operands
|
| 490 |
+
if len(inputs) != 2:
|
| 491 |
+
return False
|
| 492 |
+
|
| 493 |
+
input_left, input_right = inputs
|
| 494 |
+
|
| 495 |
+
for c in set(input_left + input_right):
|
| 496 |
+
# can't deal with repeated indices on same input or more than 2 total
|
| 497 |
+
nl, nr = input_left.count(c), input_right.count(c)
|
| 498 |
+
if (nl > 1) or (nr > 1) or (nl + nr > 2):
|
| 499 |
+
return False
|
| 500 |
+
|
| 501 |
+
# can't do implicit summation or dimension collapse e.g.
|
| 502 |
+
# "ab,bc->c" (implicitly sum over 'a')
|
| 503 |
+
# "ab,ca->ca" (take diagonal of 'a')
|
| 504 |
+
if nl + nr - 1 == int(c in result):
|
| 505 |
+
return False
|
| 506 |
+
|
| 507 |
+
# Build a few temporaries
|
| 508 |
+
set_left = set(input_left)
|
| 509 |
+
set_right = set(input_right)
|
| 510 |
+
keep_left = set_left - idx_removed
|
| 511 |
+
keep_right = set_right - idx_removed
|
| 512 |
+
rs = len(idx_removed)
|
| 513 |
+
|
| 514 |
+
# At this point we are a DOT, GEMV, or GEMM operation
|
| 515 |
+
|
| 516 |
+
# Handle inner products
|
| 517 |
+
|
| 518 |
+
# DDOT with aligned data
|
| 519 |
+
if input_left == input_right:
|
| 520 |
+
return True
|
| 521 |
+
|
| 522 |
+
# DDOT without aligned data (better to use einsum)
|
| 523 |
+
if set_left == set_right:
|
| 524 |
+
return False
|
| 525 |
+
|
| 526 |
+
# Handle the 4 possible (aligned) GEMV or GEMM cases
|
| 527 |
+
|
| 528 |
+
# GEMM or GEMV no transpose
|
| 529 |
+
if input_left[-rs:] == input_right[:rs]:
|
| 530 |
+
return True
|
| 531 |
+
|
| 532 |
+
# GEMM or GEMV transpose both
|
| 533 |
+
if input_left[:rs] == input_right[-rs:]:
|
| 534 |
+
return True
|
| 535 |
+
|
| 536 |
+
# GEMM or GEMV transpose right
|
| 537 |
+
if input_left[-rs:] == input_right[-rs:]:
|
| 538 |
+
return True
|
| 539 |
+
|
| 540 |
+
# GEMM or GEMV transpose left
|
| 541 |
+
if input_left[:rs] == input_right[:rs]:
|
| 542 |
+
return True
|
| 543 |
+
|
| 544 |
+
# Einsum is faster than GEMV if we have to copy data
|
| 545 |
+
if not keep_left or not keep_right:
|
| 546 |
+
return False
|
| 547 |
+
|
| 548 |
+
# We are a matrix-matrix product, but we need to copy data
|
| 549 |
+
return True
|
| 550 |
+
|
| 551 |
+
|
| 552 |
+
def _parse_einsum_input(operands):
    """
    A reproduction of einsum c side einsum parsing in python.

    Returns
    -------
    input_strings : str
        Parsed input strings
    output_string : str
        Parsed output string
    operands : list of array_like
        The operands to use in the numpy contraction

    Examples
    --------
    The operand list is simplified to reduce printing:

    >>> np.random.seed(123)
    >>> a = np.random.rand(4, 4)
    >>> b = np.random.rand(4, 4, 4)
    >>> _parse_einsum_input(('...a,...a->...', a, b))
    ('za,xza', 'xz', [a, b]) # may vary

    >>> _parse_einsum_input((a, [Ellipsis, 0], b, [Ellipsis, 0]))
    ('za,xza', 'xz', [a, b]) # may vary
    """

    if len(operands) == 0:
        raise ValueError("No input operands")

    # String-subscript form: ("ij,jk->ik", a, b)
    if isinstance(operands[0], str):
        subscripts = operands[0].replace(" ", "")
        operands = [asanyarray(v) for v in operands[1:]]

        # Ensure all characters are valid
        for s in subscripts:
            if s in '.,->':
                continue
            if s not in einsum_symbols:
                raise ValueError("Character %s is not a valid symbol." % s)

    # Interleaved form: (a, [0, 1], b, [1, 2], [0, 2]) — operands alternate
    # with axis-label lists; a trailing odd element is the output spec.
    else:
        tmp_operands = list(operands)
        operand_list = []
        subscript_list = []
        for p in range(len(operands) // 2):
            operand_list.append(tmp_operands.pop(0))
            subscript_list.append(tmp_operands.pop(0))

        output_list = tmp_operands[-1] if len(tmp_operands) else None
        operands = [asanyarray(v) for v in operand_list]
        subscripts = ""
        last = len(subscript_list) - 1
        # Translate each integer axis label into a subscript character
        for num, sub in enumerate(subscript_list):
            for s in sub:
                if s is Ellipsis:
                    subscripts += "..."
                else:
                    try:
                        s = operator.index(s)
                    except TypeError as e:
                        raise TypeError(
                            "For this input type lists must contain "
                            "either int or Ellipsis"
                        ) from e
                    subscripts += einsum_symbols[s]
            if num != last:
                subscripts += ","

        # Translate the explicit output spec the same way, if present
        if output_list is not None:
            subscripts += "->"
            for s in output_list:
                if s is Ellipsis:
                    subscripts += "..."
                else:
                    try:
                        s = operator.index(s)
                    except TypeError as e:
                        raise TypeError(
                            "For this input type lists must contain "
                            "either int or Ellipsis"
                        ) from e
                    subscripts += einsum_symbols[s]
    # Check for proper "->"
    if ("-" in subscripts) or (">" in subscripts):
        invalid = (subscripts.count("-") > 1) or (subscripts.count(">") > 1)
        if invalid or (subscripts.count("->") != 1):
            raise ValueError("Subscripts can only contain one '->'.")

    # Parse ellipses
    if "." in subscripts:
        # Replace each "..." with concrete symbols not used elsewhere
        used = subscripts.replace(".", "").replace(",", "").replace("->", "")
        unused = list(einsum_symbols_set - set(used))
        ellipse_inds = "".join(unused)
        longest = 0

        if "->" in subscripts:
            input_tmp, output_sub = subscripts.split("->")
            split_subscripts = input_tmp.split(",")
            out_sub = True
        else:
            split_subscripts = subscripts.split(',')
            out_sub = False

        for num, sub in enumerate(split_subscripts):
            if "." in sub:
                if (sub.count(".") != 3) or (sub.count("...") != 1):
                    raise ValueError("Invalid Ellipses.")

                # Take into account numerical values
                if operands[num].shape == ():
                    ellipse_count = 0
                else:
                    # Number of dimensions the ellipsis stands for on
                    # this operand: ndim minus the named subscripts
                    ellipse_count = max(operands[num].ndim, 1)
                    ellipse_count -= (len(sub) - 3)

                if ellipse_count > longest:
                    longest = ellipse_count

                if ellipse_count < 0:
                    raise ValueError("Ellipses lengths do not match.")
                elif ellipse_count == 0:
                    split_subscripts[num] = sub.replace('...', '')
                else:
                    # Broadcast dims align to the right, so take the tail
                    # of the shared replacement symbols
                    rep_inds = ellipse_inds[-ellipse_count:]
                    split_subscripts[num] = sub.replace('...', rep_inds)

        subscripts = ",".join(split_subscripts)
        if longest == 0:
            out_ellipse = ""
        else:
            out_ellipse = ellipse_inds[-longest:]

        if out_sub:
            subscripts += "->" + output_sub.replace("...", out_ellipse)
        else:
            # Special care for outputless ellipses
            # (implicit output: ellipsis dims first, then every index that
            # appears exactly once, sorted)
            output_subscript = ""
            tmp_subscripts = subscripts.replace(",", "")
            for s in sorted(set(tmp_subscripts)):
                if s not in (einsum_symbols):
                    raise ValueError("Character %s is not a valid symbol." % s)
                if tmp_subscripts.count(s) == 1:
                    output_subscript += s
            normal_inds = ''.join(sorted(set(output_subscript) -
                                         set(out_ellipse)))

            subscripts += "->" + out_ellipse + normal_inds

    # Build output string if does not exist
    if "->" in subscripts:
        input_subscripts, output_subscript = subscripts.split("->")
    else:
        input_subscripts = subscripts
        # Build output subscripts
        # (implicit output: indices appearing exactly once, sorted)
        tmp_subscripts = subscripts.replace(",", "")
        output_subscript = ""
        for s in sorted(set(tmp_subscripts)):
            if s not in einsum_symbols:
                raise ValueError("Character %s is not a valid symbol." % s)
            if tmp_subscripts.count(s) == 1:
                output_subscript += s

    # Make sure output subscripts are in the input
    for char in output_subscript:
        if output_subscript.count(char) != 1:
            raise ValueError("Output character %s appeared more than once in "
                             "the output." % char)
        if char not in input_subscripts:
            raise ValueError("Output character %s did not appear in the input"
                             % char)

    # Make sure number operands is equivalent to the number of terms
    if len(input_subscripts.split(',')) != len(operands):
        raise ValueError("Number of einsum subscripts must be equal to the "
                         "number of operands.")

    return (input_subscripts, output_subscript, operands)


def _einsum_path_dispatcher(*operands, optimize=None, einsum_call=None):
    """Dispatcher for ``einsum_path``: yield every positional operand."""
    # NOTE: technically, we should only dispatch on array-like arguments, not
    # subscripts (given as strings). But separating operands into
    # arrays/subscripts is a little tricky/slow (given einsum's two supported
    # signatures), so as a practical shortcut we dispatch on everything.
    # Strings will be ignored for dispatching since they don't define
    # __array_function__.
    return operands


+
@array_function_dispatch(_einsum_path_dispatcher, module='numpy')
|
| 743 |
+
def einsum_path(*operands, optimize='greedy', einsum_call=False):
|
| 744 |
+
"""
|
| 745 |
+
einsum_path(subscripts, *operands, optimize='greedy')
|
| 746 |
+
|
| 747 |
+
Evaluates the lowest cost contraction order for an einsum expression by
|
| 748 |
+
considering the creation of intermediate arrays.
|
| 749 |
+
|
| 750 |
+
Parameters
|
| 751 |
+
----------
|
| 752 |
+
subscripts : str
|
| 753 |
+
Specifies the subscripts for summation.
|
| 754 |
+
*operands : list of array_like
|
| 755 |
+
These are the arrays for the operation.
|
| 756 |
+
optimize : {bool, list, tuple, 'greedy', 'optimal'}
|
| 757 |
+
Choose the type of path. If a tuple is provided, the second argument is
|
| 758 |
+
assumed to be the maximum intermediate size created. If only a single
|
| 759 |
+
argument is provided the largest input or output array size is used
|
| 760 |
+
as a maximum intermediate size.
|
| 761 |
+
|
| 762 |
+
* if a list is given that starts with ``einsum_path``, uses this as the
|
| 763 |
+
contraction path
|
| 764 |
+
* if False no optimization is taken
|
| 765 |
+
* if True defaults to the 'greedy' algorithm
|
| 766 |
+
* 'optimal' An algorithm that combinatorially explores all possible
|
| 767 |
+
ways of contracting the listed tensors and chooses the least costly
|
| 768 |
+
path. Scales exponentially with the number of terms in the
|
| 769 |
+
contraction.
|
| 770 |
+
* 'greedy' An algorithm that chooses the best pair contraction
|
| 771 |
+
at each step. Effectively, this algorithm searches the largest inner,
|
| 772 |
+
Hadamard, and then outer products at each step. Scales cubically with
|
| 773 |
+
the number of terms in the contraction. Equivalent to the 'optimal'
|
| 774 |
+
path for most contractions.
|
| 775 |
+
|
| 776 |
+
Default is 'greedy'.
|
| 777 |
+
|
| 778 |
+
Returns
|
| 779 |
+
-------
|
| 780 |
+
path : list of tuples
|
| 781 |
+
A list representation of the einsum path.
|
| 782 |
+
string_repr : str
|
| 783 |
+
A printable representation of the einsum path.
|
| 784 |
+
|
| 785 |
+
Notes
|
| 786 |
+
-----
|
| 787 |
+
The resulting path indicates which terms of the input contraction should be
|
| 788 |
+
contracted first, the result of this contraction is then appended to the
|
| 789 |
+
end of the contraction list. This list can then be iterated over until all
|
| 790 |
+
intermediate contractions are complete.
|
| 791 |
+
|
| 792 |
+
See Also
|
| 793 |
+
--------
|
| 794 |
+
einsum, linalg.multi_dot
|
| 795 |
+
|
| 796 |
+
Examples
|
| 797 |
+
--------
|
| 798 |
+
|
| 799 |
+
We can begin with a chain dot example. In this case, it is optimal to
|
| 800 |
+
contract the ``b`` and ``c`` tensors first as represented by the first
|
| 801 |
+
element of the path ``(1, 2)``. The resulting tensor is added to the end
|
| 802 |
+
of the contraction and the remaining contraction ``(0, 1)`` is then
|
| 803 |
+
completed.
|
| 804 |
+
|
| 805 |
+
>>> np.random.seed(123)
|
| 806 |
+
>>> a = np.random.rand(2, 2)
|
| 807 |
+
>>> b = np.random.rand(2, 5)
|
| 808 |
+
>>> c = np.random.rand(5, 2)
|
| 809 |
+
>>> path_info = np.einsum_path('ij,jk,kl->il', a, b, c, optimize='greedy')
|
| 810 |
+
>>> print(path_info[0])
|
| 811 |
+
['einsum_path', (1, 2), (0, 1)]
|
| 812 |
+
>>> print(path_info[1])
|
| 813 |
+
Complete contraction: ij,jk,kl->il # may vary
|
| 814 |
+
Naive scaling: 4
|
| 815 |
+
Optimized scaling: 3
|
| 816 |
+
Naive FLOP count: 1.600e+02
|
| 817 |
+
Optimized FLOP count: 5.600e+01
|
| 818 |
+
Theoretical speedup: 2.857
|
| 819 |
+
Largest intermediate: 4.000e+00 elements
|
| 820 |
+
-------------------------------------------------------------------------
|
| 821 |
+
scaling current remaining
|
| 822 |
+
-------------------------------------------------------------------------
|
| 823 |
+
3 kl,jk->jl ij,jl->il
|
| 824 |
+
3 jl,ij->il il->il
|
| 825 |
+
|
| 826 |
+
|
| 827 |
+
A more complex index transformation example.
|
| 828 |
+
|
| 829 |
+
>>> I = np.random.rand(10, 10, 10, 10)
|
| 830 |
+
>>> C = np.random.rand(10, 10)
|
| 831 |
+
>>> path_info = np.einsum_path('ea,fb,abcd,gc,hd->efgh', C, C, I, C, C,
|
| 832 |
+
... optimize='greedy')
|
| 833 |
+
|
| 834 |
+
>>> print(path_info[0])
|
| 835 |
+
['einsum_path', (0, 2), (0, 3), (0, 2), (0, 1)]
|
| 836 |
+
>>> print(path_info[1])
|
| 837 |
+
Complete contraction: ea,fb,abcd,gc,hd->efgh # may vary
|
| 838 |
+
Naive scaling: 8
|
| 839 |
+
Optimized scaling: 5
|
| 840 |
+
Naive FLOP count: 8.000e+08
|
| 841 |
+
Optimized FLOP count: 8.000e+05
|
| 842 |
+
Theoretical speedup: 1000.000
|
| 843 |
+
Largest intermediate: 1.000e+04 elements
|
| 844 |
+
--------------------------------------------------------------------------
|
| 845 |
+
scaling current remaining
|
| 846 |
+
--------------------------------------------------------------------------
|
| 847 |
+
5 abcd,ea->bcde fb,gc,hd,bcde->efgh
|
| 848 |
+
5 bcde,fb->cdef gc,hd,cdef->efgh
|
| 849 |
+
5 cdef,gc->defg hd,defg->efgh
|
| 850 |
+
5 defg,hd->efgh efgh->efgh
|
| 851 |
+
"""
|
| 852 |
+
|
| 853 |
+
# Figure out what the path really is
|
| 854 |
+
path_type = optimize
|
| 855 |
+
if path_type is True:
|
| 856 |
+
path_type = 'greedy'
|
| 857 |
+
if path_type is None:
|
| 858 |
+
path_type = False
|
| 859 |
+
|
| 860 |
+
explicit_einsum_path = False
|
| 861 |
+
memory_limit = None
|
| 862 |
+
|
| 863 |
+
# No optimization or a named path algorithm
|
| 864 |
+
if (path_type is False) or isinstance(path_type, str):
|
| 865 |
+
pass
|
| 866 |
+
|
| 867 |
+
# Given an explicit path
|
| 868 |
+
elif len(path_type) and (path_type[0] == 'einsum_path'):
|
| 869 |
+
explicit_einsum_path = True
|
| 870 |
+
|
| 871 |
+
# Path tuple with memory limit
|
| 872 |
+
elif ((len(path_type) == 2) and isinstance(path_type[0], str) and
|
| 873 |
+
isinstance(path_type[1], (int, float))):
|
| 874 |
+
memory_limit = int(path_type[1])
|
| 875 |
+
path_type = path_type[0]
|
| 876 |
+
|
| 877 |
+
else:
|
| 878 |
+
raise TypeError("Did not understand the path: %s" % str(path_type))
|
| 879 |
+
|
| 880 |
+
# Hidden option, only einsum should call this
|
| 881 |
+
einsum_call_arg = einsum_call
|
| 882 |
+
|
| 883 |
+
# Python side parsing
|
| 884 |
+
input_subscripts, output_subscript, operands = (
|
| 885 |
+
_parse_einsum_input(operands)
|
| 886 |
+
)
|
| 887 |
+
|
| 888 |
+
# Build a few useful list and sets
|
| 889 |
+
input_list = input_subscripts.split(',')
|
| 890 |
+
input_sets = [set(x) for x in input_list]
|
| 891 |
+
output_set = set(output_subscript)
|
| 892 |
+
indices = set(input_subscripts.replace(',', ''))
|
| 893 |
+
|
| 894 |
+
# Get length of each unique dimension and ensure all dimensions are correct
|
| 895 |
+
dimension_dict = {}
|
| 896 |
+
broadcast_indices = [[] for x in range(len(input_list))]
|
| 897 |
+
for tnum, term in enumerate(input_list):
|
| 898 |
+
sh = operands[tnum].shape
|
| 899 |
+
if len(sh) != len(term):
|
| 900 |
+
raise ValueError("Einstein sum subscript %s does not contain the "
|
| 901 |
+
"correct number of indices for operand %d."
|
| 902 |
+
% (input_subscripts[tnum], tnum))
|
| 903 |
+
for cnum, char in enumerate(term):
|
| 904 |
+
dim = sh[cnum]
|
| 905 |
+
|
| 906 |
+
# Build out broadcast indices
|
| 907 |
+
if dim == 1:
|
| 908 |
+
broadcast_indices[tnum].append(char)
|
| 909 |
+
|
| 910 |
+
if char in dimension_dict.keys():
|
| 911 |
+
# For broadcasting cases we always want the largest dim size
|
| 912 |
+
if dimension_dict[char] == 1:
|
| 913 |
+
dimension_dict[char] = dim
|
| 914 |
+
elif dim not in (1, dimension_dict[char]):
|
| 915 |
+
raise ValueError("Size of label '%s' for operand %d (%d) "
|
| 916 |
+
"does not match previous terms (%d)."
|
| 917 |
+
% (char, tnum, dimension_dict[char], dim))
|
| 918 |
+
else:
|
| 919 |
+
dimension_dict[char] = dim
|
| 920 |
+
|
| 921 |
+
# Convert broadcast inds to sets
|
| 922 |
+
broadcast_indices = [set(x) for x in broadcast_indices]
|
| 923 |
+
|
| 924 |
+
# Compute size of each input array plus the output array
|
| 925 |
+
size_list = [_compute_size_by_dict(term, dimension_dict)
|
| 926 |
+
for term in input_list + [output_subscript]]
|
| 927 |
+
max_size = max(size_list)
|
| 928 |
+
|
| 929 |
+
if memory_limit is None:
|
| 930 |
+
memory_arg = max_size
|
| 931 |
+
else:
|
| 932 |
+
memory_arg = memory_limit
|
| 933 |
+
|
| 934 |
+
# Compute naive cost
|
| 935 |
+
# This isn't quite right, need to look into exactly how einsum does this
|
| 936 |
+
inner_product = (sum(len(x) for x in input_sets) - len(indices)) > 0
|
| 937 |
+
naive_cost = _flop_count(
|
| 938 |
+
indices, inner_product, len(input_list), dimension_dict
|
| 939 |
+
)
|
| 940 |
+
|
| 941 |
+
# Compute the path
|
| 942 |
+
if explicit_einsum_path:
|
| 943 |
+
path = path_type[1:]
|
| 944 |
+
elif (
|
| 945 |
+
(path_type is False)
|
| 946 |
+
or (len(input_list) in [1, 2])
|
| 947 |
+
or (indices == output_set)
|
| 948 |
+
):
|
| 949 |
+
# Nothing to be optimized, leave it to einsum
|
| 950 |
+
path = [tuple(range(len(input_list)))]
|
| 951 |
+
elif path_type == "greedy":
|
| 952 |
+
path = _greedy_path(
|
| 953 |
+
input_sets, output_set, dimension_dict, memory_arg
|
| 954 |
+
)
|
| 955 |
+
elif path_type == "optimal":
|
| 956 |
+
path = _optimal_path(
|
| 957 |
+
input_sets, output_set, dimension_dict, memory_arg
|
| 958 |
+
)
|
| 959 |
+
else:
|
| 960 |
+
raise KeyError("Path name %s not found", path_type)
|
| 961 |
+
|
| 962 |
+
cost_list, scale_list, size_list, contraction_list = [], [], [], []
|
| 963 |
+
|
| 964 |
+
# Build contraction tuple (positions, gemm, einsum_str, remaining)
|
| 965 |
+
for cnum, contract_inds in enumerate(path):
|
| 966 |
+
# Make sure we remove inds from right to left
|
| 967 |
+
contract_inds = tuple(sorted(contract_inds, reverse=True))
|
| 968 |
+
|
| 969 |
+
contract = _find_contraction(contract_inds, input_sets, output_set)
|
| 970 |
+
out_inds, input_sets, idx_removed, idx_contract = contract
|
| 971 |
+
|
| 972 |
+
cost = _flop_count(
|
| 973 |
+
idx_contract, idx_removed, len(contract_inds), dimension_dict
|
| 974 |
+
)
|
| 975 |
+
cost_list.append(cost)
|
| 976 |
+
scale_list.append(len(idx_contract))
|
| 977 |
+
size_list.append(_compute_size_by_dict(out_inds, dimension_dict))
|
| 978 |
+
|
| 979 |
+
bcast = set()
|
| 980 |
+
tmp_inputs = []
|
| 981 |
+
for x in contract_inds:
|
| 982 |
+
tmp_inputs.append(input_list.pop(x))
|
| 983 |
+
bcast |= broadcast_indices.pop(x)
|
| 984 |
+
|
| 985 |
+
new_bcast_inds = bcast - idx_removed
|
| 986 |
+
|
| 987 |
+
# If we're broadcasting, nix blas
|
| 988 |
+
if not len(idx_removed & bcast):
|
| 989 |
+
do_blas = _can_dot(tmp_inputs, out_inds, idx_removed)
|
| 990 |
+
else:
|
| 991 |
+
do_blas = False
|
| 992 |
+
|
| 993 |
+
# Last contraction
|
| 994 |
+
if (cnum - len(path)) == -1:
|
| 995 |
+
idx_result = output_subscript
|
| 996 |
+
else:
|
| 997 |
+
sort_result = [(dimension_dict[ind], ind) for ind in out_inds]
|
| 998 |
+
idx_result = "".join([x[1] for x in sorted(sort_result)])
|
| 999 |
+
|
| 1000 |
+
input_list.append(idx_result)
|
| 1001 |
+
broadcast_indices.append(new_bcast_inds)
|
| 1002 |
+
einsum_str = ",".join(tmp_inputs) + "->" + idx_result
|
| 1003 |
+
|
| 1004 |
+
contraction = (
|
| 1005 |
+
contract_inds, idx_removed, einsum_str, input_list[:], do_blas
|
| 1006 |
+
)
|
| 1007 |
+
contraction_list.append(contraction)
|
| 1008 |
+
|
| 1009 |
+
opt_cost = sum(cost_list) + 1
|
| 1010 |
+
|
| 1011 |
+
if len(input_list) != 1:
|
| 1012 |
+
# Explicit "einsum_path" is usually trusted, but we detect this kind of
|
| 1013 |
+
# mistake in order to prevent from returning an intermediate value.
|
| 1014 |
+
raise RuntimeError(
|
| 1015 |
+
"Invalid einsum_path is specified: {} more operands has to be "
|
| 1016 |
+
"contracted.".format(len(input_list) - 1))
|
| 1017 |
+
|
| 1018 |
+
if einsum_call_arg:
|
| 1019 |
+
return (operands, contraction_list)
|
| 1020 |
+
|
| 1021 |
+
# Return the path along with a nice string representation
|
| 1022 |
+
overall_contraction = input_subscripts + "->" + output_subscript
|
| 1023 |
+
header = ("scaling", "current", "remaining")
|
| 1024 |
+
|
| 1025 |
+
speedup = naive_cost / opt_cost
|
| 1026 |
+
max_i = max(size_list)
|
| 1027 |
+
|
| 1028 |
+
path_print = " Complete contraction: %s\n" % overall_contraction
|
| 1029 |
+
path_print += " Naive scaling: %d\n" % len(indices)
|
| 1030 |
+
path_print += " Optimized scaling: %d\n" % max(scale_list)
|
| 1031 |
+
path_print += " Naive FLOP count: %.3e\n" % naive_cost
|
| 1032 |
+
path_print += " Optimized FLOP count: %.3e\n" % opt_cost
|
| 1033 |
+
path_print += " Theoretical speedup: %3.3f\n" % speedup
|
| 1034 |
+
path_print += " Largest intermediate: %.3e elements\n" % max_i
|
| 1035 |
+
path_print += "-" * 74 + "\n"
|
| 1036 |
+
path_print += "%6s %24s %40s\n" % header
|
| 1037 |
+
path_print += "-" * 74
|
| 1038 |
+
|
| 1039 |
+
for n, contraction in enumerate(contraction_list):
|
| 1040 |
+
inds, idx_rm, einsum_str, remaining, blas = contraction
|
| 1041 |
+
remaining_str = ",".join(remaining) + "->" + output_subscript
|
| 1042 |
+
path_run = (scale_list[n], einsum_str, remaining_str)
|
| 1043 |
+
path_print += "\n%4d %24s %40s" % path_run
|
| 1044 |
+
|
| 1045 |
+
path = ['einsum_path'] + path
|
| 1046 |
+
return (path, path_print)
|
| 1047 |
+
|
| 1048 |
+
|
| 1049 |
+
def _einsum_dispatcher(*operands, out=None, optimize=None, **kwargs):
|
| 1050 |
+
# Arguably we dispatch on more arguments than we really should; see note in
|
| 1051 |
+
# _einsum_path_dispatcher for why.
|
| 1052 |
+
yield from operands
|
| 1053 |
+
yield out
|
| 1054 |
+
|
| 1055 |
+
|
| 1056 |
+
# Rewrite einsum to handle different cases
@array_function_dispatch(_einsum_dispatcher, module='numpy')
def einsum(*operands, out=None, optimize=False, **kwargs):
    """
    einsum(subscripts, *operands, out=None, dtype=None, order='K',
           casting='safe', optimize=False)

    Evaluates the Einstein summation convention on the operands.

    Using the Einstein summation convention, many common multi-dimensional,
    linear algebraic array operations can be represented in a simple fashion.
    In *implicit* mode `einsum` computes these values.

    In *explicit* mode, `einsum` provides further flexibility to compute
    other array operations that might not be considered classical Einstein
    summation operations, by disabling, or forcing summation over specified
    subscript labels.

    See the notes and examples for clarification.

    Parameters
    ----------
    subscripts : str
        Specifies the subscripts for summation as comma separated list of
        subscript labels. An implicit (classical Einstein summation)
        calculation is performed unless the explicit indicator '->' is
        included as well as subscript labels of the precise output form.
    operands : list of array_like
        These are the arrays for the operation.
    out : ndarray, optional
        If provided, the calculation is done into this array.
    dtype : {data-type, None}, optional
        If provided, forces the calculation to use the data type specified.
        Note that you may have to also give a more liberal `casting`
        parameter to allow the conversions. Default is None.
    order : {'C', 'F', 'A', 'K'}, optional
        Controls the memory layout of the output. 'C' means it should
        be C contiguous. 'F' means it should be Fortran contiguous,
        'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise.
        'K' means it should be as close to the layout as the inputs as
        is possible, including arbitrarily permuted axes.
        Default is 'K'.
    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
        Controls what kind of data casting may occur. Setting this to
        'unsafe' is not recommended, as it can adversely affect accumulations.

        * 'no' means the data types should not be cast at all.
        * 'equiv' means only byte-order changes are allowed.
        * 'safe' means only casts which can preserve values are allowed.
        * 'same_kind' means only safe casts or casts within a kind,
          like float64 to float32, are allowed.
        * 'unsafe' means any data conversions may be done.

        Default is 'safe'.
    optimize : {False, True, 'greedy', 'optimal'}, optional
        Controls if intermediate optimization should occur. No optimization
        will occur if False and True will default to the 'greedy' algorithm.
        Also accepts an explicit contraction list from the ``np.einsum_path``
        function. See ``np.einsum_path`` for more details. Defaults to False.

    Returns
    -------
    output : ndarray
        The calculation based on the Einstein summation convention.

    See Also
    --------
    einsum_path, dot, inner, outer, tensordot, linalg.multi_dot
    einsum:
        Similar verbose interface is provided by the
        `einops <https://github.com/arogozhnikov/einops>`_ package to cover
        additional operations: transpose, reshape/flatten, repeat/tile,
        squeeze/unsqueeze and reductions.
        The `opt_einsum <https://optimized-einsum.readthedocs.io/en/stable/>`_
        optimizes contraction order for einsum-like expressions
        in backend-agnostic manner.

    Notes
    -----
    The Einstein summation convention can be used to compute
    many multi-dimensional, linear algebraic array operations. `einsum`
    provides a succinct way of representing these.

    A non-exhaustive list of these operations,
    which can be computed by `einsum`, is shown below along with examples:

    * Trace of an array, :py:func:`numpy.trace`.
    * Return a diagonal, :py:func:`numpy.diag`.
    * Array axis summations, :py:func:`numpy.sum`.
    * Transpositions and permutations, :py:func:`numpy.transpose`.
    * Matrix multiplication and dot product, :py:func:`numpy.matmul`
      :py:func:`numpy.dot`.
    * Vector inner and outer products, :py:func:`numpy.inner`
      :py:func:`numpy.outer`.
    * Broadcasting, element-wise and scalar multiplication,
      :py:func:`numpy.multiply`.
    * Tensor contractions, :py:func:`numpy.tensordot`.
    * Chained array operations, in efficient calculation order,
      :py:func:`numpy.einsum_path`.

    The subscripts string is a comma-separated list of subscript labels,
    where each label refers to a dimension of the corresponding operand.
    Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)``
    is equivalent to :py:func:`np.inner(a,b) <numpy.inner>`. If a label
    appears only once, it is not summed, so ``np.einsum('i', a)``
    produces a view of ``a`` with no changes. A further example
    ``np.einsum('ij,jk', a, b)`` describes traditional matrix multiplication
    and is equivalent to :py:func:`np.matmul(a,b) <numpy.matmul>`.
    Repeated subscript labels in one operand take the diagonal.
    For example, ``np.einsum('ii', a)`` is equivalent to
    :py:func:`np.trace(a) <numpy.trace>`.

    In *implicit mode*, the chosen subscripts are important
    since the axes of the output are reordered alphabetically. This
    means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
    ``np.einsum('ji', a)`` takes its transpose. Additionally,
    ``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while,
    ``np.einsum('ij,jh', a, b)`` returns the transpose of the
    multiplication since subscript 'h' precedes subscript 'i'.

    In *explicit mode* the output can be directly controlled by
    specifying output subscript labels. This requires the
    identifier '->' as well as the list of output subscript labels.
    This feature increases the flexibility of the function since
    summing can be disabled or forced when required. The call
    ``np.einsum('i->', a)`` is like :py:func:`np.sum(a) <numpy.sum>`
    if ``a`` is a 1-D array, and ``np.einsum('ii->i', a)``
    is like :py:func:`np.diag(a) <numpy.diag>` if ``a`` is a square 2-D array.
    The difference is that `einsum` does not allow broadcasting by default.
    Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the
    order of the output subscript labels and therefore returns matrix
    multiplication, unlike the example above in implicit mode.

    To enable and control broadcasting, use an ellipsis. Default
    NumPy-style broadcasting is done by adding an ellipsis
    to the left of each term, like ``np.einsum('...ii->...i', a)``.
    ``np.einsum('...i->...', a)`` is like
    :py:func:`np.sum(a, axis=-1) <numpy.sum>` for array ``a`` of any shape.
    To take the trace along the first and last axes,
    you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix
    product with the left-most indices instead of rightmost, one can do
    ``np.einsum('ij...,jk...->ik...', a, b)``.

    When there is only one operand, no axes are summed, and no output
    parameter is provided, a view into the operand is returned instead
    of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)``
    produces a view (changed in version 1.10.0).

    `einsum` also provides an alternative way to provide the subscripts and
    operands as ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``.
    If the output shape is not provided in this format `einsum` will be
    calculated in implicit mode, otherwise it will be performed explicitly.
    The examples below have corresponding `einsum` calls with the two
    parameter methods.

    Views returned from einsum are now writeable whenever the input array
    is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now
    have the same effect as :py:func:`np.swapaxes(a, 0, 2) <numpy.swapaxes>`
    and ``np.einsum('ii->i', a)`` will return a writeable view of the diagonal
    of a 2D array.

    Added the ``optimize`` argument which will optimize the contraction order
    of an einsum expression. For a contraction with three or more operands
    this can greatly increase the computational efficiency at the cost of
    a larger memory footprint during computation.

    Typically a 'greedy' algorithm is applied which empirical tests have shown
    returns the optimal path in the majority of cases. In some cases 'optimal'
    will return the superlative path through a more expensive, exhaustive
    search. For iterative calculations it may be advisable to calculate
    the optimal path once and reuse that path by supplying it as an argument.
    An example is given below.

    See :py:func:`numpy.einsum_path` for more details.

    Examples
    --------
    >>> a = np.arange(25).reshape(5,5)
    >>> b = np.arange(5)
    >>> c = np.arange(6).reshape(2,3)

    Trace of a matrix:

    >>> np.einsum('ii', a)
    60
    >>> np.einsum(a, [0,0])
    60
    >>> np.trace(a)
    60

    Extract the diagonal (requires explicit form):

    >>> np.einsum('ii->i', a)
    array([ 0,  6, 12, 18, 24])
    >>> np.einsum(a, [0,0], [0])
    array([ 0,  6, 12, 18, 24])
    >>> np.diag(a)
    array([ 0,  6, 12, 18, 24])

    Sum over an axis (requires explicit form):

    >>> np.einsum('ij->i', a)
    array([ 10,  35,  60,  85, 110])
    >>> np.einsum(a, [0,1], [0])
    array([ 10,  35,  60,  85, 110])
    >>> np.sum(a, axis=1)
    array([ 10,  35,  60,  85, 110])

    For higher dimensional arrays summing a single axis can be done
    with ellipsis:

    >>> np.einsum('...j->...', a)
    array([ 10,  35,  60,  85, 110])
    >>> np.einsum(a, [Ellipsis,1], [Ellipsis])
    array([ 10,  35,  60,  85, 110])

    Compute a matrix transpose, or reorder any number of axes:

    >>> np.einsum('ji', c)
    array([[0, 3],
           [1, 4],
           [2, 5]])
    >>> np.einsum('ij->ji', c)
    array([[0, 3],
           [1, 4],
           [2, 5]])
    >>> np.einsum(c, [1,0])
    array([[0, 3],
           [1, 4],
           [2, 5]])
    >>> np.transpose(c)
    array([[0, 3],
           [1, 4],
           [2, 5]])

    Vector inner products:

    >>> np.einsum('i,i', b, b)
    30
    >>> np.einsum(b, [0], b, [0])
    30
    >>> np.inner(b,b)
    30

    Matrix vector multiplication:

    >>> np.einsum('ij,j', a, b)
    array([ 30,  80, 130, 180, 230])
    >>> np.einsum(a, [0,1], b, [1])
    array([ 30,  80, 130, 180, 230])
    >>> np.dot(a, b)
    array([ 30,  80, 130, 180, 230])
    >>> np.einsum('...j,j', a, b)
    array([ 30,  80, 130, 180, 230])

    Broadcasting and scalar multiplication:

    >>> np.einsum('..., ...', 3, c)
    array([[ 0,  3,  6],
           [ 9, 12, 15]])
    >>> np.einsum(',ij', 3, c)
    array([[ 0,  3,  6],
           [ 9, 12, 15]])
    >>> np.einsum(3, [Ellipsis], c, [Ellipsis])
    array([[ 0,  3,  6],
           [ 9, 12, 15]])
    >>> np.multiply(3, c)
    array([[ 0,  3,  6],
           [ 9, 12, 15]])

    Vector outer product:

    >>> np.einsum('i,j', np.arange(2)+1, b)
    array([[0, 1, 2, 3, 4],
           [0, 2, 4, 6, 8]])
    >>> np.einsum(np.arange(2)+1, [0], b, [1])
    array([[0, 1, 2, 3, 4],
           [0, 2, 4, 6, 8]])
    >>> np.outer(np.arange(2)+1, b)
    array([[0, 1, 2, 3, 4],
           [0, 2, 4, 6, 8]])

    Tensor contraction:

    >>> a = np.arange(60.).reshape(3,4,5)
    >>> b = np.arange(24.).reshape(4,3,2)
    >>> np.einsum('ijk,jil->kl', a, b)
    array([[4400., 4730.],
           [4532., 4874.],
           [4664., 5018.],
           [4796., 5162.],
           [4928., 5306.]])
    >>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3])
    array([[4400., 4730.],
           [4532., 4874.],
           [4664., 5018.],
           [4796., 5162.],
           [4928., 5306.]])
    >>> np.tensordot(a,b, axes=([1,0],[0,1]))
    array([[4400., 4730.],
           [4532., 4874.],
           [4664., 5018.],
           [4796., 5162.],
           [4928., 5306.]])

    Writeable returned arrays (since version 1.10.0):

    >>> a = np.zeros((3, 3))
    >>> np.einsum('ii->i', a)[:] = 1
    >>> a
    array([[1., 0., 0.],
           [0., 1., 0.],
           [0., 0., 1.]])

    Example of ellipsis use:

    >>> a = np.arange(6).reshape((3,2))
    >>> b = np.arange(12).reshape((4,3))
    >>> np.einsum('ki,jk->ij', a, b)
    array([[10, 28, 46, 64],
           [13, 40, 67, 94]])
    >>> np.einsum('ki,...k->i...', a, b)
    array([[10, 28, 46, 64],
           [13, 40, 67, 94]])
    >>> np.einsum('k...,jk', a, b)
    array([[10, 28, 46, 64],
           [13, 40, 67, 94]])

    Chained array operations. For more complicated contractions, speed ups
    might be achieved by repeatedly computing a 'greedy' path or pre-computing
    the 'optimal' path and repeatedly applying it, using an `einsum_path`
    insertion (since version 1.12.0). Performance improvements can be
    particularly significant with larger arrays:

    >>> a = np.ones(64).reshape(2,4,8)

    Basic `einsum`: ~1520ms  (benchmarked on 3.1GHz Intel i5.)

    >>> for iteration in range(500):
    ...     _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a)

    Sub-optimal `einsum` (due to repeated path calculation time): ~330ms

    >>> for iteration in range(500):
    ...     _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a,
    ...         optimize='optimal')

    Greedy `einsum` (faster optimal path approximation): ~160ms

    >>> for iteration in range(500):
    ...     _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='greedy')

    Optimal `einsum` (best usage pattern in some use cases): ~110ms

    >>> path = np.einsum_path('ijk,ilm,njm,nlk,abc->',a,a,a,a,a,
    ...     optimize='optimal')[0]
    >>> for iteration in range(500):
    ...     _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize=path)

    """
    # Special handling if out is specified
    specified_out = out is not None

    # If no optimization, run pure einsum
    # (note: `optimize is False` deliberately excludes other falsy values
    # such as 0 or None, which take the optimized path below)
    if optimize is False:
        if specified_out:
            kwargs['out'] = out
        return c_einsum(*operands, **kwargs)

    # Check the kwargs to avoid a more cryptic error later, without having to
    # repeat default values here
    valid_einsum_kwargs = ['dtype', 'order', 'casting']
    unknown_kwargs = [k for (k, v) in kwargs.items() if
                      k not in valid_einsum_kwargs]
    if len(unknown_kwargs):
        raise TypeError("Did not understand the following kwargs: %s"
                        % unknown_kwargs)

    # Build the contraction list and operand
    # (einsum_call=True is the hidden flag making einsum_path return the
    # parsed operands and per-step contraction tuples instead of a path)
    operands, contraction_list = einsum_path(*operands, optimize=optimize,
                                             einsum_call=True)

    # Handle order kwarg for output array, c_einsum allows mixed case
    output_order = kwargs.pop('order', 'K')
    if output_order.upper() == 'A':
        if all(arr.flags.f_contiguous for arr in operands):
            output_order = 'F'
        else:
            output_order = 'C'

    # Start contraction loop
    # Each step pops its inputs from `operands` and appends its result,
    # so `operands` always holds the not-yet-consumed intermediates.
    for num, contraction in enumerate(contraction_list):
        inds, idx_rm, einsum_str, remaining, blas = contraction
        tmp_operands = [operands.pop(x) for x in inds]

        # Do we need to deal with the output?
        handle_out = specified_out and ((num + 1) == len(contraction_list))

        # Call tensordot if still possible
        if blas:
            # Checks have already been handled
            input_str, results_index = einsum_str.split('->')
            input_left, input_right = input_str.split(',')

            tensor_result = input_left + input_right
            for s in idx_rm:
                tensor_result = tensor_result.replace(s, "")

            # Find indices to contract over
            left_pos, right_pos = [], []
            for s in sorted(idx_rm):
                left_pos.append(input_left.find(s))
                right_pos.append(input_right.find(s))

            # Contract!
            new_view = tensordot(
                *tmp_operands, axes=(tuple(left_pos), tuple(right_pos))
            )

            # Build a new view if needed
            # (tensordot's output label order may differ from the step's
            # requested order; a final c_einsum permutes / writes to `out`)
            if (tensor_result != results_index) or handle_out:
                if handle_out:
                    kwargs["out"] = out
                new_view = c_einsum(
                    tensor_result + '->' + results_index, new_view, **kwargs
                )

        # Call einsum
        else:
            # If out was specified
            if handle_out:
                kwargs["out"] = out

            # Do the contraction
            new_view = c_einsum(einsum_str, *tmp_operands, **kwargs)

        # Append new items and dereference what we can
        operands.append(new_view)
        del tmp_operands, new_view

    if specified_out:
        return out
    else:
        return asanyarray(operands[0], order=output_order)
|
janus/lib/python3.10/site-packages/numpy/_core/einsumfunc.pyi
ADDED
|
@@ -0,0 +1,184 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from collections.abc import Sequence
|
| 2 |
+
from typing import TypeAlias, TypeVar, Any, overload, Literal
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
from numpy import number, _OrderKACF
|
| 6 |
+
from numpy._typing import (
|
| 7 |
+
NDArray,
|
| 8 |
+
_ArrayLikeBool_co,
|
| 9 |
+
_ArrayLikeUInt_co,
|
| 10 |
+
_ArrayLikeInt_co,
|
| 11 |
+
_ArrayLikeFloat_co,
|
| 12 |
+
_ArrayLikeComplex_co,
|
| 13 |
+
_ArrayLikeObject_co,
|
| 14 |
+
_DTypeLikeBool,
|
| 15 |
+
_DTypeLikeUInt,
|
| 16 |
+
_DTypeLikeInt,
|
| 17 |
+
_DTypeLikeFloat,
|
| 18 |
+
_DTypeLikeComplex,
|
| 19 |
+
_DTypeLikeComplex_co,
|
| 20 |
+
_DTypeLikeObject,
|
| 21 |
+
)
|
| 22 |
+
|
| 23 |
+
# Public API of this stub module.
__all__ = ["einsum", "einsum_path"]

# Array type threaded through the `out=`-taking overloads so that the
# return type matches the provided output array exactly.
_ArrayType = TypeVar(
    "_ArrayType",
    bound=NDArray[np.bool | number[Any]],
)

# Accepted `optimize` values: bool, a named algorithm, an explicit
# contraction path (sequence), or None.
_OptimizeKind: TypeAlias = bool | Literal["greedy", "optimal"] | Sequence[Any] | None
# `casting` is split so that "unsafe" selects dedicated overloads below.
_CastingSafe: TypeAlias = Literal["no", "equiv", "safe", "same_kind"]
_CastingUnsafe: TypeAlias = Literal["unsafe"]
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
# TODO: Properly handle the `casting`-based combinatorics
# TODO: We need to evaluate the content `__subscripts` in order
# to identify whether or an array or scalar is returned. At a cursory
# glance this seems like something that can quite easily be done with
# a mypy plugin.
# Something like `is_scalar = bool(__subscripts.partition("->")[-1])`

# Overloads per operand kind (bool/uint/int/float/complex), with `casting`
# restricted to the value-preserving modes.
@overload
def einsum(
    subscripts: str | _ArrayLikeInt_co,
    /,
    *operands: _ArrayLikeBool_co,
    out: None = ...,
    dtype: None | _DTypeLikeBool = ...,
    order: _OrderKACF = ...,
    casting: _CastingSafe = ...,
    optimize: _OptimizeKind = ...,
) -> Any: ...
@overload
def einsum(
    subscripts: str | _ArrayLikeInt_co,
    /,
    *operands: _ArrayLikeUInt_co,
    out: None = ...,
    dtype: None | _DTypeLikeUInt = ...,
    order: _OrderKACF = ...,
    casting: _CastingSafe = ...,
    optimize: _OptimizeKind = ...,
) -> Any: ...
@overload
def einsum(
    subscripts: str | _ArrayLikeInt_co,
    /,
    *operands: _ArrayLikeInt_co,
    out: None = ...,
    dtype: None | _DTypeLikeInt = ...,
    order: _OrderKACF = ...,
    casting: _CastingSafe = ...,
    optimize: _OptimizeKind = ...,
) -> Any: ...
@overload
def einsum(
    subscripts: str | _ArrayLikeInt_co,
    /,
    *operands: _ArrayLikeFloat_co,
    out: None = ...,
    dtype: None | _DTypeLikeFloat = ...,
    order: _OrderKACF = ...,
    casting: _CastingSafe = ...,
    optimize: _OptimizeKind = ...,
) -> Any: ...
@overload
def einsum(
    subscripts: str | _ArrayLikeInt_co,
    /,
    *operands: _ArrayLikeComplex_co,
    out: None = ...,
    dtype: None | _DTypeLikeComplex = ...,
    order: _OrderKACF = ...,
    casting: _CastingSafe = ...,
    optimize: _OptimizeKind = ...,
) -> Any: ...
# With `casting="unsafe"` (keyword-required) operands are unconstrained.
@overload
def einsum(
    subscripts: str | _ArrayLikeInt_co,
    /,
    *operands: Any,
    casting: _CastingUnsafe,
    dtype: None | _DTypeLikeComplex_co = ...,
    out: None = ...,
    order: _OrderKACF = ...,
    optimize: _OptimizeKind = ...,
) -> Any: ...
# With `out=` provided, the return type is the type of `out` itself.
@overload
def einsum(
    subscripts: str | _ArrayLikeInt_co,
    /,
    *operands: _ArrayLikeComplex_co,
    out: _ArrayType,
    dtype: None | _DTypeLikeComplex_co = ...,
    order: _OrderKACF = ...,
    casting: _CastingSafe = ...,
    optimize: _OptimizeKind = ...,
) -> _ArrayType: ...
@overload
def einsum(
    subscripts: str | _ArrayLikeInt_co,
    /,
    *operands: Any,
    out: _ArrayType,
    casting: _CastingUnsafe,
    dtype: None | _DTypeLikeComplex_co = ...,
    order: _OrderKACF = ...,
    optimize: _OptimizeKind = ...,
) -> _ArrayType: ...

# Object-dtype variants mirror the structure above.
@overload
def einsum(
    subscripts: str | _ArrayLikeInt_co,
    /,
    *operands: _ArrayLikeObject_co,
    out: None = ...,
    dtype: None | _DTypeLikeObject = ...,
    order: _OrderKACF = ...,
    casting: _CastingSafe = ...,
    optimize: _OptimizeKind = ...,
) -> Any: ...
@overload
def einsum(
    subscripts: str | _ArrayLikeInt_co,
    /,
    *operands: Any,
    casting: _CastingUnsafe,
    dtype: None | _DTypeLikeObject = ...,
    out: None = ...,
    order: _OrderKACF = ...,
    optimize: _OptimizeKind = ...,
) -> Any: ...
@overload
def einsum(
    subscripts: str | _ArrayLikeInt_co,
    /,
    *operands: _ArrayLikeObject_co,
    out: _ArrayType,
    dtype: None | _DTypeLikeObject = ...,
    order: _OrderKACF = ...,
    casting: _CastingSafe = ...,
    optimize: _OptimizeKind = ...,
) -> _ArrayType: ...
|
| 163 |
+
@overload
|
| 164 |
+
def einsum(
|
| 165 |
+
subscripts: str | _ArrayLikeInt_co,
|
| 166 |
+
/,
|
| 167 |
+
*operands: Any,
|
| 168 |
+
out: _ArrayType,
|
| 169 |
+
casting: _CastingUnsafe,
|
| 170 |
+
dtype: None | _DTypeLikeObject = ...,
|
| 171 |
+
order: _OrderKACF = ...,
|
| 172 |
+
optimize: _OptimizeKind = ...,
|
| 173 |
+
) -> _ArrayType: ...
|
| 174 |
+
|
| 175 |
+
# NOTE: `einsum_call` is a hidden kwarg unavailable for public use.
|
| 176 |
+
# It is therefore excluded from the signatures below.
|
| 177 |
+
# NOTE: In practice the list consists of a `str` (first element)
|
| 178 |
+
# and a variable number of integer tuples.
|
| 179 |
+
def einsum_path(
|
| 180 |
+
subscripts: str | _ArrayLikeInt_co,
|
| 181 |
+
/,
|
| 182 |
+
*operands: _ArrayLikeComplex_co | _DTypeLikeObject,
|
| 183 |
+
optimize: _OptimizeKind = ...,
|
| 184 |
+
) -> tuple[list[Any], str]: ...
|
janus/lib/python3.10/site-packages/numpy/_core/fromnumeric.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
janus/lib/python3.10/site-packages/numpy/_core/fromnumeric.pyi
ADDED
|
@@ -0,0 +1,1512 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from collections.abc import Sequence
|
| 2 |
+
from typing import (
|
| 3 |
+
Any,
|
| 4 |
+
Literal,
|
| 5 |
+
NoReturn,
|
| 6 |
+
Protocol,
|
| 7 |
+
SupportsIndex,
|
| 8 |
+
TypeAlias,
|
| 9 |
+
TypeVar,
|
| 10 |
+
overload,
|
| 11 |
+
type_check_only,
|
| 12 |
+
)
|
| 13 |
+
from typing_extensions import Never, deprecated
|
| 14 |
+
|
| 15 |
+
import numpy as np
|
| 16 |
+
from numpy import (
|
| 17 |
+
number,
|
| 18 |
+
uint64,
|
| 19 |
+
int_,
|
| 20 |
+
int64,
|
| 21 |
+
intp,
|
| 22 |
+
float16,
|
| 23 |
+
floating,
|
| 24 |
+
complexfloating,
|
| 25 |
+
timedelta64,
|
| 26 |
+
object_,
|
| 27 |
+
generic,
|
| 28 |
+
_AnyShapeType,
|
| 29 |
+
_OrderKACF,
|
| 30 |
+
_OrderACF,
|
| 31 |
+
_ModeKind,
|
| 32 |
+
_PartitionKind,
|
| 33 |
+
_SortKind,
|
| 34 |
+
_SortSide,
|
| 35 |
+
_CastingKind,
|
| 36 |
+
)
|
| 37 |
+
from numpy._typing import (
|
| 38 |
+
DTypeLike,
|
| 39 |
+
_DTypeLike,
|
| 40 |
+
ArrayLike,
|
| 41 |
+
_ArrayLike,
|
| 42 |
+
NDArray,
|
| 43 |
+
_NestedSequence,
|
| 44 |
+
_ShapeLike,
|
| 45 |
+
_ArrayLikeBool_co,
|
| 46 |
+
_ArrayLikeUInt_co,
|
| 47 |
+
_ArrayLikeInt_co,
|
| 48 |
+
_ArrayLikeFloat_co,
|
| 49 |
+
_ArrayLikeComplex_co,
|
| 50 |
+
_ArrayLikeObject_co,
|
| 51 |
+
_ArrayLikeTD64_co,
|
| 52 |
+
_IntLike_co,
|
| 53 |
+
_BoolLike_co,
|
| 54 |
+
_ComplexLike_co,
|
| 55 |
+
_NumberLike_co,
|
| 56 |
+
_ScalarLike_co,
|
| 57 |
+
)
|
| 58 |
+
|
| 59 |
+
__all__ = [
|
| 60 |
+
"all",
|
| 61 |
+
"amax",
|
| 62 |
+
"amin",
|
| 63 |
+
"any",
|
| 64 |
+
"argmax",
|
| 65 |
+
"argmin",
|
| 66 |
+
"argpartition",
|
| 67 |
+
"argsort",
|
| 68 |
+
"around",
|
| 69 |
+
"choose",
|
| 70 |
+
"clip",
|
| 71 |
+
"compress",
|
| 72 |
+
"cumprod",
|
| 73 |
+
"cumsum",
|
| 74 |
+
"cumulative_prod",
|
| 75 |
+
"cumulative_sum",
|
| 76 |
+
"diagonal",
|
| 77 |
+
"mean",
|
| 78 |
+
"max",
|
| 79 |
+
"min",
|
| 80 |
+
"matrix_transpose",
|
| 81 |
+
"ndim",
|
| 82 |
+
"nonzero",
|
| 83 |
+
"partition",
|
| 84 |
+
"prod",
|
| 85 |
+
"ptp",
|
| 86 |
+
"put",
|
| 87 |
+
"ravel",
|
| 88 |
+
"repeat",
|
| 89 |
+
"reshape",
|
| 90 |
+
"resize",
|
| 91 |
+
"round",
|
| 92 |
+
"searchsorted",
|
| 93 |
+
"shape",
|
| 94 |
+
"size",
|
| 95 |
+
"sort",
|
| 96 |
+
"squeeze",
|
| 97 |
+
"std",
|
| 98 |
+
"sum",
|
| 99 |
+
"swapaxes",
|
| 100 |
+
"take",
|
| 101 |
+
"trace",
|
| 102 |
+
"transpose",
|
| 103 |
+
"var",
|
| 104 |
+
]
|
| 105 |
+
|
| 106 |
+
_SCT = TypeVar("_SCT", bound=generic)
|
| 107 |
+
_SCT_uifcO = TypeVar("_SCT_uifcO", bound=number[Any] | object_)
|
| 108 |
+
_ArrayType = TypeVar("_ArrayType", bound=np.ndarray[Any, Any])
|
| 109 |
+
_SizeType = TypeVar("_SizeType", bound=int)
|
| 110 |
+
_ShapeType = TypeVar("_ShapeType", bound=tuple[int, ...])
|
| 111 |
+
_ShapeType_co = TypeVar("_ShapeType_co", bound=tuple[int, ...], covariant=True)
|
| 112 |
+
|
| 113 |
+
@type_check_only
|
| 114 |
+
class _SupportsShape(Protocol[_ShapeType_co]):
|
| 115 |
+
# NOTE: it matters that `self` is positional only
|
| 116 |
+
@property
|
| 117 |
+
def shape(self, /) -> _ShapeType_co: ...
|
| 118 |
+
|
| 119 |
+
# a "sequence" that isn't a string, bytes, bytearray, or memoryview
|
| 120 |
+
_T = TypeVar("_T")
|
| 121 |
+
_PyArray: TypeAlias = list[_T] | tuple[_T, ...]
|
| 122 |
+
# `int` also covers `bool`
|
| 123 |
+
_PyScalar: TypeAlias = int | float | complex | bytes | str
|
| 124 |
+
|
| 125 |
+
@overload
|
| 126 |
+
def take(
|
| 127 |
+
a: _ArrayLike[_SCT],
|
| 128 |
+
indices: _IntLike_co,
|
| 129 |
+
axis: None = ...,
|
| 130 |
+
out: None = ...,
|
| 131 |
+
mode: _ModeKind = ...,
|
| 132 |
+
) -> _SCT: ...
|
| 133 |
+
@overload
|
| 134 |
+
def take(
|
| 135 |
+
a: ArrayLike,
|
| 136 |
+
indices: _IntLike_co,
|
| 137 |
+
axis: None | SupportsIndex = ...,
|
| 138 |
+
out: None = ...,
|
| 139 |
+
mode: _ModeKind = ...,
|
| 140 |
+
) -> Any: ...
|
| 141 |
+
@overload
|
| 142 |
+
def take(
|
| 143 |
+
a: _ArrayLike[_SCT],
|
| 144 |
+
indices: _ArrayLikeInt_co,
|
| 145 |
+
axis: None | SupportsIndex = ...,
|
| 146 |
+
out: None = ...,
|
| 147 |
+
mode: _ModeKind = ...,
|
| 148 |
+
) -> NDArray[_SCT]: ...
|
| 149 |
+
@overload
|
| 150 |
+
def take(
|
| 151 |
+
a: ArrayLike,
|
| 152 |
+
indices: _ArrayLikeInt_co,
|
| 153 |
+
axis: None | SupportsIndex = ...,
|
| 154 |
+
out: None = ...,
|
| 155 |
+
mode: _ModeKind = ...,
|
| 156 |
+
) -> NDArray[Any]: ...
|
| 157 |
+
@overload
|
| 158 |
+
def take(
|
| 159 |
+
a: ArrayLike,
|
| 160 |
+
indices: _ArrayLikeInt_co,
|
| 161 |
+
axis: None | SupportsIndex = ...,
|
| 162 |
+
out: _ArrayType = ...,
|
| 163 |
+
mode: _ModeKind = ...,
|
| 164 |
+
) -> _ArrayType: ...
|
| 165 |
+
|
| 166 |
+
@overload
|
| 167 |
+
def reshape( # shape: index
|
| 168 |
+
a: _ArrayLike[_SCT],
|
| 169 |
+
/,
|
| 170 |
+
shape: SupportsIndex,
|
| 171 |
+
order: _OrderACF = "C",
|
| 172 |
+
*,
|
| 173 |
+
copy: bool | None = None,
|
| 174 |
+
) -> np.ndarray[tuple[int], np.dtype[_SCT]]: ...
|
| 175 |
+
@overload
|
| 176 |
+
def reshape( # shape: (int, ...) @ _AnyShapeType
|
| 177 |
+
a: _ArrayLike[_SCT],
|
| 178 |
+
/,
|
| 179 |
+
shape: _AnyShapeType,
|
| 180 |
+
order: _OrderACF = "C",
|
| 181 |
+
*,
|
| 182 |
+
copy: bool | None = None,
|
| 183 |
+
) -> np.ndarray[_AnyShapeType, np.dtype[_SCT]]: ...
|
| 184 |
+
@overload # shape: Sequence[index]
|
| 185 |
+
def reshape(
|
| 186 |
+
a: _ArrayLike[_SCT],
|
| 187 |
+
/,
|
| 188 |
+
shape: Sequence[SupportsIndex],
|
| 189 |
+
order: _OrderACF = "C",
|
| 190 |
+
*,
|
| 191 |
+
copy: bool | None = None,
|
| 192 |
+
) -> NDArray[_SCT]: ...
|
| 193 |
+
@overload # shape: index
|
| 194 |
+
def reshape(
|
| 195 |
+
a: ArrayLike,
|
| 196 |
+
/,
|
| 197 |
+
shape: SupportsIndex,
|
| 198 |
+
order: _OrderACF = "C",
|
| 199 |
+
*,
|
| 200 |
+
copy: bool | None = None,
|
| 201 |
+
) -> np.ndarray[tuple[int], np.dtype[Any]]: ...
|
| 202 |
+
@overload
|
| 203 |
+
def reshape( # shape: (int, ...) @ _AnyShapeType
|
| 204 |
+
a: ArrayLike,
|
| 205 |
+
/,
|
| 206 |
+
shape: _AnyShapeType,
|
| 207 |
+
order: _OrderACF = "C",
|
| 208 |
+
*,
|
| 209 |
+
copy: bool | None = None,
|
| 210 |
+
) -> np.ndarray[_AnyShapeType, np.dtype[Any]]: ...
|
| 211 |
+
@overload # shape: Sequence[index]
|
| 212 |
+
def reshape(
|
| 213 |
+
a: ArrayLike,
|
| 214 |
+
/,
|
| 215 |
+
shape: Sequence[SupportsIndex],
|
| 216 |
+
order: _OrderACF = "C",
|
| 217 |
+
*,
|
| 218 |
+
copy: bool | None = None,
|
| 219 |
+
) -> NDArray[Any]: ...
|
| 220 |
+
@overload
|
| 221 |
+
@deprecated(
|
| 222 |
+
"`newshape` keyword argument is deprecated, "
|
| 223 |
+
"use `shape=...` or pass shape positionally instead. "
|
| 224 |
+
"(deprecated in NumPy 2.1)",
|
| 225 |
+
)
|
| 226 |
+
def reshape(
|
| 227 |
+
a: ArrayLike,
|
| 228 |
+
/,
|
| 229 |
+
shape: None = None,
|
| 230 |
+
order: _OrderACF = "C",
|
| 231 |
+
*,
|
| 232 |
+
newshape: _ShapeLike,
|
| 233 |
+
copy: bool | None = None,
|
| 234 |
+
) -> NDArray[Any]: ...
|
| 235 |
+
|
| 236 |
+
@overload
|
| 237 |
+
def choose(
|
| 238 |
+
a: _IntLike_co,
|
| 239 |
+
choices: ArrayLike,
|
| 240 |
+
out: None = ...,
|
| 241 |
+
mode: _ModeKind = ...,
|
| 242 |
+
) -> Any: ...
|
| 243 |
+
@overload
|
| 244 |
+
def choose(
|
| 245 |
+
a: _ArrayLikeInt_co,
|
| 246 |
+
choices: _ArrayLike[_SCT],
|
| 247 |
+
out: None = ...,
|
| 248 |
+
mode: _ModeKind = ...,
|
| 249 |
+
) -> NDArray[_SCT]: ...
|
| 250 |
+
@overload
|
| 251 |
+
def choose(
|
| 252 |
+
a: _ArrayLikeInt_co,
|
| 253 |
+
choices: ArrayLike,
|
| 254 |
+
out: None = ...,
|
| 255 |
+
mode: _ModeKind = ...,
|
| 256 |
+
) -> NDArray[Any]: ...
|
| 257 |
+
@overload
|
| 258 |
+
def choose(
|
| 259 |
+
a: _ArrayLikeInt_co,
|
| 260 |
+
choices: ArrayLike,
|
| 261 |
+
out: _ArrayType = ...,
|
| 262 |
+
mode: _ModeKind = ...,
|
| 263 |
+
) -> _ArrayType: ...
|
| 264 |
+
|
| 265 |
+
@overload
|
| 266 |
+
def repeat(
|
| 267 |
+
a: _ArrayLike[_SCT],
|
| 268 |
+
repeats: _ArrayLikeInt_co,
|
| 269 |
+
axis: None | SupportsIndex = ...,
|
| 270 |
+
) -> NDArray[_SCT]: ...
|
| 271 |
+
@overload
|
| 272 |
+
def repeat(
|
| 273 |
+
a: ArrayLike,
|
| 274 |
+
repeats: _ArrayLikeInt_co,
|
| 275 |
+
axis: None | SupportsIndex = ...,
|
| 276 |
+
) -> NDArray[Any]: ...
|
| 277 |
+
|
| 278 |
+
def put(
|
| 279 |
+
a: NDArray[Any],
|
| 280 |
+
ind: _ArrayLikeInt_co,
|
| 281 |
+
v: ArrayLike,
|
| 282 |
+
mode: _ModeKind = ...,
|
| 283 |
+
) -> None: ...
|
| 284 |
+
|
| 285 |
+
@overload
|
| 286 |
+
def swapaxes(
|
| 287 |
+
a: _ArrayLike[_SCT],
|
| 288 |
+
axis1: SupportsIndex,
|
| 289 |
+
axis2: SupportsIndex,
|
| 290 |
+
) -> NDArray[_SCT]: ...
|
| 291 |
+
@overload
|
| 292 |
+
def swapaxes(
|
| 293 |
+
a: ArrayLike,
|
| 294 |
+
axis1: SupportsIndex,
|
| 295 |
+
axis2: SupportsIndex,
|
| 296 |
+
) -> NDArray[Any]: ...
|
| 297 |
+
|
| 298 |
+
@overload
|
| 299 |
+
def transpose(
|
| 300 |
+
a: _ArrayLike[_SCT],
|
| 301 |
+
axes: None | _ShapeLike = ...
|
| 302 |
+
) -> NDArray[_SCT]: ...
|
| 303 |
+
@overload
|
| 304 |
+
def transpose(
|
| 305 |
+
a: ArrayLike,
|
| 306 |
+
axes: None | _ShapeLike = ...
|
| 307 |
+
) -> NDArray[Any]: ...
|
| 308 |
+
|
| 309 |
+
@overload
|
| 310 |
+
def matrix_transpose(x: _ArrayLike[_SCT]) -> NDArray[_SCT]: ...
|
| 311 |
+
@overload
|
| 312 |
+
def matrix_transpose(x: ArrayLike) -> NDArray[Any]: ...
|
| 313 |
+
|
| 314 |
+
@overload
|
| 315 |
+
def partition(
|
| 316 |
+
a: _ArrayLike[_SCT],
|
| 317 |
+
kth: _ArrayLikeInt_co,
|
| 318 |
+
axis: None | SupportsIndex = ...,
|
| 319 |
+
kind: _PartitionKind = ...,
|
| 320 |
+
order: None | str | Sequence[str] = ...,
|
| 321 |
+
) -> NDArray[_SCT]: ...
|
| 322 |
+
@overload
|
| 323 |
+
def partition(
|
| 324 |
+
a: ArrayLike,
|
| 325 |
+
kth: _ArrayLikeInt_co,
|
| 326 |
+
axis: None | SupportsIndex = ...,
|
| 327 |
+
kind: _PartitionKind = ...,
|
| 328 |
+
order: None | str | Sequence[str] = ...,
|
| 329 |
+
) -> NDArray[Any]: ...
|
| 330 |
+
|
| 331 |
+
def argpartition(
|
| 332 |
+
a: ArrayLike,
|
| 333 |
+
kth: _ArrayLikeInt_co,
|
| 334 |
+
axis: None | SupportsIndex = ...,
|
| 335 |
+
kind: _PartitionKind = ...,
|
| 336 |
+
order: None | str | Sequence[str] = ...,
|
| 337 |
+
) -> NDArray[intp]: ...
|
| 338 |
+
|
| 339 |
+
@overload
|
| 340 |
+
def sort(
|
| 341 |
+
a: _ArrayLike[_SCT],
|
| 342 |
+
axis: None | SupportsIndex = ...,
|
| 343 |
+
kind: None | _SortKind = ...,
|
| 344 |
+
order: None | str | Sequence[str] = ...,
|
| 345 |
+
*,
|
| 346 |
+
stable: None | bool = ...,
|
| 347 |
+
) -> NDArray[_SCT]: ...
|
| 348 |
+
@overload
|
| 349 |
+
def sort(
|
| 350 |
+
a: ArrayLike,
|
| 351 |
+
axis: None | SupportsIndex = ...,
|
| 352 |
+
kind: None | _SortKind = ...,
|
| 353 |
+
order: None | str | Sequence[str] = ...,
|
| 354 |
+
*,
|
| 355 |
+
stable: None | bool = ...,
|
| 356 |
+
) -> NDArray[Any]: ...
|
| 357 |
+
|
| 358 |
+
def argsort(
|
| 359 |
+
a: ArrayLike,
|
| 360 |
+
axis: None | SupportsIndex = ...,
|
| 361 |
+
kind: None | _SortKind = ...,
|
| 362 |
+
order: None | str | Sequence[str] = ...,
|
| 363 |
+
*,
|
| 364 |
+
stable: None | bool = ...,
|
| 365 |
+
) -> NDArray[intp]: ...
|
| 366 |
+
|
| 367 |
+
@overload
|
| 368 |
+
def argmax(
|
| 369 |
+
a: ArrayLike,
|
| 370 |
+
axis: None = ...,
|
| 371 |
+
out: None = ...,
|
| 372 |
+
*,
|
| 373 |
+
keepdims: Literal[False] = ...,
|
| 374 |
+
) -> intp: ...
|
| 375 |
+
@overload
|
| 376 |
+
def argmax(
|
| 377 |
+
a: ArrayLike,
|
| 378 |
+
axis: None | SupportsIndex = ...,
|
| 379 |
+
out: None = ...,
|
| 380 |
+
*,
|
| 381 |
+
keepdims: bool = ...,
|
| 382 |
+
) -> Any: ...
|
| 383 |
+
@overload
|
| 384 |
+
def argmax(
|
| 385 |
+
a: ArrayLike,
|
| 386 |
+
axis: None | SupportsIndex = ...,
|
| 387 |
+
out: _ArrayType = ...,
|
| 388 |
+
*,
|
| 389 |
+
keepdims: bool = ...,
|
| 390 |
+
) -> _ArrayType: ...
|
| 391 |
+
|
| 392 |
+
@overload
|
| 393 |
+
def argmin(
|
| 394 |
+
a: ArrayLike,
|
| 395 |
+
axis: None = ...,
|
| 396 |
+
out: None = ...,
|
| 397 |
+
*,
|
| 398 |
+
keepdims: Literal[False] = ...,
|
| 399 |
+
) -> intp: ...
|
| 400 |
+
@overload
|
| 401 |
+
def argmin(
|
| 402 |
+
a: ArrayLike,
|
| 403 |
+
axis: None | SupportsIndex = ...,
|
| 404 |
+
out: None = ...,
|
| 405 |
+
*,
|
| 406 |
+
keepdims: bool = ...,
|
| 407 |
+
) -> Any: ...
|
| 408 |
+
@overload
|
| 409 |
+
def argmin(
|
| 410 |
+
a: ArrayLike,
|
| 411 |
+
axis: None | SupportsIndex = ...,
|
| 412 |
+
out: _ArrayType = ...,
|
| 413 |
+
*,
|
| 414 |
+
keepdims: bool = ...,
|
| 415 |
+
) -> _ArrayType: ...
|
| 416 |
+
|
| 417 |
+
@overload
|
| 418 |
+
def searchsorted(
|
| 419 |
+
a: ArrayLike,
|
| 420 |
+
v: _ScalarLike_co,
|
| 421 |
+
side: _SortSide = ...,
|
| 422 |
+
sorter: None | _ArrayLikeInt_co = ..., # 1D int array
|
| 423 |
+
) -> intp: ...
|
| 424 |
+
@overload
|
| 425 |
+
def searchsorted(
|
| 426 |
+
a: ArrayLike,
|
| 427 |
+
v: ArrayLike,
|
| 428 |
+
side: _SortSide = ...,
|
| 429 |
+
sorter: None | _ArrayLikeInt_co = ..., # 1D int array
|
| 430 |
+
) -> NDArray[intp]: ...
|
| 431 |
+
|
| 432 |
+
# unlike `reshape`, `resize` only accepts positive integers, so literal ints can be used
|
| 433 |
+
@overload
|
| 434 |
+
def resize(a: _ArrayLike[_SCT], new_shape: _SizeType) -> np.ndarray[tuple[_SizeType], np.dtype[_SCT]]: ...
|
| 435 |
+
@overload
|
| 436 |
+
def resize(a: _ArrayLike[_SCT], new_shape: SupportsIndex) -> np.ndarray[tuple[int], np.dtype[_SCT]]: ...
|
| 437 |
+
@overload
|
| 438 |
+
def resize(a: _ArrayLike[_SCT], new_shape: _ShapeType) -> np.ndarray[_ShapeType, np.dtype[_SCT]]: ...
|
| 439 |
+
@overload
|
| 440 |
+
def resize(a: _ArrayLike[_SCT], new_shape: Sequence[SupportsIndex]) -> NDArray[_SCT]: ...
|
| 441 |
+
@overload
|
| 442 |
+
def resize(a: ArrayLike, new_shape: _SizeType) -> np.ndarray[tuple[_SizeType], np.dtype[Any]]: ...
|
| 443 |
+
@overload
|
| 444 |
+
def resize(a: ArrayLike, new_shape: SupportsIndex) -> np.ndarray[tuple[int], np.dtype[Any]]: ...
|
| 445 |
+
@overload
|
| 446 |
+
def resize(a: ArrayLike, new_shape: _ShapeType) -> np.ndarray[_ShapeType, np.dtype[Any]]: ...
|
| 447 |
+
@overload
|
| 448 |
+
def resize(a: ArrayLike, new_shape: Sequence[SupportsIndex]) -> NDArray[Any]: ...
|
| 449 |
+
|
| 450 |
+
@overload
|
| 451 |
+
def squeeze(
|
| 452 |
+
a: _SCT,
|
| 453 |
+
axis: None | _ShapeLike = ...,
|
| 454 |
+
) -> _SCT: ...
|
| 455 |
+
@overload
|
| 456 |
+
def squeeze(
|
| 457 |
+
a: _ArrayLike[_SCT],
|
| 458 |
+
axis: None | _ShapeLike = ...,
|
| 459 |
+
) -> NDArray[_SCT]: ...
|
| 460 |
+
@overload
|
| 461 |
+
def squeeze(
|
| 462 |
+
a: ArrayLike,
|
| 463 |
+
axis: None | _ShapeLike = ...,
|
| 464 |
+
) -> NDArray[Any]: ...
|
| 465 |
+
|
| 466 |
+
@overload
|
| 467 |
+
def diagonal(
|
| 468 |
+
a: _ArrayLike[_SCT],
|
| 469 |
+
offset: SupportsIndex = ...,
|
| 470 |
+
axis1: SupportsIndex = ...,
|
| 471 |
+
axis2: SupportsIndex = ..., # >= 2D array
|
| 472 |
+
) -> NDArray[_SCT]: ...
|
| 473 |
+
@overload
|
| 474 |
+
def diagonal(
|
| 475 |
+
a: ArrayLike,
|
| 476 |
+
offset: SupportsIndex = ...,
|
| 477 |
+
axis1: SupportsIndex = ...,
|
| 478 |
+
axis2: SupportsIndex = ..., # >= 2D array
|
| 479 |
+
) -> NDArray[Any]: ...
|
| 480 |
+
|
| 481 |
+
@overload
|
| 482 |
+
def trace(
|
| 483 |
+
a: ArrayLike, # >= 2D array
|
| 484 |
+
offset: SupportsIndex = ...,
|
| 485 |
+
axis1: SupportsIndex = ...,
|
| 486 |
+
axis2: SupportsIndex = ...,
|
| 487 |
+
dtype: DTypeLike = ...,
|
| 488 |
+
out: None = ...,
|
| 489 |
+
) -> Any: ...
|
| 490 |
+
@overload
|
| 491 |
+
def trace(
|
| 492 |
+
a: ArrayLike, # >= 2D array
|
| 493 |
+
offset: SupportsIndex = ...,
|
| 494 |
+
axis1: SupportsIndex = ...,
|
| 495 |
+
axis2: SupportsIndex = ...,
|
| 496 |
+
dtype: DTypeLike = ...,
|
| 497 |
+
out: _ArrayType = ...,
|
| 498 |
+
) -> _ArrayType: ...
|
| 499 |
+
|
| 500 |
+
_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_SCT]]
|
| 501 |
+
|
| 502 |
+
@overload
|
| 503 |
+
def ravel(a: _ArrayLike[_SCT], order: _OrderKACF = "C") -> _Array1D[_SCT]: ...
|
| 504 |
+
@overload
|
| 505 |
+
def ravel(a: bytes | _NestedSequence[bytes], order: _OrderKACF = "C") -> _Array1D[np.bytes_]: ...
|
| 506 |
+
@overload
|
| 507 |
+
def ravel(a: str | _NestedSequence[str], order: _OrderKACF = "C") -> _Array1D[np.str_]: ...
|
| 508 |
+
@overload
|
| 509 |
+
def ravel(a: bool | _NestedSequence[bool], order: _OrderKACF = "C") -> _Array1D[np.bool]: ...
|
| 510 |
+
@overload
|
| 511 |
+
def ravel(a: int | _NestedSequence[int], order: _OrderKACF = "C") -> _Array1D[np.int_ | np.bool]: ...
|
| 512 |
+
@overload
|
| 513 |
+
def ravel(a: float | _NestedSequence[float], order: _OrderKACF = "C") -> _Array1D[np.float64 | np.int_ | np.bool]: ...
|
| 514 |
+
@overload
|
| 515 |
+
def ravel(
|
| 516 |
+
a: complex | _NestedSequence[complex],
|
| 517 |
+
order: _OrderKACF = "C",
|
| 518 |
+
) -> _Array1D[np.complex128 | np.float64 | np.int_ | np.bool]: ...
|
| 519 |
+
@overload
|
| 520 |
+
def ravel(a: ArrayLike, order: _OrderKACF = "C") -> np.ndarray[tuple[int], np.dtype[Any]]: ...
|
| 521 |
+
|
| 522 |
+
@overload
|
| 523 |
+
def nonzero(a: np.generic | np.ndarray[tuple[()], Any]) -> NoReturn: ...
|
| 524 |
+
@overload
|
| 525 |
+
def nonzero(a: _ArrayLike[Any]) -> tuple[NDArray[intp], ...]: ...
|
| 526 |
+
|
| 527 |
+
# this prevents `Any` from being returned with Pyright
|
| 528 |
+
@overload
|
| 529 |
+
def shape(a: _SupportsShape[Never]) -> tuple[int, ...]: ...
|
| 530 |
+
@overload
|
| 531 |
+
def shape(a: _SupportsShape[_ShapeType]) -> _ShapeType: ...
|
| 532 |
+
@overload
|
| 533 |
+
def shape(a: _PyScalar) -> tuple[()]: ...
|
| 534 |
+
# `collections.abc.Sequence` can't be used hesre, since `bytes` and `str` are
|
| 535 |
+
# subtypes of it, which would make the return types incompatible.
|
| 536 |
+
@overload
|
| 537 |
+
def shape(a: _PyArray[_PyScalar]) -> tuple[int]: ...
|
| 538 |
+
@overload
|
| 539 |
+
def shape(a: _PyArray[_PyArray[_PyScalar]]) -> tuple[int, int]: ...
|
| 540 |
+
# this overload will be skipped by typecheckers that don't support PEP 688
|
| 541 |
+
@overload
|
| 542 |
+
def shape(a: memoryview | bytearray) -> tuple[int]: ...
|
| 543 |
+
@overload
|
| 544 |
+
def shape(a: ArrayLike) -> tuple[int, ...]: ...
|
| 545 |
+
|
| 546 |
+
@overload
|
| 547 |
+
def compress(
|
| 548 |
+
condition: _ArrayLikeBool_co, # 1D bool array
|
| 549 |
+
a: _ArrayLike[_SCT],
|
| 550 |
+
axis: None | SupportsIndex = ...,
|
| 551 |
+
out: None = ...,
|
| 552 |
+
) -> NDArray[_SCT]: ...
|
| 553 |
+
@overload
|
| 554 |
+
def compress(
|
| 555 |
+
condition: _ArrayLikeBool_co, # 1D bool array
|
| 556 |
+
a: ArrayLike,
|
| 557 |
+
axis: None | SupportsIndex = ...,
|
| 558 |
+
out: None = ...,
|
| 559 |
+
) -> NDArray[Any]: ...
|
| 560 |
+
@overload
|
| 561 |
+
def compress(
|
| 562 |
+
condition: _ArrayLikeBool_co, # 1D bool array
|
| 563 |
+
a: ArrayLike,
|
| 564 |
+
axis: None | SupportsIndex = ...,
|
| 565 |
+
out: _ArrayType = ...,
|
| 566 |
+
) -> _ArrayType: ...
|
| 567 |
+
|
| 568 |
+
@overload
|
| 569 |
+
def clip(
|
| 570 |
+
a: _SCT,
|
| 571 |
+
a_min: None | ArrayLike,
|
| 572 |
+
a_max: None | ArrayLike,
|
| 573 |
+
out: None = ...,
|
| 574 |
+
*,
|
| 575 |
+
min: None | ArrayLike = ...,
|
| 576 |
+
max: None | ArrayLike = ...,
|
| 577 |
+
dtype: None = ...,
|
| 578 |
+
where: None | _ArrayLikeBool_co = ...,
|
| 579 |
+
order: _OrderKACF = ...,
|
| 580 |
+
subok: bool = ...,
|
| 581 |
+
signature: str | tuple[None | str, ...] = ...,
|
| 582 |
+
casting: _CastingKind = ...,
|
| 583 |
+
) -> _SCT: ...
|
| 584 |
+
@overload
|
| 585 |
+
def clip(
|
| 586 |
+
a: _ScalarLike_co,
|
| 587 |
+
a_min: None | ArrayLike,
|
| 588 |
+
a_max: None | ArrayLike,
|
| 589 |
+
out: None = ...,
|
| 590 |
+
*,
|
| 591 |
+
min: None | ArrayLike = ...,
|
| 592 |
+
max: None | ArrayLike = ...,
|
| 593 |
+
dtype: None = ...,
|
| 594 |
+
where: None | _ArrayLikeBool_co = ...,
|
| 595 |
+
order: _OrderKACF = ...,
|
| 596 |
+
subok: bool = ...,
|
| 597 |
+
signature: str | tuple[None | str, ...] = ...,
|
| 598 |
+
casting: _CastingKind = ...,
|
| 599 |
+
) -> Any: ...
|
| 600 |
+
@overload
|
| 601 |
+
def clip(
|
| 602 |
+
a: _ArrayLike[_SCT],
|
| 603 |
+
a_min: None | ArrayLike,
|
| 604 |
+
a_max: None | ArrayLike,
|
| 605 |
+
out: None = ...,
|
| 606 |
+
*,
|
| 607 |
+
min: None | ArrayLike = ...,
|
| 608 |
+
max: None | ArrayLike = ...,
|
| 609 |
+
dtype: None = ...,
|
| 610 |
+
where: None | _ArrayLikeBool_co = ...,
|
| 611 |
+
order: _OrderKACF = ...,
|
| 612 |
+
subok: bool = ...,
|
| 613 |
+
signature: str | tuple[None | str, ...] = ...,
|
| 614 |
+
casting: _CastingKind = ...,
|
| 615 |
+
) -> NDArray[_SCT]: ...
|
| 616 |
+
@overload
|
| 617 |
+
def clip(
|
| 618 |
+
a: ArrayLike,
|
| 619 |
+
a_min: None | ArrayLike,
|
| 620 |
+
a_max: None | ArrayLike,
|
| 621 |
+
out: None = ...,
|
| 622 |
+
*,
|
| 623 |
+
min: None | ArrayLike = ...,
|
| 624 |
+
max: None | ArrayLike = ...,
|
| 625 |
+
dtype: None = ...,
|
| 626 |
+
where: None | _ArrayLikeBool_co = ...,
|
| 627 |
+
order: _OrderKACF = ...,
|
| 628 |
+
subok: bool = ...,
|
| 629 |
+
signature: str | tuple[None | str, ...] = ...,
|
| 630 |
+
casting: _CastingKind = ...,
|
| 631 |
+
) -> NDArray[Any]: ...
|
| 632 |
+
@overload
|
| 633 |
+
def clip(
|
| 634 |
+
a: ArrayLike,
|
| 635 |
+
a_min: None | ArrayLike,
|
| 636 |
+
a_max: None | ArrayLike,
|
| 637 |
+
out: _ArrayType = ...,
|
| 638 |
+
*,
|
| 639 |
+
min: None | ArrayLike = ...,
|
| 640 |
+
max: None | ArrayLike = ...,
|
| 641 |
+
dtype: DTypeLike,
|
| 642 |
+
where: None | _ArrayLikeBool_co = ...,
|
| 643 |
+
order: _OrderKACF = ...,
|
| 644 |
+
subok: bool = ...,
|
| 645 |
+
signature: str | tuple[None | str, ...] = ...,
|
| 646 |
+
casting: _CastingKind = ...,
|
| 647 |
+
) -> Any: ...
|
| 648 |
+
@overload
|
| 649 |
+
def clip(
|
| 650 |
+
a: ArrayLike,
|
| 651 |
+
a_min: None | ArrayLike,
|
| 652 |
+
a_max: None | ArrayLike,
|
| 653 |
+
out: _ArrayType,
|
| 654 |
+
*,
|
| 655 |
+
min: None | ArrayLike = ...,
|
| 656 |
+
max: None | ArrayLike = ...,
|
| 657 |
+
dtype: DTypeLike = ...,
|
| 658 |
+
where: None | _ArrayLikeBool_co = ...,
|
| 659 |
+
order: _OrderKACF = ...,
|
| 660 |
+
subok: bool = ...,
|
| 661 |
+
signature: str | tuple[None | str, ...] = ...,
|
| 662 |
+
casting: _CastingKind = ...,
|
| 663 |
+
) -> _ArrayType: ...
|
| 664 |
+
|
| 665 |
+
@overload
|
| 666 |
+
def sum(
|
| 667 |
+
a: _ArrayLike[_SCT],
|
| 668 |
+
axis: None = ...,
|
| 669 |
+
dtype: None = ...,
|
| 670 |
+
out: None = ...,
|
| 671 |
+
keepdims: Literal[False] = ...,
|
| 672 |
+
initial: _NumberLike_co = ...,
|
| 673 |
+
where: _ArrayLikeBool_co = ...,
|
| 674 |
+
) -> _SCT: ...
|
| 675 |
+
@overload
|
| 676 |
+
def sum(
|
| 677 |
+
a: _ArrayLike[_SCT],
|
| 678 |
+
axis: None = ...,
|
| 679 |
+
dtype: None = ...,
|
| 680 |
+
out: None = ...,
|
| 681 |
+
keepdims: bool = ...,
|
| 682 |
+
initial: _NumberLike_co = ...,
|
| 683 |
+
where: _ArrayLikeBool_co = ...,
|
| 684 |
+
) -> _SCT | NDArray[_SCT]: ...
|
| 685 |
+
@overload
|
| 686 |
+
def sum(
|
| 687 |
+
a: ArrayLike,
|
| 688 |
+
axis: None,
|
| 689 |
+
dtype: _DTypeLike[_SCT],
|
| 690 |
+
out: None = ...,
|
| 691 |
+
keepdims: Literal[False] = ...,
|
| 692 |
+
initial: _NumberLike_co = ...,
|
| 693 |
+
where: _ArrayLikeBool_co = ...,
|
| 694 |
+
) -> _SCT: ...
|
| 695 |
+
@overload
|
| 696 |
+
def sum(
|
| 697 |
+
a: ArrayLike,
|
| 698 |
+
axis: None = ...,
|
| 699 |
+
*,
|
| 700 |
+
dtype: _DTypeLike[_SCT],
|
| 701 |
+
out: None = ...,
|
| 702 |
+
keepdims: Literal[False] = ...,
|
| 703 |
+
initial: _NumberLike_co = ...,
|
| 704 |
+
where: _ArrayLikeBool_co = ...,
|
| 705 |
+
) -> _SCT: ...
|
| 706 |
+
@overload
|
| 707 |
+
def sum(
|
| 708 |
+
a: ArrayLike,
|
| 709 |
+
axis: None | _ShapeLike,
|
| 710 |
+
dtype: _DTypeLike[_SCT],
|
| 711 |
+
out: None = ...,
|
| 712 |
+
keepdims: bool = ...,
|
| 713 |
+
initial: _NumberLike_co = ...,
|
| 714 |
+
where: _ArrayLikeBool_co = ...,
|
| 715 |
+
) -> _SCT | NDArray[_SCT]: ...
|
| 716 |
+
@overload
|
| 717 |
+
def sum(
|
| 718 |
+
a: ArrayLike,
|
| 719 |
+
axis: None | _ShapeLike = ...,
|
| 720 |
+
*,
|
| 721 |
+
dtype: _DTypeLike[_SCT],
|
| 722 |
+
out: None = ...,
|
| 723 |
+
keepdims: bool = ...,
|
| 724 |
+
initial: _NumberLike_co = ...,
|
| 725 |
+
where: _ArrayLikeBool_co = ...,
|
| 726 |
+
) -> _SCT | NDArray[_SCT]: ...
|
| 727 |
+
@overload
|
| 728 |
+
def sum(
|
| 729 |
+
a: ArrayLike,
|
| 730 |
+
axis: None | _ShapeLike = ...,
|
| 731 |
+
dtype: DTypeLike = ...,
|
| 732 |
+
out: None = ...,
|
| 733 |
+
keepdims: bool = ...,
|
| 734 |
+
initial: _NumberLike_co = ...,
|
| 735 |
+
where: _ArrayLikeBool_co = ...,
|
| 736 |
+
) -> Any: ...
|
| 737 |
+
@overload
|
| 738 |
+
def sum(
|
| 739 |
+
a: ArrayLike,
|
| 740 |
+
axis: None | _ShapeLike = ...,
|
| 741 |
+
dtype: DTypeLike = ...,
|
| 742 |
+
out: _ArrayType = ...,
|
| 743 |
+
keepdims: bool = ...,
|
| 744 |
+
initial: _NumberLike_co = ...,
|
| 745 |
+
where: _ArrayLikeBool_co = ...,
|
| 746 |
+
) -> _ArrayType: ...
|
| 747 |
+
|
| 748 |
+
@overload
|
| 749 |
+
def all(
|
| 750 |
+
a: ArrayLike,
|
| 751 |
+
axis: None = None,
|
| 752 |
+
out: None = None,
|
| 753 |
+
keepdims: Literal[False, 0] = False,
|
| 754 |
+
*,
|
| 755 |
+
where: _ArrayLikeBool_co = True,
|
| 756 |
+
) -> np.bool: ...
|
| 757 |
+
@overload
|
| 758 |
+
def all(
|
| 759 |
+
a: ArrayLike,
|
| 760 |
+
axis: None | int | tuple[int, ...] = None,
|
| 761 |
+
out: None = None,
|
| 762 |
+
keepdims: SupportsIndex = False,
|
| 763 |
+
*,
|
| 764 |
+
where: _ArrayLikeBool_co = True,
|
| 765 |
+
) -> np.bool | NDArray[np.bool]: ...
|
| 766 |
+
@overload
|
| 767 |
+
def all(
|
| 768 |
+
a: ArrayLike,
|
| 769 |
+
axis: None | int | tuple[int, ...],
|
| 770 |
+
out: _ArrayType,
|
| 771 |
+
keepdims: SupportsIndex = False,
|
| 772 |
+
*,
|
| 773 |
+
where: _ArrayLikeBool_co = True,
|
| 774 |
+
) -> _ArrayType: ...
|
| 775 |
+
@overload
|
| 776 |
+
def all(
|
| 777 |
+
a: ArrayLike,
|
| 778 |
+
axis: None | int | tuple[int, ...] = None,
|
| 779 |
+
*,
|
| 780 |
+
out: _ArrayType,
|
| 781 |
+
keepdims: SupportsIndex = False,
|
| 782 |
+
where: _ArrayLikeBool_co = True,
|
| 783 |
+
) -> _ArrayType: ...
|
| 784 |
+
|
| 785 |
+
@overload
|
| 786 |
+
def any(
|
| 787 |
+
a: ArrayLike,
|
| 788 |
+
axis: None = None,
|
| 789 |
+
out: None = None,
|
| 790 |
+
keepdims: Literal[False, 0] = False,
|
| 791 |
+
*,
|
| 792 |
+
where: _ArrayLikeBool_co = True,
|
| 793 |
+
) -> np.bool: ...
|
| 794 |
+
@overload
|
| 795 |
+
def any(
|
| 796 |
+
a: ArrayLike,
|
| 797 |
+
axis: None | int | tuple[int, ...] = None,
|
| 798 |
+
out: None = None,
|
| 799 |
+
keepdims: SupportsIndex = False,
|
| 800 |
+
*,
|
| 801 |
+
where: _ArrayLikeBool_co = True,
|
| 802 |
+
) -> np.bool | NDArray[np.bool]: ...
|
| 803 |
+
@overload
|
| 804 |
+
def any(
|
| 805 |
+
a: ArrayLike,
|
| 806 |
+
axis: None | int | tuple[int, ...],
|
| 807 |
+
out: _ArrayType,
|
| 808 |
+
keepdims: SupportsIndex = False,
|
| 809 |
+
*,
|
| 810 |
+
where: _ArrayLikeBool_co = True,
|
| 811 |
+
) -> _ArrayType: ...
|
| 812 |
+
@overload
|
| 813 |
+
def any(
|
| 814 |
+
a: ArrayLike,
|
| 815 |
+
axis: None | int | tuple[int, ...] = None,
|
| 816 |
+
*,
|
| 817 |
+
out: _ArrayType,
|
| 818 |
+
keepdims: SupportsIndex = False,
|
| 819 |
+
where: _ArrayLikeBool_co = True,
|
| 820 |
+
) -> _ArrayType: ...
|
| 821 |
+
|
| 822 |
+
@overload
|
| 823 |
+
def cumsum(
|
| 824 |
+
a: _ArrayLike[_SCT],
|
| 825 |
+
axis: None | SupportsIndex = ...,
|
| 826 |
+
dtype: None = ...,
|
| 827 |
+
out: None = ...,
|
| 828 |
+
) -> NDArray[_SCT]: ...
|
| 829 |
+
@overload
|
| 830 |
+
def cumsum(
|
| 831 |
+
a: ArrayLike,
|
| 832 |
+
axis: None | SupportsIndex = ...,
|
| 833 |
+
dtype: None = ...,
|
| 834 |
+
out: None = ...,
|
| 835 |
+
) -> NDArray[Any]: ...
|
| 836 |
+
@overload
|
| 837 |
+
def cumsum(
|
| 838 |
+
a: ArrayLike,
|
| 839 |
+
axis: None | SupportsIndex = ...,
|
| 840 |
+
dtype: _DTypeLike[_SCT] = ...,
|
| 841 |
+
out: None = ...,
|
| 842 |
+
) -> NDArray[_SCT]: ...
|
| 843 |
+
@overload
|
| 844 |
+
def cumsum(
|
| 845 |
+
a: ArrayLike,
|
| 846 |
+
axis: None | SupportsIndex = ...,
|
| 847 |
+
dtype: DTypeLike = ...,
|
| 848 |
+
out: None = ...,
|
| 849 |
+
) -> NDArray[Any]: ...
|
| 850 |
+
@overload
|
| 851 |
+
def cumsum(
|
| 852 |
+
a: ArrayLike,
|
| 853 |
+
axis: None | SupportsIndex = ...,
|
| 854 |
+
dtype: DTypeLike = ...,
|
| 855 |
+
out: _ArrayType = ...,
|
| 856 |
+
) -> _ArrayType: ...
|
| 857 |
+
|
| 858 |
+
@overload
|
| 859 |
+
def cumulative_sum(
|
| 860 |
+
x: _ArrayLike[_SCT],
|
| 861 |
+
/,
|
| 862 |
+
*,
|
| 863 |
+
axis: None | SupportsIndex = ...,
|
| 864 |
+
dtype: None = ...,
|
| 865 |
+
out: None = ...,
|
| 866 |
+
include_initial: bool = ...,
|
| 867 |
+
) -> NDArray[_SCT]: ...
|
| 868 |
+
@overload
|
| 869 |
+
def cumulative_sum(
|
| 870 |
+
x: ArrayLike,
|
| 871 |
+
/,
|
| 872 |
+
*,
|
| 873 |
+
axis: None | SupportsIndex = ...,
|
| 874 |
+
dtype: None = ...,
|
| 875 |
+
out: None = ...,
|
| 876 |
+
include_initial: bool = ...,
|
| 877 |
+
) -> NDArray[Any]: ...
|
| 878 |
+
@overload
|
| 879 |
+
def cumulative_sum(
|
| 880 |
+
x: ArrayLike,
|
| 881 |
+
/,
|
| 882 |
+
*,
|
| 883 |
+
axis: None | SupportsIndex = ...,
|
| 884 |
+
dtype: _DTypeLike[_SCT] = ...,
|
| 885 |
+
out: None = ...,
|
| 886 |
+
include_initial: bool = ...,
|
| 887 |
+
) -> NDArray[_SCT]: ...
|
| 888 |
+
@overload
|
| 889 |
+
def cumulative_sum(
|
| 890 |
+
x: ArrayLike,
|
| 891 |
+
/,
|
| 892 |
+
*,
|
| 893 |
+
axis: None | SupportsIndex = ...,
|
| 894 |
+
dtype: DTypeLike = ...,
|
| 895 |
+
out: None = ...,
|
| 896 |
+
include_initial: bool = ...,
|
| 897 |
+
) -> NDArray[Any]: ...
|
| 898 |
+
@overload
|
| 899 |
+
def cumulative_sum(
|
| 900 |
+
x: ArrayLike,
|
| 901 |
+
/,
|
| 902 |
+
*,
|
| 903 |
+
axis: None | SupportsIndex = ...,
|
| 904 |
+
dtype: DTypeLike = ...,
|
| 905 |
+
out: _ArrayType = ...,
|
| 906 |
+
include_initial: bool = ...,
|
| 907 |
+
) -> _ArrayType: ...
|
| 908 |
+
|
| 909 |
+
@overload
|
| 910 |
+
def ptp(
|
| 911 |
+
a: _ArrayLike[_SCT],
|
| 912 |
+
axis: None = ...,
|
| 913 |
+
out: None = ...,
|
| 914 |
+
keepdims: Literal[False] = ...,
|
| 915 |
+
) -> _SCT: ...
|
| 916 |
+
@overload
|
| 917 |
+
def ptp(
|
| 918 |
+
a: ArrayLike,
|
| 919 |
+
axis: None | _ShapeLike = ...,
|
| 920 |
+
out: None = ...,
|
| 921 |
+
keepdims: bool = ...,
|
| 922 |
+
) -> Any: ...
|
| 923 |
+
@overload
|
| 924 |
+
def ptp(
|
| 925 |
+
a: ArrayLike,
|
| 926 |
+
axis: None | _ShapeLike = ...,
|
| 927 |
+
out: _ArrayType = ...,
|
| 928 |
+
keepdims: bool = ...,
|
| 929 |
+
) -> _ArrayType: ...
|
| 930 |
+
|
| 931 |
+
@overload
|
| 932 |
+
def amax(
|
| 933 |
+
a: _ArrayLike[_SCT],
|
| 934 |
+
axis: None = ...,
|
| 935 |
+
out: None = ...,
|
| 936 |
+
keepdims: Literal[False] = ...,
|
| 937 |
+
initial: _NumberLike_co = ...,
|
| 938 |
+
where: _ArrayLikeBool_co = ...,
|
| 939 |
+
) -> _SCT: ...
|
| 940 |
+
@overload
|
| 941 |
+
def amax(
|
| 942 |
+
a: ArrayLike,
|
| 943 |
+
axis: None | _ShapeLike = ...,
|
| 944 |
+
out: None = ...,
|
| 945 |
+
keepdims: bool = ...,
|
| 946 |
+
initial: _NumberLike_co = ...,
|
| 947 |
+
where: _ArrayLikeBool_co = ...,
|
| 948 |
+
) -> Any: ...
|
| 949 |
+
@overload
|
| 950 |
+
def amax(
|
| 951 |
+
a: ArrayLike,
|
| 952 |
+
axis: None | _ShapeLike = ...,
|
| 953 |
+
out: _ArrayType = ...,
|
| 954 |
+
keepdims: bool = ...,
|
| 955 |
+
initial: _NumberLike_co = ...,
|
| 956 |
+
where: _ArrayLikeBool_co = ...,
|
| 957 |
+
) -> _ArrayType: ...
|
| 958 |
+
|
| 959 |
+
@overload
|
| 960 |
+
def amin(
|
| 961 |
+
a: _ArrayLike[_SCT],
|
| 962 |
+
axis: None = ...,
|
| 963 |
+
out: None = ...,
|
| 964 |
+
keepdims: Literal[False] = ...,
|
| 965 |
+
initial: _NumberLike_co = ...,
|
| 966 |
+
where: _ArrayLikeBool_co = ...,
|
| 967 |
+
) -> _SCT: ...
|
| 968 |
+
@overload
|
| 969 |
+
def amin(
|
| 970 |
+
a: ArrayLike,
|
| 971 |
+
axis: None | _ShapeLike = ...,
|
| 972 |
+
out: None = ...,
|
| 973 |
+
keepdims: bool = ...,
|
| 974 |
+
initial: _NumberLike_co = ...,
|
| 975 |
+
where: _ArrayLikeBool_co = ...,
|
| 976 |
+
) -> Any: ...
|
| 977 |
+
@overload
|
| 978 |
+
def amin(
|
| 979 |
+
a: ArrayLike,
|
| 980 |
+
axis: None | _ShapeLike = ...,
|
| 981 |
+
out: _ArrayType = ...,
|
| 982 |
+
keepdims: bool = ...,
|
| 983 |
+
initial: _NumberLike_co = ...,
|
| 984 |
+
where: _ArrayLikeBool_co = ...,
|
| 985 |
+
) -> _ArrayType: ...
|
| 986 |
+
|
| 987 |
+
# TODO: `np.prod()``: For object arrays `initial` does not necessarily
|
| 988 |
+
# have to be a numerical scalar.
|
| 989 |
+
# The only requirement is that it is compatible
|
| 990 |
+
# with the `.__mul__()` method(s) of the passed array's elements.
|
| 991 |
+
|
| 992 |
+
# Note that the same situation holds for all wrappers around
|
| 993 |
+
# `np.ufunc.reduce`, e.g. `np.sum()` (`.__add__()`).
|
| 994 |
+
@overload
|
| 995 |
+
def prod(
|
| 996 |
+
a: _ArrayLikeBool_co,
|
| 997 |
+
axis: None = ...,
|
| 998 |
+
dtype: None = ...,
|
| 999 |
+
out: None = ...,
|
| 1000 |
+
keepdims: Literal[False] = ...,
|
| 1001 |
+
initial: _NumberLike_co = ...,
|
| 1002 |
+
where: _ArrayLikeBool_co = ...,
|
| 1003 |
+
) -> int_: ...
|
| 1004 |
+
@overload
|
| 1005 |
+
def prod(
|
| 1006 |
+
a: _ArrayLikeUInt_co,
|
| 1007 |
+
axis: None = ...,
|
| 1008 |
+
dtype: None = ...,
|
| 1009 |
+
out: None = ...,
|
| 1010 |
+
keepdims: Literal[False] = ...,
|
| 1011 |
+
initial: _NumberLike_co = ...,
|
| 1012 |
+
where: _ArrayLikeBool_co = ...,
|
| 1013 |
+
) -> uint64: ...
|
| 1014 |
+
@overload
|
| 1015 |
+
def prod(
|
| 1016 |
+
a: _ArrayLikeInt_co,
|
| 1017 |
+
axis: None = ...,
|
| 1018 |
+
dtype: None = ...,
|
| 1019 |
+
out: None = ...,
|
| 1020 |
+
keepdims: Literal[False] = ...,
|
| 1021 |
+
initial: _NumberLike_co = ...,
|
| 1022 |
+
where: _ArrayLikeBool_co = ...,
|
| 1023 |
+
) -> int64: ...
|
| 1024 |
+
@overload
|
| 1025 |
+
def prod(
|
| 1026 |
+
a: _ArrayLikeFloat_co,
|
| 1027 |
+
axis: None = ...,
|
| 1028 |
+
dtype: None = ...,
|
| 1029 |
+
out: None = ...,
|
| 1030 |
+
keepdims: Literal[False] = ...,
|
| 1031 |
+
initial: _NumberLike_co = ...,
|
| 1032 |
+
where: _ArrayLikeBool_co = ...,
|
| 1033 |
+
) -> floating[Any]: ...
|
| 1034 |
+
@overload
|
| 1035 |
+
def prod(
|
| 1036 |
+
a: _ArrayLikeComplex_co,
|
| 1037 |
+
axis: None = ...,
|
| 1038 |
+
dtype: None = ...,
|
| 1039 |
+
out: None = ...,
|
| 1040 |
+
keepdims: Literal[False] = ...,
|
| 1041 |
+
initial: _NumberLike_co = ...,
|
| 1042 |
+
where: _ArrayLikeBool_co = ...,
|
| 1043 |
+
) -> complexfloating[Any, Any]: ...
|
| 1044 |
+
@overload
|
| 1045 |
+
def prod(
|
| 1046 |
+
a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
|
| 1047 |
+
axis: None | _ShapeLike = ...,
|
| 1048 |
+
dtype: None = ...,
|
| 1049 |
+
out: None = ...,
|
| 1050 |
+
keepdims: bool = ...,
|
| 1051 |
+
initial: _NumberLike_co = ...,
|
| 1052 |
+
where: _ArrayLikeBool_co = ...,
|
| 1053 |
+
) -> Any: ...
|
| 1054 |
+
@overload
|
| 1055 |
+
def prod(
|
| 1056 |
+
a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
|
| 1057 |
+
axis: None = ...,
|
| 1058 |
+
dtype: _DTypeLike[_SCT] = ...,
|
| 1059 |
+
out: None = ...,
|
| 1060 |
+
keepdims: Literal[False] = ...,
|
| 1061 |
+
initial: _NumberLike_co = ...,
|
| 1062 |
+
where: _ArrayLikeBool_co = ...,
|
| 1063 |
+
) -> _SCT: ...
|
| 1064 |
+
@overload
|
| 1065 |
+
def prod(
|
| 1066 |
+
a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
|
| 1067 |
+
axis: None | _ShapeLike = ...,
|
| 1068 |
+
dtype: None | DTypeLike = ...,
|
| 1069 |
+
out: None = ...,
|
| 1070 |
+
keepdims: bool = ...,
|
| 1071 |
+
initial: _NumberLike_co = ...,
|
| 1072 |
+
where: _ArrayLikeBool_co = ...,
|
| 1073 |
+
) -> Any: ...
|
| 1074 |
+
@overload
|
| 1075 |
+
def prod(
|
| 1076 |
+
a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
|
| 1077 |
+
axis: None | _ShapeLike = ...,
|
| 1078 |
+
dtype: None | DTypeLike = ...,
|
| 1079 |
+
out: _ArrayType = ...,
|
| 1080 |
+
keepdims: bool = ...,
|
| 1081 |
+
initial: _NumberLike_co = ...,
|
| 1082 |
+
where: _ArrayLikeBool_co = ...,
|
| 1083 |
+
) -> _ArrayType: ...
|
| 1084 |
+
|
| 1085 |
+
@overload
|
| 1086 |
+
def cumprod(
|
| 1087 |
+
a: _ArrayLikeBool_co,
|
| 1088 |
+
axis: None | SupportsIndex = ...,
|
| 1089 |
+
dtype: None = ...,
|
| 1090 |
+
out: None = ...,
|
| 1091 |
+
) -> NDArray[int_]: ...
|
| 1092 |
+
@overload
|
| 1093 |
+
def cumprod(
|
| 1094 |
+
a: _ArrayLikeUInt_co,
|
| 1095 |
+
axis: None | SupportsIndex = ...,
|
| 1096 |
+
dtype: None = ...,
|
| 1097 |
+
out: None = ...,
|
| 1098 |
+
) -> NDArray[uint64]: ...
|
| 1099 |
+
@overload
|
| 1100 |
+
def cumprod(
|
| 1101 |
+
a: _ArrayLikeInt_co,
|
| 1102 |
+
axis: None | SupportsIndex = ...,
|
| 1103 |
+
dtype: None = ...,
|
| 1104 |
+
out: None = ...,
|
| 1105 |
+
) -> NDArray[int64]: ...
|
| 1106 |
+
@overload
|
| 1107 |
+
def cumprod(
|
| 1108 |
+
a: _ArrayLikeFloat_co,
|
| 1109 |
+
axis: None | SupportsIndex = ...,
|
| 1110 |
+
dtype: None = ...,
|
| 1111 |
+
out: None = ...,
|
| 1112 |
+
) -> NDArray[floating[Any]]: ...
|
| 1113 |
+
@overload
|
| 1114 |
+
def cumprod(
|
| 1115 |
+
a: _ArrayLikeComplex_co,
|
| 1116 |
+
axis: None | SupportsIndex = ...,
|
| 1117 |
+
dtype: None = ...,
|
| 1118 |
+
out: None = ...,
|
| 1119 |
+
) -> NDArray[complexfloating[Any, Any]]: ...
|
| 1120 |
+
@overload
|
| 1121 |
+
def cumprod(
|
| 1122 |
+
a: _ArrayLikeObject_co,
|
| 1123 |
+
axis: None | SupportsIndex = ...,
|
| 1124 |
+
dtype: None = ...,
|
| 1125 |
+
out: None = ...,
|
| 1126 |
+
) -> NDArray[object_]: ...
|
| 1127 |
+
@overload
|
| 1128 |
+
def cumprod(
|
| 1129 |
+
a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
|
| 1130 |
+
axis: None | SupportsIndex = ...,
|
| 1131 |
+
dtype: _DTypeLike[_SCT] = ...,
|
| 1132 |
+
out: None = ...,
|
| 1133 |
+
) -> NDArray[_SCT]: ...
|
| 1134 |
+
@overload
|
| 1135 |
+
def cumprod(
|
| 1136 |
+
a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
|
| 1137 |
+
axis: None | SupportsIndex = ...,
|
| 1138 |
+
dtype: DTypeLike = ...,
|
| 1139 |
+
out: None = ...,
|
| 1140 |
+
) -> NDArray[Any]: ...
|
| 1141 |
+
@overload
|
| 1142 |
+
def cumprod(
|
| 1143 |
+
a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
|
| 1144 |
+
axis: None | SupportsIndex = ...,
|
| 1145 |
+
dtype: DTypeLike = ...,
|
| 1146 |
+
out: _ArrayType = ...,
|
| 1147 |
+
) -> _ArrayType: ...
|
| 1148 |
+
|
| 1149 |
+
@overload
|
| 1150 |
+
def cumulative_prod(
|
| 1151 |
+
x: _ArrayLikeBool_co,
|
| 1152 |
+
/,
|
| 1153 |
+
*,
|
| 1154 |
+
axis: None | SupportsIndex = ...,
|
| 1155 |
+
dtype: None = ...,
|
| 1156 |
+
out: None = ...,
|
| 1157 |
+
include_initial: bool = ...,
|
| 1158 |
+
) -> NDArray[int_]: ...
|
| 1159 |
+
@overload
|
| 1160 |
+
def cumulative_prod(
|
| 1161 |
+
x: _ArrayLikeUInt_co,
|
| 1162 |
+
/,
|
| 1163 |
+
*,
|
| 1164 |
+
axis: None | SupportsIndex = ...,
|
| 1165 |
+
dtype: None = ...,
|
| 1166 |
+
out: None = ...,
|
| 1167 |
+
include_initial: bool = ...,
|
| 1168 |
+
) -> NDArray[uint64]: ...
|
| 1169 |
+
@overload
|
| 1170 |
+
def cumulative_prod(
|
| 1171 |
+
x: _ArrayLikeInt_co,
|
| 1172 |
+
/,
|
| 1173 |
+
*,
|
| 1174 |
+
axis: None | SupportsIndex = ...,
|
| 1175 |
+
dtype: None = ...,
|
| 1176 |
+
out: None = ...,
|
| 1177 |
+
include_initial: bool = ...,
|
| 1178 |
+
) -> NDArray[int64]: ...
|
| 1179 |
+
@overload
|
| 1180 |
+
def cumulative_prod(
|
| 1181 |
+
x: _ArrayLikeFloat_co,
|
| 1182 |
+
/,
|
| 1183 |
+
*,
|
| 1184 |
+
axis: None | SupportsIndex = ...,
|
| 1185 |
+
dtype: None = ...,
|
| 1186 |
+
out: None = ...,
|
| 1187 |
+
include_initial: bool = ...,
|
| 1188 |
+
) -> NDArray[floating[Any]]: ...
|
| 1189 |
+
@overload
|
| 1190 |
+
def cumulative_prod(
|
| 1191 |
+
x: _ArrayLikeComplex_co,
|
| 1192 |
+
/,
|
| 1193 |
+
*,
|
| 1194 |
+
axis: None | SupportsIndex = ...,
|
| 1195 |
+
dtype: None = ...,
|
| 1196 |
+
out: None = ...,
|
| 1197 |
+
include_initial: bool = ...,
|
| 1198 |
+
) -> NDArray[complexfloating[Any, Any]]: ...
|
| 1199 |
+
@overload
|
| 1200 |
+
def cumulative_prod(
|
| 1201 |
+
x: _ArrayLikeObject_co,
|
| 1202 |
+
/,
|
| 1203 |
+
*,
|
| 1204 |
+
axis: None | SupportsIndex = ...,
|
| 1205 |
+
dtype: None = ...,
|
| 1206 |
+
out: None = ...,
|
| 1207 |
+
include_initial: bool = ...,
|
| 1208 |
+
) -> NDArray[object_]: ...
|
| 1209 |
+
@overload
|
| 1210 |
+
def cumulative_prod(
|
| 1211 |
+
x: _ArrayLikeComplex_co | _ArrayLikeObject_co,
|
| 1212 |
+
/,
|
| 1213 |
+
*,
|
| 1214 |
+
axis: None | SupportsIndex = ...,
|
| 1215 |
+
dtype: _DTypeLike[_SCT] = ...,
|
| 1216 |
+
out: None = ...,
|
| 1217 |
+
include_initial: bool = ...,
|
| 1218 |
+
) -> NDArray[_SCT]: ...
|
| 1219 |
+
@overload
|
| 1220 |
+
def cumulative_prod(
|
| 1221 |
+
x: _ArrayLikeComplex_co | _ArrayLikeObject_co,
|
| 1222 |
+
/,
|
| 1223 |
+
*,
|
| 1224 |
+
axis: None | SupportsIndex = ...,
|
| 1225 |
+
dtype: DTypeLike = ...,
|
| 1226 |
+
out: None = ...,
|
| 1227 |
+
include_initial: bool = ...,
|
| 1228 |
+
) -> NDArray[Any]: ...
|
| 1229 |
+
@overload
|
| 1230 |
+
def cumulative_prod(
|
| 1231 |
+
x: _ArrayLikeComplex_co | _ArrayLikeObject_co,
|
| 1232 |
+
/,
|
| 1233 |
+
*,
|
| 1234 |
+
axis: None | SupportsIndex = ...,
|
| 1235 |
+
dtype: DTypeLike = ...,
|
| 1236 |
+
out: _ArrayType = ...,
|
| 1237 |
+
include_initial: bool = ...,
|
| 1238 |
+
) -> _ArrayType: ...
|
| 1239 |
+
|
| 1240 |
+
def ndim(a: ArrayLike) -> int: ...
|
| 1241 |
+
|
| 1242 |
+
def size(a: ArrayLike, axis: None | int = ...) -> int: ...
|
| 1243 |
+
|
| 1244 |
+
@overload
|
| 1245 |
+
def around(
|
| 1246 |
+
a: _BoolLike_co,
|
| 1247 |
+
decimals: SupportsIndex = ...,
|
| 1248 |
+
out: None = ...,
|
| 1249 |
+
) -> float16: ...
|
| 1250 |
+
@overload
|
| 1251 |
+
def around(
|
| 1252 |
+
a: _SCT_uifcO,
|
| 1253 |
+
decimals: SupportsIndex = ...,
|
| 1254 |
+
out: None = ...,
|
| 1255 |
+
) -> _SCT_uifcO: ...
|
| 1256 |
+
@overload
|
| 1257 |
+
def around(
|
| 1258 |
+
a: _ComplexLike_co | object_,
|
| 1259 |
+
decimals: SupportsIndex = ...,
|
| 1260 |
+
out: None = ...,
|
| 1261 |
+
) -> Any: ...
|
| 1262 |
+
@overload
|
| 1263 |
+
def around(
|
| 1264 |
+
a: _ArrayLikeBool_co,
|
| 1265 |
+
decimals: SupportsIndex = ...,
|
| 1266 |
+
out: None = ...,
|
| 1267 |
+
) -> NDArray[float16]: ...
|
| 1268 |
+
@overload
|
| 1269 |
+
def around(
|
| 1270 |
+
a: _ArrayLike[_SCT_uifcO],
|
| 1271 |
+
decimals: SupportsIndex = ...,
|
| 1272 |
+
out: None = ...,
|
| 1273 |
+
) -> NDArray[_SCT_uifcO]: ...
|
| 1274 |
+
@overload
|
| 1275 |
+
def around(
|
| 1276 |
+
a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
|
| 1277 |
+
decimals: SupportsIndex = ...,
|
| 1278 |
+
out: None = ...,
|
| 1279 |
+
) -> NDArray[Any]: ...
|
| 1280 |
+
@overload
|
| 1281 |
+
def around(
|
| 1282 |
+
a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
|
| 1283 |
+
decimals: SupportsIndex = ...,
|
| 1284 |
+
out: _ArrayType = ...,
|
| 1285 |
+
) -> _ArrayType: ...
|
| 1286 |
+
|
| 1287 |
+
@overload
|
| 1288 |
+
def mean(
|
| 1289 |
+
a: _ArrayLikeFloat_co,
|
| 1290 |
+
axis: None = ...,
|
| 1291 |
+
dtype: None = ...,
|
| 1292 |
+
out: None = ...,
|
| 1293 |
+
keepdims: Literal[False] = ...,
|
| 1294 |
+
*,
|
| 1295 |
+
where: _ArrayLikeBool_co = ...,
|
| 1296 |
+
) -> floating[Any]: ...
|
| 1297 |
+
@overload
|
| 1298 |
+
def mean(
|
| 1299 |
+
a: _ArrayLikeComplex_co,
|
| 1300 |
+
axis: None = ...,
|
| 1301 |
+
dtype: None = ...,
|
| 1302 |
+
out: None = ...,
|
| 1303 |
+
keepdims: Literal[False] = ...,
|
| 1304 |
+
*,
|
| 1305 |
+
where: _ArrayLikeBool_co = ...,
|
| 1306 |
+
) -> complexfloating[Any, Any]: ...
|
| 1307 |
+
@overload
|
| 1308 |
+
def mean(
|
| 1309 |
+
a: _ArrayLikeTD64_co,
|
| 1310 |
+
axis: None = ...,
|
| 1311 |
+
dtype: None = ...,
|
| 1312 |
+
out: None = ...,
|
| 1313 |
+
keepdims: Literal[False] = ...,
|
| 1314 |
+
*,
|
| 1315 |
+
where: _ArrayLikeBool_co = ...,
|
| 1316 |
+
) -> timedelta64: ...
|
| 1317 |
+
@overload
|
| 1318 |
+
def mean(
|
| 1319 |
+
a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
|
| 1320 |
+
axis: None | _ShapeLike = ...,
|
| 1321 |
+
dtype: None = ...,
|
| 1322 |
+
out: None = ...,
|
| 1323 |
+
keepdims: bool = ...,
|
| 1324 |
+
*,
|
| 1325 |
+
where: _ArrayLikeBool_co = ...,
|
| 1326 |
+
) -> Any: ...
|
| 1327 |
+
@overload
|
| 1328 |
+
def mean(
|
| 1329 |
+
a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
|
| 1330 |
+
axis: None = ...,
|
| 1331 |
+
dtype: _DTypeLike[_SCT] = ...,
|
| 1332 |
+
out: None = ...,
|
| 1333 |
+
keepdims: Literal[False] = ...,
|
| 1334 |
+
*,
|
| 1335 |
+
where: _ArrayLikeBool_co = ...,
|
| 1336 |
+
) -> _SCT: ...
|
| 1337 |
+
@overload
|
| 1338 |
+
def mean(
|
| 1339 |
+
a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
|
| 1340 |
+
axis: None,
|
| 1341 |
+
dtype: _DTypeLike[_SCT],
|
| 1342 |
+
out: None = ...,
|
| 1343 |
+
keepdims: bool = ...,
|
| 1344 |
+
*,
|
| 1345 |
+
where: _ArrayLikeBool_co = ...,
|
| 1346 |
+
) -> _SCT | NDArray[_SCT]: ...
|
| 1347 |
+
@overload
|
| 1348 |
+
def mean(
|
| 1349 |
+
a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
|
| 1350 |
+
axis: None = ...,
|
| 1351 |
+
*,
|
| 1352 |
+
dtype: _DTypeLike[_SCT],
|
| 1353 |
+
out: None = ...,
|
| 1354 |
+
keepdims: bool = ...,
|
| 1355 |
+
where: _ArrayLikeBool_co = ...,
|
| 1356 |
+
) -> _SCT | NDArray[_SCT]: ...
|
| 1357 |
+
@overload
|
| 1358 |
+
def mean(
|
| 1359 |
+
a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
|
| 1360 |
+
axis: None | _ShapeLike = ...,
|
| 1361 |
+
dtype: DTypeLike = ...,
|
| 1362 |
+
out: None = ...,
|
| 1363 |
+
keepdims: bool = ...,
|
| 1364 |
+
*,
|
| 1365 |
+
where: _ArrayLikeBool_co = ...,
|
| 1366 |
+
) -> Any: ...
|
| 1367 |
+
@overload
|
| 1368 |
+
def mean(
|
| 1369 |
+
a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
|
| 1370 |
+
axis: None | _ShapeLike = ...,
|
| 1371 |
+
dtype: DTypeLike = ...,
|
| 1372 |
+
out: _ArrayType = ...,
|
| 1373 |
+
keepdims: bool = ...,
|
| 1374 |
+
*,
|
| 1375 |
+
where: _ArrayLikeBool_co = ...,
|
| 1376 |
+
) -> _ArrayType: ...
|
| 1377 |
+
|
| 1378 |
+
@overload
|
| 1379 |
+
def std(
|
| 1380 |
+
a: _ArrayLikeComplex_co,
|
| 1381 |
+
axis: None = ...,
|
| 1382 |
+
dtype: None = ...,
|
| 1383 |
+
out: None = ...,
|
| 1384 |
+
ddof: int | float = ...,
|
| 1385 |
+
keepdims: Literal[False] = ...,
|
| 1386 |
+
*,
|
| 1387 |
+
where: _ArrayLikeBool_co = ...,
|
| 1388 |
+
mean: _ArrayLikeComplex_co = ...,
|
| 1389 |
+
correction: int | float = ...,
|
| 1390 |
+
) -> floating[Any]: ...
|
| 1391 |
+
@overload
|
| 1392 |
+
def std(
|
| 1393 |
+
a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
|
| 1394 |
+
axis: None | _ShapeLike = ...,
|
| 1395 |
+
dtype: None = ...,
|
| 1396 |
+
out: None = ...,
|
| 1397 |
+
ddof: int | float = ...,
|
| 1398 |
+
keepdims: bool = ...,
|
| 1399 |
+
*,
|
| 1400 |
+
where: _ArrayLikeBool_co = ...,
|
| 1401 |
+
mean: _ArrayLikeComplex_co | _ArrayLikeObject_co = ...,
|
| 1402 |
+
correction: int | float = ...,
|
| 1403 |
+
) -> Any: ...
|
| 1404 |
+
@overload
|
| 1405 |
+
def std(
|
| 1406 |
+
a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
|
| 1407 |
+
axis: None = ...,
|
| 1408 |
+
dtype: _DTypeLike[_SCT] = ...,
|
| 1409 |
+
out: None = ...,
|
| 1410 |
+
ddof: int | float = ...,
|
| 1411 |
+
keepdims: Literal[False] = ...,
|
| 1412 |
+
*,
|
| 1413 |
+
where: _ArrayLikeBool_co = ...,
|
| 1414 |
+
mean: _ArrayLikeComplex_co | _ArrayLikeObject_co = ...,
|
| 1415 |
+
correction: int | float = ...,
|
| 1416 |
+
) -> _SCT: ...
|
| 1417 |
+
@overload
|
| 1418 |
+
def std(
|
| 1419 |
+
a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
|
| 1420 |
+
axis: None | _ShapeLike = ...,
|
| 1421 |
+
dtype: DTypeLike = ...,
|
| 1422 |
+
out: None = ...,
|
| 1423 |
+
ddof: int | float = ...,
|
| 1424 |
+
keepdims: bool = ...,
|
| 1425 |
+
*,
|
| 1426 |
+
where: _ArrayLikeBool_co = ...,
|
| 1427 |
+
mean: _ArrayLikeComplex_co | _ArrayLikeObject_co = ...,
|
| 1428 |
+
correction: int | float = ...,
|
| 1429 |
+
) -> Any: ...
|
| 1430 |
+
@overload
|
| 1431 |
+
def std(
|
| 1432 |
+
a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
|
| 1433 |
+
axis: None | _ShapeLike = ...,
|
| 1434 |
+
dtype: DTypeLike = ...,
|
| 1435 |
+
out: _ArrayType = ...,
|
| 1436 |
+
ddof: int | float = ...,
|
| 1437 |
+
keepdims: bool = ...,
|
| 1438 |
+
*,
|
| 1439 |
+
where: _ArrayLikeBool_co = ...,
|
| 1440 |
+
mean: _ArrayLikeComplex_co | _ArrayLikeObject_co = ...,
|
| 1441 |
+
correction: int | float = ...,
|
| 1442 |
+
) -> _ArrayType: ...
|
| 1443 |
+
|
| 1444 |
+
@overload
|
| 1445 |
+
def var(
|
| 1446 |
+
a: _ArrayLikeComplex_co,
|
| 1447 |
+
axis: None = ...,
|
| 1448 |
+
dtype: None = ...,
|
| 1449 |
+
out: None = ...,
|
| 1450 |
+
ddof: int | float = ...,
|
| 1451 |
+
keepdims: Literal[False] = ...,
|
| 1452 |
+
*,
|
| 1453 |
+
where: _ArrayLikeBool_co = ...,
|
| 1454 |
+
mean: _ArrayLikeComplex_co = ...,
|
| 1455 |
+
correction: int | float = ...,
|
| 1456 |
+
) -> floating[Any]: ...
|
| 1457 |
+
@overload
|
| 1458 |
+
def var(
|
| 1459 |
+
a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
|
| 1460 |
+
axis: None | _ShapeLike = ...,
|
| 1461 |
+
dtype: None = ...,
|
| 1462 |
+
out: None = ...,
|
| 1463 |
+
ddof: int | float = ...,
|
| 1464 |
+
keepdims: bool = ...,
|
| 1465 |
+
*,
|
| 1466 |
+
where: _ArrayLikeBool_co = ...,
|
| 1467 |
+
mean: _ArrayLikeComplex_co | _ArrayLikeObject_co = ...,
|
| 1468 |
+
correction: int | float = ...,
|
| 1469 |
+
) -> Any: ...
|
| 1470 |
+
@overload
|
| 1471 |
+
def var(
|
| 1472 |
+
a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
|
| 1473 |
+
axis: None = ...,
|
| 1474 |
+
dtype: _DTypeLike[_SCT] = ...,
|
| 1475 |
+
out: None = ...,
|
| 1476 |
+
ddof: int | float = ...,
|
| 1477 |
+
keepdims: Literal[False] = ...,
|
| 1478 |
+
*,
|
| 1479 |
+
where: _ArrayLikeBool_co = ...,
|
| 1480 |
+
mean: _ArrayLikeComplex_co | _ArrayLikeObject_co = ...,
|
| 1481 |
+
correction: int | float = ...,
|
| 1482 |
+
) -> _SCT: ...
|
| 1483 |
+
@overload
|
| 1484 |
+
def var(
|
| 1485 |
+
a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
|
| 1486 |
+
axis: None | _ShapeLike = ...,
|
| 1487 |
+
dtype: DTypeLike = ...,
|
| 1488 |
+
out: None = ...,
|
| 1489 |
+
ddof: int | float = ...,
|
| 1490 |
+
keepdims: bool = ...,
|
| 1491 |
+
*,
|
| 1492 |
+
where: _ArrayLikeBool_co = ...,
|
| 1493 |
+
mean: _ArrayLikeComplex_co | _ArrayLikeObject_co = ...,
|
| 1494 |
+
correction: int | float = ...,
|
| 1495 |
+
) -> Any: ...
|
| 1496 |
+
@overload
|
| 1497 |
+
def var(
|
| 1498 |
+
a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
|
| 1499 |
+
axis: None | _ShapeLike = ...,
|
| 1500 |
+
dtype: DTypeLike = ...,
|
| 1501 |
+
out: _ArrayType = ...,
|
| 1502 |
+
ddof: int | float = ...,
|
| 1503 |
+
keepdims: bool = ...,
|
| 1504 |
+
*,
|
| 1505 |
+
where: _ArrayLikeBool_co = ...,
|
| 1506 |
+
mean: _ArrayLikeComplex_co | _ArrayLikeObject_co = ...,
|
| 1507 |
+
correction: int | float = ...,
|
| 1508 |
+
) -> _ArrayType: ...
|
| 1509 |
+
|
| 1510 |
+
max = amax
|
| 1511 |
+
min = amin
|
| 1512 |
+
round = around
|
janus/lib/python3.10/site-packages/numpy/_core/function_base.pyi
ADDED
|
@@ -0,0 +1,202 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import (
|
| 2 |
+
Literal as L,
|
| 3 |
+
overload,
|
| 4 |
+
Any,
|
| 5 |
+
SupportsIndex,
|
| 6 |
+
TypeVar,
|
| 7 |
+
)
|
| 8 |
+
|
| 9 |
+
from numpy import floating, complexfloating, generic
|
| 10 |
+
from numpy._typing import (
|
| 11 |
+
NDArray,
|
| 12 |
+
DTypeLike,
|
| 13 |
+
_DTypeLike,
|
| 14 |
+
_ArrayLikeFloat_co,
|
| 15 |
+
_ArrayLikeComplex_co,
|
| 16 |
+
)
|
| 17 |
+
|
| 18 |
+
__all__ = ["logspace", "linspace", "geomspace"]
|
| 19 |
+
|
| 20 |
+
_SCT = TypeVar("_SCT", bound=generic)
|
| 21 |
+
|
| 22 |
+
@overload
|
| 23 |
+
def linspace(
|
| 24 |
+
start: _ArrayLikeFloat_co,
|
| 25 |
+
stop: _ArrayLikeFloat_co,
|
| 26 |
+
num: SupportsIndex = ...,
|
| 27 |
+
endpoint: bool = ...,
|
| 28 |
+
retstep: L[False] = ...,
|
| 29 |
+
dtype: None = ...,
|
| 30 |
+
axis: SupportsIndex = ...,
|
| 31 |
+
*,
|
| 32 |
+
device: None | L["cpu"] = ...,
|
| 33 |
+
) -> NDArray[floating[Any]]: ...
|
| 34 |
+
@overload
|
| 35 |
+
def linspace(
|
| 36 |
+
start: _ArrayLikeComplex_co,
|
| 37 |
+
stop: _ArrayLikeComplex_co,
|
| 38 |
+
num: SupportsIndex = ...,
|
| 39 |
+
endpoint: bool = ...,
|
| 40 |
+
retstep: L[False] = ...,
|
| 41 |
+
dtype: None = ...,
|
| 42 |
+
axis: SupportsIndex = ...,
|
| 43 |
+
*,
|
| 44 |
+
device: None | L["cpu"] = ...,
|
| 45 |
+
) -> NDArray[complexfloating[Any, Any]]: ...
|
| 46 |
+
@overload
|
| 47 |
+
def linspace(
|
| 48 |
+
start: _ArrayLikeComplex_co,
|
| 49 |
+
stop: _ArrayLikeComplex_co,
|
| 50 |
+
num: SupportsIndex = ...,
|
| 51 |
+
endpoint: bool = ...,
|
| 52 |
+
retstep: L[False] = ...,
|
| 53 |
+
dtype: _DTypeLike[_SCT] = ...,
|
| 54 |
+
axis: SupportsIndex = ...,
|
| 55 |
+
*,
|
| 56 |
+
device: None | L["cpu"] = ...,
|
| 57 |
+
) -> NDArray[_SCT]: ...
|
| 58 |
+
@overload
|
| 59 |
+
def linspace(
|
| 60 |
+
start: _ArrayLikeComplex_co,
|
| 61 |
+
stop: _ArrayLikeComplex_co,
|
| 62 |
+
num: SupportsIndex = ...,
|
| 63 |
+
endpoint: bool = ...,
|
| 64 |
+
retstep: L[False] = ...,
|
| 65 |
+
dtype: DTypeLike = ...,
|
| 66 |
+
axis: SupportsIndex = ...,
|
| 67 |
+
*,
|
| 68 |
+
device: None | L["cpu"] = ...,
|
| 69 |
+
) -> NDArray[Any]: ...
|
| 70 |
+
@overload
|
| 71 |
+
def linspace(
|
| 72 |
+
start: _ArrayLikeFloat_co,
|
| 73 |
+
stop: _ArrayLikeFloat_co,
|
| 74 |
+
num: SupportsIndex = ...,
|
| 75 |
+
endpoint: bool = ...,
|
| 76 |
+
retstep: L[True] = ...,
|
| 77 |
+
dtype: None = ...,
|
| 78 |
+
axis: SupportsIndex = ...,
|
| 79 |
+
*,
|
| 80 |
+
device: None | L["cpu"] = ...,
|
| 81 |
+
) -> tuple[NDArray[floating[Any]], floating[Any]]: ...
|
| 82 |
+
@overload
|
| 83 |
+
def linspace(
|
| 84 |
+
start: _ArrayLikeComplex_co,
|
| 85 |
+
stop: _ArrayLikeComplex_co,
|
| 86 |
+
num: SupportsIndex = ...,
|
| 87 |
+
endpoint: bool = ...,
|
| 88 |
+
retstep: L[True] = ...,
|
| 89 |
+
dtype: None = ...,
|
| 90 |
+
axis: SupportsIndex = ...,
|
| 91 |
+
*,
|
| 92 |
+
device: None | L["cpu"] = ...,
|
| 93 |
+
) -> tuple[NDArray[complexfloating[Any, Any]], complexfloating[Any, Any]]: ...
|
| 94 |
+
@overload
|
| 95 |
+
def linspace(
|
| 96 |
+
start: _ArrayLikeComplex_co,
|
| 97 |
+
stop: _ArrayLikeComplex_co,
|
| 98 |
+
num: SupportsIndex = ...,
|
| 99 |
+
endpoint: bool = ...,
|
| 100 |
+
retstep: L[True] = ...,
|
| 101 |
+
dtype: _DTypeLike[_SCT] = ...,
|
| 102 |
+
axis: SupportsIndex = ...,
|
| 103 |
+
*,
|
| 104 |
+
device: None | L["cpu"] = ...,
|
| 105 |
+
) -> tuple[NDArray[_SCT], _SCT]: ...
|
| 106 |
+
@overload
|
| 107 |
+
def linspace(
|
| 108 |
+
start: _ArrayLikeComplex_co,
|
| 109 |
+
stop: _ArrayLikeComplex_co,
|
| 110 |
+
num: SupportsIndex = ...,
|
| 111 |
+
endpoint: bool = ...,
|
| 112 |
+
retstep: L[True] = ...,
|
| 113 |
+
dtype: DTypeLike = ...,
|
| 114 |
+
axis: SupportsIndex = ...,
|
| 115 |
+
*,
|
| 116 |
+
device: None | L["cpu"] = ...,
|
| 117 |
+
) -> tuple[NDArray[Any], Any]: ...
|
| 118 |
+
|
| 119 |
+
@overload
|
| 120 |
+
def logspace(
|
| 121 |
+
start: _ArrayLikeFloat_co,
|
| 122 |
+
stop: _ArrayLikeFloat_co,
|
| 123 |
+
num: SupportsIndex = ...,
|
| 124 |
+
endpoint: bool = ...,
|
| 125 |
+
base: _ArrayLikeFloat_co = ...,
|
| 126 |
+
dtype: None = ...,
|
| 127 |
+
axis: SupportsIndex = ...,
|
| 128 |
+
) -> NDArray[floating[Any]]: ...
|
| 129 |
+
@overload
|
| 130 |
+
def logspace(
|
| 131 |
+
start: _ArrayLikeComplex_co,
|
| 132 |
+
stop: _ArrayLikeComplex_co,
|
| 133 |
+
num: SupportsIndex = ...,
|
| 134 |
+
endpoint: bool = ...,
|
| 135 |
+
base: _ArrayLikeComplex_co = ...,
|
| 136 |
+
dtype: None = ...,
|
| 137 |
+
axis: SupportsIndex = ...,
|
| 138 |
+
) -> NDArray[complexfloating[Any, Any]]: ...
|
| 139 |
+
@overload
|
| 140 |
+
def logspace(
|
| 141 |
+
start: _ArrayLikeComplex_co,
|
| 142 |
+
stop: _ArrayLikeComplex_co,
|
| 143 |
+
num: SupportsIndex = ...,
|
| 144 |
+
endpoint: bool = ...,
|
| 145 |
+
base: _ArrayLikeComplex_co = ...,
|
| 146 |
+
dtype: _DTypeLike[_SCT] = ...,
|
| 147 |
+
axis: SupportsIndex = ...,
|
| 148 |
+
) -> NDArray[_SCT]: ...
|
| 149 |
+
@overload
|
| 150 |
+
def logspace(
|
| 151 |
+
start: _ArrayLikeComplex_co,
|
| 152 |
+
stop: _ArrayLikeComplex_co,
|
| 153 |
+
num: SupportsIndex = ...,
|
| 154 |
+
endpoint: bool = ...,
|
| 155 |
+
base: _ArrayLikeComplex_co = ...,
|
| 156 |
+
dtype: DTypeLike = ...,
|
| 157 |
+
axis: SupportsIndex = ...,
|
| 158 |
+
) -> NDArray[Any]: ...
|
| 159 |
+
|
| 160 |
+
@overload
|
| 161 |
+
def geomspace(
|
| 162 |
+
start: _ArrayLikeFloat_co,
|
| 163 |
+
stop: _ArrayLikeFloat_co,
|
| 164 |
+
num: SupportsIndex = ...,
|
| 165 |
+
endpoint: bool = ...,
|
| 166 |
+
dtype: None = ...,
|
| 167 |
+
axis: SupportsIndex = ...,
|
| 168 |
+
) -> NDArray[floating[Any]]: ...
|
| 169 |
+
@overload
|
| 170 |
+
def geomspace(
|
| 171 |
+
start: _ArrayLikeComplex_co,
|
| 172 |
+
stop: _ArrayLikeComplex_co,
|
| 173 |
+
num: SupportsIndex = ...,
|
| 174 |
+
endpoint: bool = ...,
|
| 175 |
+
dtype: None = ...,
|
| 176 |
+
axis: SupportsIndex = ...,
|
| 177 |
+
) -> NDArray[complexfloating[Any, Any]]: ...
|
| 178 |
+
@overload
|
| 179 |
+
def geomspace(
|
| 180 |
+
start: _ArrayLikeComplex_co,
|
| 181 |
+
stop: _ArrayLikeComplex_co,
|
| 182 |
+
num: SupportsIndex = ...,
|
| 183 |
+
endpoint: bool = ...,
|
| 184 |
+
dtype: _DTypeLike[_SCT] = ...,
|
| 185 |
+
axis: SupportsIndex = ...,
|
| 186 |
+
) -> NDArray[_SCT]: ...
|
| 187 |
+
@overload
|
| 188 |
+
def geomspace(
|
| 189 |
+
start: _ArrayLikeComplex_co,
|
| 190 |
+
stop: _ArrayLikeComplex_co,
|
| 191 |
+
num: SupportsIndex = ...,
|
| 192 |
+
endpoint: bool = ...,
|
| 193 |
+
dtype: DTypeLike = ...,
|
| 194 |
+
axis: SupportsIndex = ...,
|
| 195 |
+
) -> NDArray[Any]: ...
|
| 196 |
+
|
| 197 |
+
def add_newdoc(
|
| 198 |
+
place: str,
|
| 199 |
+
obj: str,
|
| 200 |
+
doc: str | tuple[str, str] | list[tuple[str, str]],
|
| 201 |
+
warn_on_python: bool = ...,
|
| 202 |
+
) -> None: ...
|
janus/lib/python3.10/site-packages/numpy/_core/getlimits.py
ADDED
|
@@ -0,0 +1,747 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Machine limits for Float32 and Float64 and (long double) if available...
|
| 2 |
+
|
| 3 |
+
"""
|
| 4 |
+
__all__ = ['finfo', 'iinfo']
|
| 5 |
+
|
| 6 |
+
import types
|
| 7 |
+
import warnings
|
| 8 |
+
|
| 9 |
+
from .._utils import set_module
|
| 10 |
+
from ._machar import MachAr
|
| 11 |
+
from . import numeric
|
| 12 |
+
from . import numerictypes as ntypes
|
| 13 |
+
from .numeric import array, inf, nan
|
| 14 |
+
from .umath import log10, exp2, nextafter, isnan
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def _fr0(a):
|
| 18 |
+
"""fix rank-0 --> rank-1"""
|
| 19 |
+
if a.ndim == 0:
|
| 20 |
+
a = a.copy()
|
| 21 |
+
a.shape = (1,)
|
| 22 |
+
return a
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def _fr1(a):
|
| 26 |
+
"""fix rank > 0 --> rank-0"""
|
| 27 |
+
if a.size == 1:
|
| 28 |
+
a = a.copy()
|
| 29 |
+
a.shape = ()
|
| 30 |
+
return a
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
class MachArLike:
|
| 34 |
+
""" Object to simulate MachAr instance """
|
| 35 |
+
def __init__(self, ftype, *, eps, epsneg, huge, tiny,
|
| 36 |
+
ibeta, smallest_subnormal=None, **kwargs):
|
| 37 |
+
self.params = _MACHAR_PARAMS[ftype]
|
| 38 |
+
self.ftype = ftype
|
| 39 |
+
self.title = self.params['title']
|
| 40 |
+
# Parameter types same as for discovered MachAr object.
|
| 41 |
+
if not smallest_subnormal:
|
| 42 |
+
self._smallest_subnormal = nextafter(
|
| 43 |
+
self.ftype(0), self.ftype(1), dtype=self.ftype)
|
| 44 |
+
else:
|
| 45 |
+
self._smallest_subnormal = smallest_subnormal
|
| 46 |
+
self.epsilon = self.eps = self._float_to_float(eps)
|
| 47 |
+
self.epsneg = self._float_to_float(epsneg)
|
| 48 |
+
self.xmax = self.huge = self._float_to_float(huge)
|
| 49 |
+
self.xmin = self._float_to_float(tiny)
|
| 50 |
+
self.smallest_normal = self.tiny = self._float_to_float(tiny)
|
| 51 |
+
self.ibeta = self.params['itype'](ibeta)
|
| 52 |
+
self.__dict__.update(kwargs)
|
| 53 |
+
self.precision = int(-log10(self.eps))
|
| 54 |
+
self.resolution = self._float_to_float(
|
| 55 |
+
self._float_conv(10) ** (-self.precision))
|
| 56 |
+
self._str_eps = self._float_to_str(self.eps)
|
| 57 |
+
self._str_epsneg = self._float_to_str(self.epsneg)
|
| 58 |
+
self._str_xmin = self._float_to_str(self.xmin)
|
| 59 |
+
self._str_xmax = self._float_to_str(self.xmax)
|
| 60 |
+
self._str_resolution = self._float_to_str(self.resolution)
|
| 61 |
+
self._str_smallest_normal = self._float_to_str(self.xmin)
|
| 62 |
+
|
| 63 |
+
@property
|
| 64 |
+
def smallest_subnormal(self):
|
| 65 |
+
"""Return the value for the smallest subnormal.
|
| 66 |
+
|
| 67 |
+
Returns
|
| 68 |
+
-------
|
| 69 |
+
smallest_subnormal : float
|
| 70 |
+
value for the smallest subnormal.
|
| 71 |
+
|
| 72 |
+
Warns
|
| 73 |
+
-----
|
| 74 |
+
UserWarning
|
| 75 |
+
If the calculated value for the smallest subnormal is zero.
|
| 76 |
+
"""
|
| 77 |
+
# Check that the calculated value is not zero, in case it raises a
|
| 78 |
+
# warning.
|
| 79 |
+
value = self._smallest_subnormal
|
| 80 |
+
if self.ftype(0) == value:
|
| 81 |
+
warnings.warn(
|
| 82 |
+
'The value of the smallest subnormal for {} type '
|
| 83 |
+
'is zero.'.format(self.ftype), UserWarning, stacklevel=2)
|
| 84 |
+
|
| 85 |
+
return self._float_to_float(value)
|
| 86 |
+
|
| 87 |
+
@property
|
| 88 |
+
def _str_smallest_subnormal(self):
|
| 89 |
+
"""Return the string representation of the smallest subnormal."""
|
| 90 |
+
return self._float_to_str(self.smallest_subnormal)
|
| 91 |
+
|
| 92 |
+
def _float_to_float(self, value):
|
| 93 |
+
"""Converts float to float.
|
| 94 |
+
|
| 95 |
+
Parameters
|
| 96 |
+
----------
|
| 97 |
+
value : float
|
| 98 |
+
value to be converted.
|
| 99 |
+
"""
|
| 100 |
+
return _fr1(self._float_conv(value))
|
| 101 |
+
|
| 102 |
+
def _float_conv(self, value):
|
| 103 |
+
"""Converts float to conv.
|
| 104 |
+
|
| 105 |
+
Parameters
|
| 106 |
+
----------
|
| 107 |
+
value : float
|
| 108 |
+
value to be converted.
|
| 109 |
+
"""
|
| 110 |
+
return array([value], self.ftype)
|
| 111 |
+
|
| 112 |
+
def _float_to_str(self, value):
|
| 113 |
+
"""Converts float to str.
|
| 114 |
+
|
| 115 |
+
Parameters
|
| 116 |
+
----------
|
| 117 |
+
value : float
|
| 118 |
+
value to be converted.
|
| 119 |
+
"""
|
| 120 |
+
return self.params['fmt'] % array(_fr0(value)[0], self.ftype)
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
_convert_to_float = {
|
| 124 |
+
ntypes.csingle: ntypes.single,
|
| 125 |
+
ntypes.complex128: ntypes.float64,
|
| 126 |
+
ntypes.clongdouble: ntypes.longdouble
|
| 127 |
+
}
|
| 128 |
+
|
| 129 |
+
# Parameters for creating MachAr / MachAr-like objects
|
| 130 |
+
_title_fmt = 'numpy {} precision floating point number'
|
| 131 |
+
_MACHAR_PARAMS = {
|
| 132 |
+
ntypes.double: dict(
|
| 133 |
+
itype = ntypes.int64,
|
| 134 |
+
fmt = '%24.16e',
|
| 135 |
+
title = _title_fmt.format('double')),
|
| 136 |
+
ntypes.single: dict(
|
| 137 |
+
itype = ntypes.int32,
|
| 138 |
+
fmt = '%15.7e',
|
| 139 |
+
title = _title_fmt.format('single')),
|
| 140 |
+
ntypes.longdouble: dict(
|
| 141 |
+
itype = ntypes.longlong,
|
| 142 |
+
fmt = '%s',
|
| 143 |
+
title = _title_fmt.format('long double')),
|
| 144 |
+
ntypes.half: dict(
|
| 145 |
+
itype = ntypes.int16,
|
| 146 |
+
fmt = '%12.5e',
|
| 147 |
+
title = _title_fmt.format('half'))}
|
| 148 |
+
|
| 149 |
+
# Key to identify the floating point type. Key is result of
|
| 150 |
+
#
|
| 151 |
+
# ftype = np.longdouble # or float64, float32, etc.
|
| 152 |
+
# v = (ftype(-1.0) / ftype(10.0))
|
| 153 |
+
# v.view(v.dtype.newbyteorder('<')).tobytes()
|
| 154 |
+
#
|
| 155 |
+
# Uses division to work around deficiencies in strtold on some platforms.
|
| 156 |
+
# See:
|
| 157 |
+
# https://perl5.git.perl.org/perl.git/blob/3118d7d684b56cbeb702af874f4326683c45f045:/Configure
|
| 158 |
+
|
| 159 |
+
_KNOWN_TYPES = {}
|
| 160 |
+
def _register_type(machar, bytepat):
|
| 161 |
+
_KNOWN_TYPES[bytepat] = machar
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
_float_ma = {}
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
def _register_known_types():
|
| 168 |
+
# Known parameters for float16
|
| 169 |
+
# See docstring of MachAr class for description of parameters.
|
| 170 |
+
f16 = ntypes.float16
|
| 171 |
+
float16_ma = MachArLike(f16,
|
| 172 |
+
machep=-10,
|
| 173 |
+
negep=-11,
|
| 174 |
+
minexp=-14,
|
| 175 |
+
maxexp=16,
|
| 176 |
+
it=10,
|
| 177 |
+
iexp=5,
|
| 178 |
+
ibeta=2,
|
| 179 |
+
irnd=5,
|
| 180 |
+
ngrd=0,
|
| 181 |
+
eps=exp2(f16(-10)),
|
| 182 |
+
epsneg=exp2(f16(-11)),
|
| 183 |
+
huge=f16(65504),
|
| 184 |
+
tiny=f16(2 ** -14))
|
| 185 |
+
_register_type(float16_ma, b'f\xae')
|
| 186 |
+
_float_ma[16] = float16_ma
|
| 187 |
+
|
| 188 |
+
# Known parameters for float32
|
| 189 |
+
f32 = ntypes.float32
|
| 190 |
+
float32_ma = MachArLike(f32,
|
| 191 |
+
machep=-23,
|
| 192 |
+
negep=-24,
|
| 193 |
+
minexp=-126,
|
| 194 |
+
maxexp=128,
|
| 195 |
+
it=23,
|
| 196 |
+
iexp=8,
|
| 197 |
+
ibeta=2,
|
| 198 |
+
irnd=5,
|
| 199 |
+
ngrd=0,
|
| 200 |
+
eps=exp2(f32(-23)),
|
| 201 |
+
epsneg=exp2(f32(-24)),
|
| 202 |
+
huge=f32((1 - 2 ** -24) * 2**128),
|
| 203 |
+
tiny=exp2(f32(-126)))
|
| 204 |
+
_register_type(float32_ma, b'\xcd\xcc\xcc\xbd')
|
| 205 |
+
_float_ma[32] = float32_ma
|
| 206 |
+
|
| 207 |
+
# Known parameters for float64
|
| 208 |
+
f64 = ntypes.float64
|
| 209 |
+
epsneg_f64 = 2.0 ** -53.0
|
| 210 |
+
tiny_f64 = 2.0 ** -1022.0
|
| 211 |
+
float64_ma = MachArLike(f64,
|
| 212 |
+
machep=-52,
|
| 213 |
+
negep=-53,
|
| 214 |
+
minexp=-1022,
|
| 215 |
+
maxexp=1024,
|
| 216 |
+
it=52,
|
| 217 |
+
iexp=11,
|
| 218 |
+
ibeta=2,
|
| 219 |
+
irnd=5,
|
| 220 |
+
ngrd=0,
|
| 221 |
+
eps=2.0 ** -52.0,
|
| 222 |
+
epsneg=epsneg_f64,
|
| 223 |
+
huge=(1.0 - epsneg_f64) / tiny_f64 * f64(4),
|
| 224 |
+
tiny=tiny_f64)
|
| 225 |
+
_register_type(float64_ma, b'\x9a\x99\x99\x99\x99\x99\xb9\xbf')
|
| 226 |
+
_float_ma[64] = float64_ma
|
| 227 |
+
|
| 228 |
+
# Known parameters for IEEE 754 128-bit binary float
|
| 229 |
+
ld = ntypes.longdouble
|
| 230 |
+
epsneg_f128 = exp2(ld(-113))
|
| 231 |
+
tiny_f128 = exp2(ld(-16382))
|
| 232 |
+
# Ignore runtime error when this is not f128
|
| 233 |
+
with numeric.errstate(all='ignore'):
|
| 234 |
+
huge_f128 = (ld(1) - epsneg_f128) / tiny_f128 * ld(4)
|
| 235 |
+
float128_ma = MachArLike(ld,
|
| 236 |
+
machep=-112,
|
| 237 |
+
negep=-113,
|
| 238 |
+
minexp=-16382,
|
| 239 |
+
maxexp=16384,
|
| 240 |
+
it=112,
|
| 241 |
+
iexp=15,
|
| 242 |
+
ibeta=2,
|
| 243 |
+
irnd=5,
|
| 244 |
+
ngrd=0,
|
| 245 |
+
eps=exp2(ld(-112)),
|
| 246 |
+
epsneg=epsneg_f128,
|
| 247 |
+
huge=huge_f128,
|
| 248 |
+
tiny=tiny_f128)
|
| 249 |
+
# IEEE 754 128-bit binary float
|
| 250 |
+
_register_type(float128_ma,
|
| 251 |
+
b'\x9a\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\xfb\xbf')
|
| 252 |
+
_float_ma[128] = float128_ma
|
| 253 |
+
|
| 254 |
+
# Known parameters for float80 (Intel 80-bit extended precision)
|
| 255 |
+
epsneg_f80 = exp2(ld(-64))
|
| 256 |
+
tiny_f80 = exp2(ld(-16382))
|
| 257 |
+
# Ignore runtime error when this is not f80
|
| 258 |
+
with numeric.errstate(all='ignore'):
|
| 259 |
+
huge_f80 = (ld(1) - epsneg_f80) / tiny_f80 * ld(4)
|
| 260 |
+
float80_ma = MachArLike(ld,
|
| 261 |
+
machep=-63,
|
| 262 |
+
negep=-64,
|
| 263 |
+
minexp=-16382,
|
| 264 |
+
maxexp=16384,
|
| 265 |
+
it=63,
|
| 266 |
+
iexp=15,
|
| 267 |
+
ibeta=2,
|
| 268 |
+
irnd=5,
|
| 269 |
+
ngrd=0,
|
| 270 |
+
eps=exp2(ld(-63)),
|
| 271 |
+
epsneg=epsneg_f80,
|
| 272 |
+
huge=huge_f80,
|
| 273 |
+
tiny=tiny_f80)
|
| 274 |
+
# float80, first 10 bytes containing actual storage
|
| 275 |
+
_register_type(float80_ma, b'\xcd\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xfb\xbf')
|
| 276 |
+
_float_ma[80] = float80_ma
|
| 277 |
+
|
| 278 |
+
# Guessed / known parameters for double double; see:
|
| 279 |
+
# https://en.wikipedia.org/wiki/Quadruple-precision_floating-point_format#Double-double_arithmetic
|
| 280 |
+
# These numbers have the same exponent range as float64, but extended
|
| 281 |
+
# number of digits in the significand.
|
| 282 |
+
huge_dd = nextafter(ld(inf), ld(0), dtype=ld)
|
| 283 |
+
# As the smallest_normal in double double is so hard to calculate we set
|
| 284 |
+
# it to NaN.
|
| 285 |
+
smallest_normal_dd = nan
|
| 286 |
+
# Leave the same value for the smallest subnormal as double
|
| 287 |
+
smallest_subnormal_dd = ld(nextafter(0., 1.))
|
| 288 |
+
float_dd_ma = MachArLike(ld,
|
| 289 |
+
machep=-105,
|
| 290 |
+
negep=-106,
|
| 291 |
+
minexp=-1022,
|
| 292 |
+
maxexp=1024,
|
| 293 |
+
it=105,
|
| 294 |
+
iexp=11,
|
| 295 |
+
ibeta=2,
|
| 296 |
+
irnd=5,
|
| 297 |
+
ngrd=0,
|
| 298 |
+
eps=exp2(ld(-105)),
|
| 299 |
+
epsneg=exp2(ld(-106)),
|
| 300 |
+
huge=huge_dd,
|
| 301 |
+
tiny=smallest_normal_dd,
|
| 302 |
+
smallest_subnormal=smallest_subnormal_dd)
|
| 303 |
+
# double double; low, high order (e.g. PPC 64)
|
| 304 |
+
_register_type(float_dd_ma,
|
| 305 |
+
b'\x9a\x99\x99\x99\x99\x99Y<\x9a\x99\x99\x99\x99\x99\xb9\xbf')
|
| 306 |
+
# double double; high, low order (e.g. PPC 64 le)
|
| 307 |
+
_register_type(float_dd_ma,
|
| 308 |
+
b'\x9a\x99\x99\x99\x99\x99\xb9\xbf\x9a\x99\x99\x99\x99\x99Y<')
|
| 309 |
+
_float_ma['dd'] = float_dd_ma
|
| 310 |
+
|
| 311 |
+
|
| 312 |
+
def _get_machar(ftype):
|
| 313 |
+
""" Get MachAr instance or MachAr-like instance
|
| 314 |
+
|
| 315 |
+
Get parameters for floating point type, by first trying signatures of
|
| 316 |
+
various known floating point types, then, if none match, attempting to
|
| 317 |
+
identify parameters by analysis.
|
| 318 |
+
|
| 319 |
+
Parameters
|
| 320 |
+
----------
|
| 321 |
+
ftype : class
|
| 322 |
+
Numpy floating point type class (e.g. ``np.float64``)
|
| 323 |
+
|
| 324 |
+
Returns
|
| 325 |
+
-------
|
| 326 |
+
ma_like : instance of :class:`MachAr` or :class:`MachArLike`
|
| 327 |
+
Object giving floating point parameters for `ftype`.
|
| 328 |
+
|
| 329 |
+
Warns
|
| 330 |
+
-----
|
| 331 |
+
UserWarning
|
| 332 |
+
If the binary signature of the float type is not in the dictionary of
|
| 333 |
+
known float types.
|
| 334 |
+
"""
|
| 335 |
+
params = _MACHAR_PARAMS.get(ftype)
|
| 336 |
+
if params is None:
|
| 337 |
+
raise ValueError(repr(ftype))
|
| 338 |
+
# Detect known / suspected types
|
| 339 |
+
# ftype(-1.0) / ftype(10.0) is better than ftype('-0.1') because stold
|
| 340 |
+
# may be deficient
|
| 341 |
+
key = (ftype(-1.0) / ftype(10.))
|
| 342 |
+
key = key.view(key.dtype.newbyteorder("<")).tobytes()
|
| 343 |
+
ma_like = None
|
| 344 |
+
if ftype == ntypes.longdouble:
|
| 345 |
+
# Could be 80 bit == 10 byte extended precision, where last bytes can
|
| 346 |
+
# be random garbage.
|
| 347 |
+
# Comparing first 10 bytes to pattern first to avoid branching on the
|
| 348 |
+
# random garbage.
|
| 349 |
+
ma_like = _KNOWN_TYPES.get(key[:10])
|
| 350 |
+
if ma_like is None:
|
| 351 |
+
# see if the full key is known.
|
| 352 |
+
ma_like = _KNOWN_TYPES.get(key)
|
| 353 |
+
if ma_like is None and len(key) == 16:
|
| 354 |
+
# machine limits could be f80 masquerading as np.float128,
|
| 355 |
+
# find all keys with length 16 and make new dict, but make the keys
|
| 356 |
+
# only 10 bytes long, the last bytes can be random garbage
|
| 357 |
+
_kt = {k[:10]: v for k, v in _KNOWN_TYPES.items() if len(k) == 16}
|
| 358 |
+
ma_like = _kt.get(key[:10])
|
| 359 |
+
if ma_like is not None:
|
| 360 |
+
return ma_like
|
| 361 |
+
# Fall back to parameter discovery
|
| 362 |
+
warnings.warn(
|
| 363 |
+
f'Signature {key} for {ftype} does not match any known type: '
|
| 364 |
+
'falling back to type probe function.\n'
|
| 365 |
+
'This warnings indicates broken support for the dtype!',
|
| 366 |
+
UserWarning, stacklevel=2)
|
| 367 |
+
return _discovered_machar(ftype)
|
| 368 |
+
|
| 369 |
+
|
| 370 |
+
def _discovered_machar(ftype):
|
| 371 |
+
""" Create MachAr instance with found information on float types
|
| 372 |
+
|
| 373 |
+
TODO: MachAr should be retired completely ideally. We currently only
|
| 374 |
+
ever use it system with broken longdouble (valgrind, WSL).
|
| 375 |
+
"""
|
| 376 |
+
params = _MACHAR_PARAMS[ftype]
|
| 377 |
+
return MachAr(lambda v: array([v], ftype),
|
| 378 |
+
lambda v: _fr0(v.astype(params['itype']))[0],
|
| 379 |
+
lambda v: array(_fr0(v)[0], ftype),
|
| 380 |
+
lambda v: params['fmt'] % array(_fr0(v)[0], ftype),
|
| 381 |
+
params['title'])
|
| 382 |
+
|
| 383 |
+
|
| 384 |
+
@set_module('numpy')
|
| 385 |
+
class finfo:
|
| 386 |
+
"""
|
| 387 |
+
finfo(dtype)
|
| 388 |
+
|
| 389 |
+
Machine limits for floating point types.
|
| 390 |
+
|
| 391 |
+
Attributes
|
| 392 |
+
----------
|
| 393 |
+
bits : int
|
| 394 |
+
The number of bits occupied by the type.
|
| 395 |
+
dtype : dtype
|
| 396 |
+
Returns the dtype for which `finfo` returns information. For complex
|
| 397 |
+
input, the returned dtype is the associated ``float*`` dtype for its
|
| 398 |
+
real and complex components.
|
| 399 |
+
eps : float
|
| 400 |
+
The difference between 1.0 and the next smallest representable float
|
| 401 |
+
larger than 1.0. For example, for 64-bit binary floats in the IEEE-754
|
| 402 |
+
standard, ``eps = 2**-52``, approximately 2.22e-16.
|
| 403 |
+
epsneg : float
|
| 404 |
+
The difference between 1.0 and the next smallest representable float
|
| 405 |
+
less than 1.0. For example, for 64-bit binary floats in the IEEE-754
|
| 406 |
+
standard, ``epsneg = 2**-53``, approximately 1.11e-16.
|
| 407 |
+
iexp : int
|
| 408 |
+
The number of bits in the exponent portion of the floating point
|
| 409 |
+
representation.
|
| 410 |
+
machep : int
|
| 411 |
+
The exponent that yields `eps`.
|
| 412 |
+
max : floating point number of the appropriate type
|
| 413 |
+
The largest representable number.
|
| 414 |
+
maxexp : int
|
| 415 |
+
The smallest positive power of the base (2) that causes overflow.
|
| 416 |
+
min : floating point number of the appropriate type
|
| 417 |
+
The smallest representable number, typically ``-max``.
|
| 418 |
+
minexp : int
|
| 419 |
+
The most negative power of the base (2) consistent with there
|
| 420 |
+
being no leading 0's in the mantissa.
|
| 421 |
+
negep : int
|
| 422 |
+
The exponent that yields `epsneg`.
|
| 423 |
+
nexp : int
|
| 424 |
+
The number of bits in the exponent including its sign and bias.
|
| 425 |
+
nmant : int
|
| 426 |
+
The number of bits in the mantissa.
|
| 427 |
+
precision : int
|
| 428 |
+
The approximate number of decimal digits to which this kind of
|
| 429 |
+
float is precise.
|
| 430 |
+
resolution : floating point number of the appropriate type
|
| 431 |
+
The approximate decimal resolution of this type, i.e.,
|
| 432 |
+
``10**-precision``.
|
| 433 |
+
tiny : float
|
| 434 |
+
An alias for `smallest_normal`, kept for backwards compatibility.
|
| 435 |
+
smallest_normal : float
|
| 436 |
+
The smallest positive floating point number with 1 as leading bit in
|
| 437 |
+
the mantissa following IEEE-754 (see Notes).
|
| 438 |
+
smallest_subnormal : float
|
| 439 |
+
The smallest positive floating point number with 0 as leading bit in
|
| 440 |
+
the mantissa following IEEE-754.
|
| 441 |
+
|
| 442 |
+
Parameters
|
| 443 |
+
----------
|
| 444 |
+
dtype : float, dtype, or instance
|
| 445 |
+
Kind of floating point or complex floating point
|
| 446 |
+
data-type about which to get information.
|
| 447 |
+
|
| 448 |
+
See Also
|
| 449 |
+
--------
|
| 450 |
+
iinfo : The equivalent for integer data types.
|
| 451 |
+
spacing : The distance between a value and the nearest adjacent number
|
| 452 |
+
nextafter : The next floating point value after x1 towards x2
|
| 453 |
+
|
| 454 |
+
Notes
|
| 455 |
+
-----
|
| 456 |
+
For developers of NumPy: do not instantiate this at the module level.
|
| 457 |
+
The initial calculation of these parameters is expensive and negatively
|
| 458 |
+
impacts import times. These objects are cached, so calling ``finfo()``
|
| 459 |
+
repeatedly inside your functions is not a problem.
|
| 460 |
+
|
| 461 |
+
Note that ``smallest_normal`` is not actually the smallest positive
|
| 462 |
+
representable value in a NumPy floating point type. As in the IEEE-754
|
| 463 |
+
standard [1]_, NumPy floating point types make use of subnormal numbers to
|
| 464 |
+
fill the gap between 0 and ``smallest_normal``. However, subnormal numbers
|
| 465 |
+
may have significantly reduced precision [2]_.
|
| 466 |
+
|
| 467 |
+
This function can also be used for complex data types as well. If used,
|
| 468 |
+
the output will be the same as the corresponding real float type
|
| 469 |
+
(e.g. numpy.finfo(numpy.csingle) is the same as numpy.finfo(numpy.single)).
|
| 470 |
+
However, the output is true for the real and imaginary components.
|
| 471 |
+
|
| 472 |
+
References
|
| 473 |
+
----------
|
| 474 |
+
.. [1] IEEE Standard for Floating-Point Arithmetic, IEEE Std 754-2008,
|
| 475 |
+
pp.1-70, 2008, https://doi.org/10.1109/IEEESTD.2008.4610935
|
| 476 |
+
.. [2] Wikipedia, "Denormal Numbers",
|
| 477 |
+
https://en.wikipedia.org/wiki/Denormal_number
|
| 478 |
+
|
| 479 |
+
Examples
|
| 480 |
+
--------
|
| 481 |
+
>>> import numpy as np
|
| 482 |
+
>>> np.finfo(np.float64).dtype
|
| 483 |
+
dtype('float64')
|
| 484 |
+
>>> np.finfo(np.complex64).dtype
|
| 485 |
+
dtype('float32')
|
| 486 |
+
|
| 487 |
+
"""
|
| 488 |
+
|
| 489 |
+
_finfo_cache = {}
|
| 490 |
+
|
| 491 |
+
__class_getitem__ = classmethod(types.GenericAlias)
|
| 492 |
+
|
| 493 |
+
def __new__(cls, dtype):
|
| 494 |
+
try:
|
| 495 |
+
obj = cls._finfo_cache.get(dtype) # most common path
|
| 496 |
+
if obj is not None:
|
| 497 |
+
return obj
|
| 498 |
+
except TypeError:
|
| 499 |
+
pass
|
| 500 |
+
|
| 501 |
+
if dtype is None:
|
| 502 |
+
# Deprecated in NumPy 1.25, 2023-01-16
|
| 503 |
+
warnings.warn(
|
| 504 |
+
"finfo() dtype cannot be None. This behavior will "
|
| 505 |
+
"raise an error in the future. (Deprecated in NumPy 1.25)",
|
| 506 |
+
DeprecationWarning,
|
| 507 |
+
stacklevel=2
|
| 508 |
+
)
|
| 509 |
+
|
| 510 |
+
try:
|
| 511 |
+
dtype = numeric.dtype(dtype)
|
| 512 |
+
except TypeError:
|
| 513 |
+
# In case a float instance was given
|
| 514 |
+
dtype = numeric.dtype(type(dtype))
|
| 515 |
+
|
| 516 |
+
obj = cls._finfo_cache.get(dtype)
|
| 517 |
+
if obj is not None:
|
| 518 |
+
return obj
|
| 519 |
+
dtypes = [dtype]
|
| 520 |
+
newdtype = ntypes.obj2sctype(dtype)
|
| 521 |
+
if newdtype is not dtype:
|
| 522 |
+
dtypes.append(newdtype)
|
| 523 |
+
dtype = newdtype
|
| 524 |
+
if not issubclass(dtype, numeric.inexact):
|
| 525 |
+
raise ValueError("data type %r not inexact" % (dtype))
|
| 526 |
+
obj = cls._finfo_cache.get(dtype)
|
| 527 |
+
if obj is not None:
|
| 528 |
+
return obj
|
| 529 |
+
if not issubclass(dtype, numeric.floating):
|
| 530 |
+
newdtype = _convert_to_float[dtype]
|
| 531 |
+
if newdtype is not dtype:
|
| 532 |
+
# dtype changed, for example from complex128 to float64
|
| 533 |
+
dtypes.append(newdtype)
|
| 534 |
+
dtype = newdtype
|
| 535 |
+
|
| 536 |
+
obj = cls._finfo_cache.get(dtype, None)
|
| 537 |
+
if obj is not None:
|
| 538 |
+
# the original dtype was not in the cache, but the new
|
| 539 |
+
# dtype is in the cache. we add the original dtypes to
|
| 540 |
+
# the cache and return the result
|
| 541 |
+
for dt in dtypes:
|
| 542 |
+
cls._finfo_cache[dt] = obj
|
| 543 |
+
return obj
|
| 544 |
+
obj = object.__new__(cls)._init(dtype)
|
| 545 |
+
for dt in dtypes:
|
| 546 |
+
cls._finfo_cache[dt] = obj
|
| 547 |
+
return obj
|
| 548 |
+
|
| 549 |
+
def _init(self, dtype):
|
| 550 |
+
self.dtype = numeric.dtype(dtype)
|
| 551 |
+
machar = _get_machar(dtype)
|
| 552 |
+
|
| 553 |
+
for word in ['precision', 'iexp',
|
| 554 |
+
'maxexp', 'minexp', 'negep',
|
| 555 |
+
'machep']:
|
| 556 |
+
setattr(self, word, getattr(machar, word))
|
| 557 |
+
for word in ['resolution', 'epsneg', 'smallest_subnormal']:
|
| 558 |
+
setattr(self, word, getattr(machar, word).flat[0])
|
| 559 |
+
self.bits = self.dtype.itemsize * 8
|
| 560 |
+
self.max = machar.huge.flat[0]
|
| 561 |
+
self.min = -self.max
|
| 562 |
+
self.eps = machar.eps.flat[0]
|
| 563 |
+
self.nexp = machar.iexp
|
| 564 |
+
self.nmant = machar.it
|
| 565 |
+
self._machar = machar
|
| 566 |
+
self._str_tiny = machar._str_xmin.strip()
|
| 567 |
+
self._str_max = machar._str_xmax.strip()
|
| 568 |
+
self._str_epsneg = machar._str_epsneg.strip()
|
| 569 |
+
self._str_eps = machar._str_eps.strip()
|
| 570 |
+
self._str_resolution = machar._str_resolution.strip()
|
| 571 |
+
self._str_smallest_normal = machar._str_smallest_normal.strip()
|
| 572 |
+
self._str_smallest_subnormal = machar._str_smallest_subnormal.strip()
|
| 573 |
+
return self
|
| 574 |
+
|
| 575 |
+
def __str__(self):
|
| 576 |
+
fmt = (
|
| 577 |
+
'Machine parameters for %(dtype)s\n'
|
| 578 |
+
'---------------------------------------------------------------\n'
|
| 579 |
+
'precision = %(precision)3s resolution = %(_str_resolution)s\n'
|
| 580 |
+
'machep = %(machep)6s eps = %(_str_eps)s\n'
|
| 581 |
+
'negep = %(negep)6s epsneg = %(_str_epsneg)s\n'
|
| 582 |
+
'minexp = %(minexp)6s tiny = %(_str_tiny)s\n'
|
| 583 |
+
'maxexp = %(maxexp)6s max = %(_str_max)s\n'
|
| 584 |
+
'nexp = %(nexp)6s min = -max\n'
|
| 585 |
+
'smallest_normal = %(_str_smallest_normal)s '
|
| 586 |
+
'smallest_subnormal = %(_str_smallest_subnormal)s\n'
|
| 587 |
+
'---------------------------------------------------------------\n'
|
| 588 |
+
)
|
| 589 |
+
return fmt % self.__dict__
|
| 590 |
+
|
| 591 |
+
def __repr__(self):
|
| 592 |
+
c = self.__class__.__name__
|
| 593 |
+
d = self.__dict__.copy()
|
| 594 |
+
d['klass'] = c
|
| 595 |
+
return (("%(klass)s(resolution=%(resolution)s, min=-%(_str_max)s,"
|
| 596 |
+
" max=%(_str_max)s, dtype=%(dtype)s)") % d)
|
| 597 |
+
|
| 598 |
+
@property
|
| 599 |
+
def smallest_normal(self):
|
| 600 |
+
"""Return the value for the smallest normal.
|
| 601 |
+
|
| 602 |
+
Returns
|
| 603 |
+
-------
|
| 604 |
+
smallest_normal : float
|
| 605 |
+
Value for the smallest normal.
|
| 606 |
+
|
| 607 |
+
Warns
|
| 608 |
+
-----
|
| 609 |
+
UserWarning
|
| 610 |
+
If the calculated value for the smallest normal is requested for
|
| 611 |
+
double-double.
|
| 612 |
+
"""
|
| 613 |
+
# This check is necessary because the value for smallest_normal is
|
| 614 |
+
# platform dependent for longdouble types.
|
| 615 |
+
if isnan(self._machar.smallest_normal.flat[0]):
|
| 616 |
+
warnings.warn(
|
| 617 |
+
'The value of smallest normal is undefined for double double',
|
| 618 |
+
UserWarning, stacklevel=2)
|
| 619 |
+
return self._machar.smallest_normal.flat[0]
|
| 620 |
+
|
| 621 |
+
@property
|
| 622 |
+
def tiny(self):
|
| 623 |
+
"""Return the value for tiny, alias of smallest_normal.
|
| 624 |
+
|
| 625 |
+
Returns
|
| 626 |
+
-------
|
| 627 |
+
tiny : float
|
| 628 |
+
Value for the smallest normal, alias of smallest_normal.
|
| 629 |
+
|
| 630 |
+
Warns
|
| 631 |
+
-----
|
| 632 |
+
UserWarning
|
| 633 |
+
If the calculated value for the smallest normal is requested for
|
| 634 |
+
double-double.
|
| 635 |
+
"""
|
| 636 |
+
return self.smallest_normal
|
| 637 |
+
|
| 638 |
+
|
| 639 |
+
@set_module('numpy')
|
| 640 |
+
class iinfo:
|
| 641 |
+
"""
|
| 642 |
+
iinfo(type)
|
| 643 |
+
|
| 644 |
+
Machine limits for integer types.
|
| 645 |
+
|
| 646 |
+
Attributes
|
| 647 |
+
----------
|
| 648 |
+
bits : int
|
| 649 |
+
The number of bits occupied by the type.
|
| 650 |
+
dtype : dtype
|
| 651 |
+
Returns the dtype for which `iinfo` returns information.
|
| 652 |
+
min : int
|
| 653 |
+
The smallest integer expressible by the type.
|
| 654 |
+
max : int
|
| 655 |
+
The largest integer expressible by the type.
|
| 656 |
+
|
| 657 |
+
Parameters
|
| 658 |
+
----------
|
| 659 |
+
int_type : integer type, dtype, or instance
|
| 660 |
+
The kind of integer data type to get information about.
|
| 661 |
+
|
| 662 |
+
See Also
|
| 663 |
+
--------
|
| 664 |
+
finfo : The equivalent for floating point data types.
|
| 665 |
+
|
| 666 |
+
Examples
|
| 667 |
+
--------
|
| 668 |
+
With types:
|
| 669 |
+
|
| 670 |
+
>>> import numpy as np
|
| 671 |
+
>>> ii16 = np.iinfo(np.int16)
|
| 672 |
+
>>> ii16.min
|
| 673 |
+
-32768
|
| 674 |
+
>>> ii16.max
|
| 675 |
+
32767
|
| 676 |
+
>>> ii32 = np.iinfo(np.int32)
|
| 677 |
+
>>> ii32.min
|
| 678 |
+
-2147483648
|
| 679 |
+
>>> ii32.max
|
| 680 |
+
2147483647
|
| 681 |
+
|
| 682 |
+
With instances:
|
| 683 |
+
|
| 684 |
+
>>> ii32 = np.iinfo(np.int32(10))
|
| 685 |
+
>>> ii32.min
|
| 686 |
+
-2147483648
|
| 687 |
+
>>> ii32.max
|
| 688 |
+
2147483647
|
| 689 |
+
|
| 690 |
+
"""
|
| 691 |
+
|
| 692 |
+
_min_vals = {}
|
| 693 |
+
_max_vals = {}
|
| 694 |
+
|
| 695 |
+
__class_getitem__ = classmethod(types.GenericAlias)
|
| 696 |
+
|
| 697 |
+
def __init__(self, int_type):
|
| 698 |
+
try:
|
| 699 |
+
self.dtype = numeric.dtype(int_type)
|
| 700 |
+
except TypeError:
|
| 701 |
+
self.dtype = numeric.dtype(type(int_type))
|
| 702 |
+
self.kind = self.dtype.kind
|
| 703 |
+
self.bits = self.dtype.itemsize * 8
|
| 704 |
+
self.key = "%s%d" % (self.kind, self.bits)
|
| 705 |
+
if self.kind not in 'iu':
|
| 706 |
+
raise ValueError("Invalid integer data type %r." % (self.kind,))
|
| 707 |
+
|
| 708 |
+
@property
|
| 709 |
+
def min(self):
|
| 710 |
+
"""Minimum value of given dtype."""
|
| 711 |
+
if self.kind == 'u':
|
| 712 |
+
return 0
|
| 713 |
+
else:
|
| 714 |
+
try:
|
| 715 |
+
val = iinfo._min_vals[self.key]
|
| 716 |
+
except KeyError:
|
| 717 |
+
val = int(-(1 << (self.bits-1)))
|
| 718 |
+
iinfo._min_vals[self.key] = val
|
| 719 |
+
return val
|
| 720 |
+
|
| 721 |
+
@property
|
| 722 |
+
def max(self):
|
| 723 |
+
"""Maximum value of given dtype."""
|
| 724 |
+
try:
|
| 725 |
+
val = iinfo._max_vals[self.key]
|
| 726 |
+
except KeyError:
|
| 727 |
+
if self.kind == 'u':
|
| 728 |
+
val = int((1 << self.bits) - 1)
|
| 729 |
+
else:
|
| 730 |
+
val = int((1 << (self.bits-1)) - 1)
|
| 731 |
+
iinfo._max_vals[self.key] = val
|
| 732 |
+
return val
|
| 733 |
+
|
| 734 |
+
def __str__(self):
|
| 735 |
+
"""String representation."""
|
| 736 |
+
fmt = (
|
| 737 |
+
'Machine parameters for %(dtype)s\n'
|
| 738 |
+
'---------------------------------------------------------------\n'
|
| 739 |
+
'min = %(min)s\n'
|
| 740 |
+
'max = %(max)s\n'
|
| 741 |
+
'---------------------------------------------------------------\n'
|
| 742 |
+
)
|
| 743 |
+
return fmt % {'dtype': self.dtype, 'min': self.min, 'max': self.max}
|
| 744 |
+
|
| 745 |
+
def __repr__(self):
|
| 746 |
+
return "%s(min=%s, max=%s, dtype=%s)" % (self.__class__.__name__,
|
| 747 |
+
self.min, self.max, self.dtype)
|
janus/lib/python3.10/site-packages/numpy/_core/getlimits.pyi
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from numpy import finfo, iinfo
|
| 2 |
+
|
| 3 |
+
__all__ = ["finfo", "iinfo"]
|
janus/lib/python3.10/site-packages/numpy/_core/memmap.pyi
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from numpy import memmap
|
| 2 |
+
|
| 3 |
+
__all__ = ["memmap"]
|
janus/lib/python3.10/site-packages/numpy/_core/multiarray.pyi
ADDED
|
@@ -0,0 +1,1348 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# TODO: Sort out any and all missing functions in this namespace
|
| 2 |
+
import datetime as dt
|
| 3 |
+
from _typeshed import StrOrBytesPath, SupportsLenAndGetItem
|
| 4 |
+
from collections.abc import Sequence, Callable, Iterable
|
| 5 |
+
from typing import (
|
| 6 |
+
Literal as L,
|
| 7 |
+
Any,
|
| 8 |
+
TypeAlias,
|
| 9 |
+
overload,
|
| 10 |
+
TypeVar,
|
| 11 |
+
TypedDict,
|
| 12 |
+
SupportsIndex,
|
| 13 |
+
final,
|
| 14 |
+
Final,
|
| 15 |
+
Protocol,
|
| 16 |
+
ClassVar,
|
| 17 |
+
type_check_only,
|
| 18 |
+
)
|
| 19 |
+
from typing_extensions import CapsuleType, Unpack
|
| 20 |
+
|
| 21 |
+
import numpy as np
|
| 22 |
+
from numpy import ( # type: ignore[attr-defined]
|
| 23 |
+
# Re-exports
|
| 24 |
+
busdaycalendar,
|
| 25 |
+
broadcast,
|
| 26 |
+
correlate,
|
| 27 |
+
count_nonzero,
|
| 28 |
+
dtype,
|
| 29 |
+
einsum as c_einsum,
|
| 30 |
+
flatiter,
|
| 31 |
+
from_dlpack,
|
| 32 |
+
interp,
|
| 33 |
+
matmul,
|
| 34 |
+
ndarray,
|
| 35 |
+
nditer,
|
| 36 |
+
vecdot,
|
| 37 |
+
|
| 38 |
+
# The rest
|
| 39 |
+
ufunc,
|
| 40 |
+
str_,
|
| 41 |
+
uint8,
|
| 42 |
+
intp,
|
| 43 |
+
int_,
|
| 44 |
+
float64,
|
| 45 |
+
timedelta64,
|
| 46 |
+
datetime64,
|
| 47 |
+
generic,
|
| 48 |
+
unsignedinteger,
|
| 49 |
+
signedinteger,
|
| 50 |
+
floating,
|
| 51 |
+
complexfloating,
|
| 52 |
+
_OrderKACF,
|
| 53 |
+
_OrderCF,
|
| 54 |
+
_CastingKind,
|
| 55 |
+
_ModeKind,
|
| 56 |
+
_SupportsBuffer,
|
| 57 |
+
_SupportsFileMethods,
|
| 58 |
+
_CopyMode,
|
| 59 |
+
_NDIterFlagsKind,
|
| 60 |
+
_NDIterFlagsOp,
|
| 61 |
+
)
|
| 62 |
+
from numpy.lib._array_utils_impl import normalize_axis_index
|
| 63 |
+
|
| 64 |
+
from numpy._typing import (
|
| 65 |
+
# Shapes
|
| 66 |
+
_ShapeLike,
|
| 67 |
+
|
| 68 |
+
# DTypes
|
| 69 |
+
DTypeLike,
|
| 70 |
+
_DTypeLike,
|
| 71 |
+
_SupportsDType,
|
| 72 |
+
|
| 73 |
+
# Arrays
|
| 74 |
+
NDArray,
|
| 75 |
+
ArrayLike,
|
| 76 |
+
_ArrayLike,
|
| 77 |
+
_SupportsArrayFunc,
|
| 78 |
+
_NestedSequence,
|
| 79 |
+
_ArrayLikeBool_co,
|
| 80 |
+
_ArrayLikeUInt_co,
|
| 81 |
+
_ArrayLikeInt_co,
|
| 82 |
+
_ArrayLikeFloat_co,
|
| 83 |
+
_ArrayLikeComplex_co,
|
| 84 |
+
_ArrayLikeTD64_co,
|
| 85 |
+
_ArrayLikeDT64_co,
|
| 86 |
+
_ArrayLikeObject_co,
|
| 87 |
+
_ArrayLikeStr_co,
|
| 88 |
+
_ArrayLikeBytes_co,
|
| 89 |
+
_ScalarLike_co,
|
| 90 |
+
_IntLike_co,
|
| 91 |
+
_FloatLike_co,
|
| 92 |
+
_TD64Like_co,
|
| 93 |
+
)
|
| 94 |
+
from numpy._typing._ufunc import (
|
| 95 |
+
_2PTuple,
|
| 96 |
+
_PyFunc_Nin1_Nout1,
|
| 97 |
+
_PyFunc_Nin2_Nout1,
|
| 98 |
+
_PyFunc_Nin3P_Nout1,
|
| 99 |
+
_PyFunc_Nin1P_Nout2P,
|
| 100 |
+
)
|
| 101 |
+
|
| 102 |
+
__all__ = [
|
| 103 |
+
"_ARRAY_API",
|
| 104 |
+
"ALLOW_THREADS",
|
| 105 |
+
"BUFSIZE",
|
| 106 |
+
"CLIP",
|
| 107 |
+
"DATETIMEUNITS",
|
| 108 |
+
"ITEM_HASOBJECT",
|
| 109 |
+
"ITEM_IS_POINTER",
|
| 110 |
+
"LIST_PICKLE",
|
| 111 |
+
"MAXDIMS",
|
| 112 |
+
"MAY_SHARE_BOUNDS",
|
| 113 |
+
"MAY_SHARE_EXACT",
|
| 114 |
+
"NEEDS_INIT",
|
| 115 |
+
"NEEDS_PYAPI",
|
| 116 |
+
"RAISE",
|
| 117 |
+
"USE_GETITEM",
|
| 118 |
+
"USE_SETITEM",
|
| 119 |
+
"WRAP",
|
| 120 |
+
"_flagdict",
|
| 121 |
+
"from_dlpack",
|
| 122 |
+
"_place",
|
| 123 |
+
"_reconstruct",
|
| 124 |
+
"_vec_string",
|
| 125 |
+
"_monotonicity",
|
| 126 |
+
"add_docstring",
|
| 127 |
+
"arange",
|
| 128 |
+
"array",
|
| 129 |
+
"asarray",
|
| 130 |
+
"asanyarray",
|
| 131 |
+
"ascontiguousarray",
|
| 132 |
+
"asfortranarray",
|
| 133 |
+
"bincount",
|
| 134 |
+
"broadcast",
|
| 135 |
+
"busday_count",
|
| 136 |
+
"busday_offset",
|
| 137 |
+
"busdaycalendar",
|
| 138 |
+
"can_cast",
|
| 139 |
+
"compare_chararrays",
|
| 140 |
+
"concatenate",
|
| 141 |
+
"copyto",
|
| 142 |
+
"correlate",
|
| 143 |
+
"correlate2",
|
| 144 |
+
"count_nonzero",
|
| 145 |
+
"c_einsum",
|
| 146 |
+
"datetime_as_string",
|
| 147 |
+
"datetime_data",
|
| 148 |
+
"dot",
|
| 149 |
+
"dragon4_positional",
|
| 150 |
+
"dragon4_scientific",
|
| 151 |
+
"dtype",
|
| 152 |
+
"empty",
|
| 153 |
+
"empty_like",
|
| 154 |
+
"error",
|
| 155 |
+
"flagsobj",
|
| 156 |
+
"flatiter",
|
| 157 |
+
"format_longfloat",
|
| 158 |
+
"frombuffer",
|
| 159 |
+
"fromfile",
|
| 160 |
+
"fromiter",
|
| 161 |
+
"fromstring",
|
| 162 |
+
"get_handler_name",
|
| 163 |
+
"get_handler_version",
|
| 164 |
+
"inner",
|
| 165 |
+
"interp",
|
| 166 |
+
"interp_complex",
|
| 167 |
+
"is_busday",
|
| 168 |
+
"lexsort",
|
| 169 |
+
"matmul",
|
| 170 |
+
"vecdot",
|
| 171 |
+
"may_share_memory",
|
| 172 |
+
"min_scalar_type",
|
| 173 |
+
"ndarray",
|
| 174 |
+
"nditer",
|
| 175 |
+
"nested_iters",
|
| 176 |
+
"normalize_axis_index",
|
| 177 |
+
"packbits",
|
| 178 |
+
"promote_types",
|
| 179 |
+
"putmask",
|
| 180 |
+
"ravel_multi_index",
|
| 181 |
+
"result_type",
|
| 182 |
+
"scalar",
|
| 183 |
+
"set_datetimeparse_function",
|
| 184 |
+
"set_typeDict",
|
| 185 |
+
"shares_memory",
|
| 186 |
+
"typeinfo",
|
| 187 |
+
"unpackbits",
|
| 188 |
+
"unravel_index",
|
| 189 |
+
"vdot",
|
| 190 |
+
"where",
|
| 191 |
+
"zeros",
|
| 192 |
+
]
|
| 193 |
+
|
| 194 |
+
_T_co = TypeVar("_T_co", covariant=True)
|
| 195 |
+
_T_contra = TypeVar("_T_contra", contravariant=True)
|
| 196 |
+
_SCT = TypeVar("_SCT", bound=generic)
|
| 197 |
+
_DType = TypeVar("_DType", bound=np.dtype[Any])
|
| 198 |
+
_ArrayType = TypeVar("_ArrayType", bound=ndarray[Any, Any])
|
| 199 |
+
_ArrayType_co = TypeVar(
|
| 200 |
+
"_ArrayType_co",
|
| 201 |
+
bound=ndarray[Any, Any],
|
| 202 |
+
covariant=True,
|
| 203 |
+
)
|
| 204 |
+
_ReturnType = TypeVar("_ReturnType")
|
| 205 |
+
_IDType = TypeVar("_IDType")
|
| 206 |
+
_Nin = TypeVar("_Nin", bound=int)
|
| 207 |
+
_Nout = TypeVar("_Nout", bound=int)
|
| 208 |
+
|
| 209 |
+
_SizeType = TypeVar("_SizeType", bound=int)
|
| 210 |
+
_ShapeType = TypeVar("_ShapeType", bound=tuple[int, ...])
|
| 211 |
+
_1DArray: TypeAlias = ndarray[tuple[_SizeType], dtype[_SCT]]
|
| 212 |
+
_Array: TypeAlias = ndarray[_ShapeType, dtype[_SCT]]
|
| 213 |
+
|
| 214 |
+
# Valid time units
|
| 215 |
+
_UnitKind: TypeAlias = L[
|
| 216 |
+
"Y",
|
| 217 |
+
"M",
|
| 218 |
+
"D",
|
| 219 |
+
"h",
|
| 220 |
+
"m",
|
| 221 |
+
"s",
|
| 222 |
+
"ms",
|
| 223 |
+
"us", "μs",
|
| 224 |
+
"ns",
|
| 225 |
+
"ps",
|
| 226 |
+
"fs",
|
| 227 |
+
"as",
|
| 228 |
+
]
|
| 229 |
+
_RollKind: TypeAlias = L[ # `raise` is deliberately excluded
|
| 230 |
+
"nat",
|
| 231 |
+
"forward",
|
| 232 |
+
"following",
|
| 233 |
+
"backward",
|
| 234 |
+
"preceding",
|
| 235 |
+
"modifiedfollowing",
|
| 236 |
+
"modifiedpreceding",
|
| 237 |
+
]
|
| 238 |
+
|
| 239 |
+
@type_check_only
|
| 240 |
+
class _SupportsArray(Protocol[_ArrayType_co]):
|
| 241 |
+
def __array__(self, /) -> _ArrayType_co: ...
|
| 242 |
+
|
| 243 |
+
@type_check_only
|
| 244 |
+
class _KwargsEmpty(TypedDict, total=False):
|
| 245 |
+
device: None | L["cpu"]
|
| 246 |
+
like: None | _SupportsArrayFunc
|
| 247 |
+
|
| 248 |
+
@type_check_only
|
| 249 |
+
class _ConstructorEmpty(Protocol):
|
| 250 |
+
# 1-D shape
|
| 251 |
+
@overload
|
| 252 |
+
def __call__(
|
| 253 |
+
self, /,
|
| 254 |
+
shape: _SizeType,
|
| 255 |
+
dtype: None = ...,
|
| 256 |
+
order: _OrderCF = ...,
|
| 257 |
+
**kwargs: Unpack[_KwargsEmpty],
|
| 258 |
+
) -> _Array[tuple[_SizeType], float64]: ...
|
| 259 |
+
@overload
|
| 260 |
+
def __call__(
|
| 261 |
+
self, /,
|
| 262 |
+
shape: _SizeType,
|
| 263 |
+
dtype: _DType | _SupportsDType[_DType],
|
| 264 |
+
order: _OrderCF = ...,
|
| 265 |
+
**kwargs: Unpack[_KwargsEmpty],
|
| 266 |
+
) -> ndarray[tuple[_SizeType], _DType]: ...
|
| 267 |
+
@overload
|
| 268 |
+
def __call__(
|
| 269 |
+
self, /,
|
| 270 |
+
shape: _SizeType,
|
| 271 |
+
dtype: type[_SCT],
|
| 272 |
+
order: _OrderCF = ...,
|
| 273 |
+
**kwargs: Unpack[_KwargsEmpty],
|
| 274 |
+
) -> _Array[tuple[_SizeType], _SCT]: ...
|
| 275 |
+
@overload
|
| 276 |
+
def __call__(
|
| 277 |
+
self, /,
|
| 278 |
+
shape: _SizeType,
|
| 279 |
+
dtype: DTypeLike,
|
| 280 |
+
order: _OrderCF = ...,
|
| 281 |
+
**kwargs: Unpack[_KwargsEmpty],
|
| 282 |
+
) -> _Array[tuple[_SizeType], Any]: ...
|
| 283 |
+
|
| 284 |
+
# known shape
|
| 285 |
+
@overload
|
| 286 |
+
def __call__(
|
| 287 |
+
self, /,
|
| 288 |
+
shape: _ShapeType,
|
| 289 |
+
dtype: None = ...,
|
| 290 |
+
order: _OrderCF = ...,
|
| 291 |
+
**kwargs: Unpack[_KwargsEmpty],
|
| 292 |
+
) -> _Array[_ShapeType, float64]: ...
|
| 293 |
+
@overload
|
| 294 |
+
def __call__(
|
| 295 |
+
self, /,
|
| 296 |
+
shape: _ShapeType,
|
| 297 |
+
dtype: _DType | _SupportsDType[_DType],
|
| 298 |
+
order: _OrderCF = ...,
|
| 299 |
+
**kwargs: Unpack[_KwargsEmpty],
|
| 300 |
+
) -> ndarray[_ShapeType, _DType]: ...
|
| 301 |
+
@overload
|
| 302 |
+
def __call__(
|
| 303 |
+
self, /,
|
| 304 |
+
shape: _ShapeType,
|
| 305 |
+
dtype: type[_SCT],
|
| 306 |
+
order: _OrderCF = ...,
|
| 307 |
+
**kwargs: Unpack[_KwargsEmpty],
|
| 308 |
+
) -> _Array[_ShapeType, _SCT]: ...
|
| 309 |
+
@overload
|
| 310 |
+
def __call__(
|
| 311 |
+
self, /,
|
| 312 |
+
shape: _ShapeType,
|
| 313 |
+
dtype: DTypeLike,
|
| 314 |
+
order: _OrderCF = ...,
|
| 315 |
+
**kwargs: Unpack[_KwargsEmpty],
|
| 316 |
+
) -> _Array[_ShapeType, Any]: ...
|
| 317 |
+
|
| 318 |
+
# unknown shape
|
| 319 |
+
@overload
|
| 320 |
+
def __call__(
|
| 321 |
+
self, /,
|
| 322 |
+
shape: _ShapeLike,
|
| 323 |
+
dtype: None = ...,
|
| 324 |
+
order: _OrderCF = ...,
|
| 325 |
+
**kwargs: Unpack[_KwargsEmpty],
|
| 326 |
+
) -> NDArray[float64]: ...
|
| 327 |
+
@overload
|
| 328 |
+
def __call__(
|
| 329 |
+
self, /,
|
| 330 |
+
shape: _ShapeLike,
|
| 331 |
+
dtype: _DType | _SupportsDType[_DType],
|
| 332 |
+
order: _OrderCF = ...,
|
| 333 |
+
**kwargs: Unpack[_KwargsEmpty],
|
| 334 |
+
) -> ndarray[Any, _DType]: ...
|
| 335 |
+
@overload
|
| 336 |
+
def __call__(
|
| 337 |
+
self, /,
|
| 338 |
+
shape: _ShapeLike,
|
| 339 |
+
dtype: type[_SCT],
|
| 340 |
+
order: _OrderCF = ...,
|
| 341 |
+
**kwargs: Unpack[_KwargsEmpty],
|
| 342 |
+
) -> NDArray[_SCT]: ...
|
| 343 |
+
@overload
|
| 344 |
+
def __call__(
|
| 345 |
+
self, /,
|
| 346 |
+
shape: _ShapeLike,
|
| 347 |
+
dtype: DTypeLike,
|
| 348 |
+
order: _OrderCF = ...,
|
| 349 |
+
**kwargs: Unpack[_KwargsEmpty],
|
| 350 |
+
) -> NDArray[Any]: ...
|
| 351 |
+
|
| 352 |
+
error: Final = Exception
|
| 353 |
+
|
| 354 |
+
# from ._multiarray_umath
|
| 355 |
+
ITEM_HASOBJECT: Final[L[1]]
|
| 356 |
+
LIST_PICKLE: Final[L[2]]
|
| 357 |
+
ITEM_IS_POINTER: Final[L[4]]
|
| 358 |
+
NEEDS_INIT: Final[L[8]]
|
| 359 |
+
NEEDS_PYAPI: Final[L[16]]
|
| 360 |
+
USE_GETITEM: Final[L[32]]
|
| 361 |
+
USE_SETITEM: Final[L[64]]
|
| 362 |
+
DATETIMEUNITS: Final[CapsuleType]
|
| 363 |
+
_ARRAY_API: Final[CapsuleType]
|
| 364 |
+
_flagdict: Final[dict[str, int]]
|
| 365 |
+
_monotonicity: Final[Callable[..., object]]
|
| 366 |
+
_place: Final[Callable[..., object]]
|
| 367 |
+
_reconstruct: Final[Callable[..., object]]
|
| 368 |
+
_vec_string: Final[Callable[..., object]]
|
| 369 |
+
correlate2: Final[Callable[..., object]]
|
| 370 |
+
dragon4_positional: Final[Callable[..., object]]
|
| 371 |
+
dragon4_scientific: Final[Callable[..., object]]
|
| 372 |
+
interp_complex: Final[Callable[..., object]]
|
| 373 |
+
set_datetimeparse_function: Final[Callable[..., object]]
|
| 374 |
+
def get_handler_name(a: NDArray[Any] = ..., /) -> str | None: ...
|
| 375 |
+
def get_handler_version(a: NDArray[Any] = ..., /) -> int | None: ...
|
| 376 |
+
def format_longfloat(x: np.longdouble, precision: int) -> str: ...
|
| 377 |
+
def scalar(dtype: _DType, object: bytes | object = ...) -> ndarray[tuple[()], _DType]: ...
|
| 378 |
+
def set_typeDict(dict_: dict[str, np.dtype[Any]], /) -> None: ...
|
| 379 |
+
typeinfo: Final[dict[str, np.dtype[np.generic]]]
|
| 380 |
+
|
| 381 |
+
ALLOW_THREADS: Final[int] # 0 or 1 (system-specific)
|
| 382 |
+
BUFSIZE: L[8192]
|
| 383 |
+
CLIP: L[0]
|
| 384 |
+
WRAP: L[1]
|
| 385 |
+
RAISE: L[2]
|
| 386 |
+
MAXDIMS: L[32]
|
| 387 |
+
MAY_SHARE_BOUNDS: L[0]
|
| 388 |
+
MAY_SHARE_EXACT: L[-1]
|
| 389 |
+
tracemalloc_domain: L[389047]
|
| 390 |
+
|
| 391 |
+
zeros: Final[_ConstructorEmpty]
|
| 392 |
+
empty: Final[_ConstructorEmpty]
|
| 393 |
+
|
| 394 |
+
@overload
|
| 395 |
+
def empty_like(
|
| 396 |
+
prototype: _ArrayType,
|
| 397 |
+
dtype: None = ...,
|
| 398 |
+
order: _OrderKACF = ...,
|
| 399 |
+
subok: bool = ...,
|
| 400 |
+
shape: None | _ShapeLike = ...,
|
| 401 |
+
*,
|
| 402 |
+
device: None | L["cpu"] = ...,
|
| 403 |
+
) -> _ArrayType: ...
|
| 404 |
+
@overload
|
| 405 |
+
def empty_like(
|
| 406 |
+
prototype: _ArrayLike[_SCT],
|
| 407 |
+
dtype: None = ...,
|
| 408 |
+
order: _OrderKACF = ...,
|
| 409 |
+
subok: bool = ...,
|
| 410 |
+
shape: None | _ShapeLike = ...,
|
| 411 |
+
*,
|
| 412 |
+
device: None | L["cpu"] = ...,
|
| 413 |
+
) -> NDArray[_SCT]: ...
|
| 414 |
+
@overload
|
| 415 |
+
def empty_like(
|
| 416 |
+
prototype: object,
|
| 417 |
+
dtype: None = ...,
|
| 418 |
+
order: _OrderKACF = ...,
|
| 419 |
+
subok: bool = ...,
|
| 420 |
+
shape: None | _ShapeLike = ...,
|
| 421 |
+
*,
|
| 422 |
+
device: None | L["cpu"] = ...,
|
| 423 |
+
) -> NDArray[Any]: ...
|
| 424 |
+
@overload
|
| 425 |
+
def empty_like(
|
| 426 |
+
prototype: Any,
|
| 427 |
+
dtype: _DTypeLike[_SCT],
|
| 428 |
+
order: _OrderKACF = ...,
|
| 429 |
+
subok: bool = ...,
|
| 430 |
+
shape: None | _ShapeLike = ...,
|
| 431 |
+
*,
|
| 432 |
+
device: None | L["cpu"] = ...,
|
| 433 |
+
) -> NDArray[_SCT]: ...
|
| 434 |
+
@overload
|
| 435 |
+
def empty_like(
|
| 436 |
+
prototype: Any,
|
| 437 |
+
dtype: DTypeLike,
|
| 438 |
+
order: _OrderKACF = ...,
|
| 439 |
+
subok: bool = ...,
|
| 440 |
+
shape: None | _ShapeLike = ...,
|
| 441 |
+
*,
|
| 442 |
+
device: None | L["cpu"] = ...,
|
| 443 |
+
) -> NDArray[Any]: ...
|
| 444 |
+
|
| 445 |
+
@overload
|
| 446 |
+
def array(
|
| 447 |
+
object: _ArrayType,
|
| 448 |
+
dtype: None = ...,
|
| 449 |
+
*,
|
| 450 |
+
copy: None | bool | _CopyMode = ...,
|
| 451 |
+
order: _OrderKACF = ...,
|
| 452 |
+
subok: L[True],
|
| 453 |
+
ndmin: int = ...,
|
| 454 |
+
like: None | _SupportsArrayFunc = ...,
|
| 455 |
+
) -> _ArrayType: ...
|
| 456 |
+
@overload
|
| 457 |
+
def array(
|
| 458 |
+
object: _SupportsArray[_ArrayType],
|
| 459 |
+
dtype: None = ...,
|
| 460 |
+
*,
|
| 461 |
+
copy: None | bool | _CopyMode = ...,
|
| 462 |
+
order: _OrderKACF = ...,
|
| 463 |
+
subok: L[True],
|
| 464 |
+
ndmin: L[0] = ...,
|
| 465 |
+
like: None | _SupportsArrayFunc = ...,
|
| 466 |
+
) -> _ArrayType: ...
|
| 467 |
+
@overload
|
| 468 |
+
def array(
|
| 469 |
+
object: _ArrayLike[_SCT],
|
| 470 |
+
dtype: None = ...,
|
| 471 |
+
*,
|
| 472 |
+
copy: None | bool | _CopyMode = ...,
|
| 473 |
+
order: _OrderKACF = ...,
|
| 474 |
+
subok: bool = ...,
|
| 475 |
+
ndmin: int = ...,
|
| 476 |
+
like: None | _SupportsArrayFunc = ...,
|
| 477 |
+
) -> NDArray[_SCT]: ...
|
| 478 |
+
@overload
|
| 479 |
+
def array(
|
| 480 |
+
object: object,
|
| 481 |
+
dtype: None = ...,
|
| 482 |
+
*,
|
| 483 |
+
copy: None | bool | _CopyMode = ...,
|
| 484 |
+
order: _OrderKACF = ...,
|
| 485 |
+
subok: bool = ...,
|
| 486 |
+
ndmin: int = ...,
|
| 487 |
+
like: None | _SupportsArrayFunc = ...,
|
| 488 |
+
) -> NDArray[Any]: ...
|
| 489 |
+
@overload
|
| 490 |
+
def array(
|
| 491 |
+
object: Any,
|
| 492 |
+
dtype: _DTypeLike[_SCT],
|
| 493 |
+
*,
|
| 494 |
+
copy: None | bool | _CopyMode = ...,
|
| 495 |
+
order: _OrderKACF = ...,
|
| 496 |
+
subok: bool = ...,
|
| 497 |
+
ndmin: int = ...,
|
| 498 |
+
like: None | _SupportsArrayFunc = ...,
|
| 499 |
+
) -> NDArray[_SCT]: ...
|
| 500 |
+
@overload
|
| 501 |
+
def array(
|
| 502 |
+
object: Any,
|
| 503 |
+
dtype: DTypeLike,
|
| 504 |
+
*,
|
| 505 |
+
copy: None | bool | _CopyMode = ...,
|
| 506 |
+
order: _OrderKACF = ...,
|
| 507 |
+
subok: bool = ...,
|
| 508 |
+
ndmin: int = ...,
|
| 509 |
+
like: None | _SupportsArrayFunc = ...,
|
| 510 |
+
) -> NDArray[Any]: ...
|
| 511 |
+
|
| 512 |
+
@overload
|
| 513 |
+
def unravel_index( # type: ignore[misc]
|
| 514 |
+
indices: _IntLike_co,
|
| 515 |
+
shape: _ShapeLike,
|
| 516 |
+
order: _OrderCF = ...,
|
| 517 |
+
) -> tuple[intp, ...]: ...
|
| 518 |
+
@overload
|
| 519 |
+
def unravel_index(
|
| 520 |
+
indices: _ArrayLikeInt_co,
|
| 521 |
+
shape: _ShapeLike,
|
| 522 |
+
order: _OrderCF = ...,
|
| 523 |
+
) -> tuple[NDArray[intp], ...]: ...
|
| 524 |
+
|
| 525 |
+
@overload
|
| 526 |
+
def ravel_multi_index( # type: ignore[misc]
|
| 527 |
+
multi_index: Sequence[_IntLike_co],
|
| 528 |
+
dims: Sequence[SupportsIndex],
|
| 529 |
+
mode: _ModeKind | tuple[_ModeKind, ...] = ...,
|
| 530 |
+
order: _OrderCF = ...,
|
| 531 |
+
) -> intp: ...
|
| 532 |
+
@overload
|
| 533 |
+
def ravel_multi_index(
|
| 534 |
+
multi_index: Sequence[_ArrayLikeInt_co],
|
| 535 |
+
dims: Sequence[SupportsIndex],
|
| 536 |
+
mode: _ModeKind | tuple[_ModeKind, ...] = ...,
|
| 537 |
+
order: _OrderCF = ...,
|
| 538 |
+
) -> NDArray[intp]: ...
|
| 539 |
+
|
| 540 |
+
# NOTE: Allow any sequence of array-like objects
|
| 541 |
+
@overload
|
| 542 |
+
def concatenate( # type: ignore[misc]
|
| 543 |
+
arrays: _ArrayLike[_SCT],
|
| 544 |
+
/,
|
| 545 |
+
axis: None | SupportsIndex = ...,
|
| 546 |
+
out: None = ...,
|
| 547 |
+
*,
|
| 548 |
+
dtype: None = ...,
|
| 549 |
+
casting: None | _CastingKind = ...
|
| 550 |
+
) -> NDArray[_SCT]: ...
|
| 551 |
+
@overload
|
| 552 |
+
def concatenate( # type: ignore[misc]
|
| 553 |
+
arrays: SupportsLenAndGetItem[ArrayLike],
|
| 554 |
+
/,
|
| 555 |
+
axis: None | SupportsIndex = ...,
|
| 556 |
+
out: None = ...,
|
| 557 |
+
*,
|
| 558 |
+
dtype: None = ...,
|
| 559 |
+
casting: None | _CastingKind = ...
|
| 560 |
+
) -> NDArray[Any]: ...
|
| 561 |
+
@overload
|
| 562 |
+
def concatenate( # type: ignore[misc]
|
| 563 |
+
arrays: SupportsLenAndGetItem[ArrayLike],
|
| 564 |
+
/,
|
| 565 |
+
axis: None | SupportsIndex = ...,
|
| 566 |
+
out: None = ...,
|
| 567 |
+
*,
|
| 568 |
+
dtype: _DTypeLike[_SCT],
|
| 569 |
+
casting: None | _CastingKind = ...
|
| 570 |
+
) -> NDArray[_SCT]: ...
|
| 571 |
+
@overload
|
| 572 |
+
def concatenate( # type: ignore[misc]
|
| 573 |
+
arrays: SupportsLenAndGetItem[ArrayLike],
|
| 574 |
+
/,
|
| 575 |
+
axis: None | SupportsIndex = ...,
|
| 576 |
+
out: None = ...,
|
| 577 |
+
*,
|
| 578 |
+
dtype: DTypeLike,
|
| 579 |
+
casting: None | _CastingKind = ...
|
| 580 |
+
) -> NDArray[Any]: ...
|
| 581 |
+
@overload
|
| 582 |
+
def concatenate(
|
| 583 |
+
arrays: SupportsLenAndGetItem[ArrayLike],
|
| 584 |
+
/,
|
| 585 |
+
axis: None | SupportsIndex = ...,
|
| 586 |
+
out: _ArrayType = ...,
|
| 587 |
+
*,
|
| 588 |
+
dtype: DTypeLike = ...,
|
| 589 |
+
casting: None | _CastingKind = ...
|
| 590 |
+
) -> _ArrayType: ...
|
| 591 |
+
|
| 592 |
+
def inner(
|
| 593 |
+
a: ArrayLike,
|
| 594 |
+
b: ArrayLike,
|
| 595 |
+
/,
|
| 596 |
+
) -> Any: ...
|
| 597 |
+
|
| 598 |
+
@overload
|
| 599 |
+
def where(
|
| 600 |
+
condition: ArrayLike,
|
| 601 |
+
/,
|
| 602 |
+
) -> tuple[NDArray[intp], ...]: ...
|
| 603 |
+
@overload
|
| 604 |
+
def where(
|
| 605 |
+
condition: ArrayLike,
|
| 606 |
+
x: ArrayLike,
|
| 607 |
+
y: ArrayLike,
|
| 608 |
+
/,
|
| 609 |
+
) -> NDArray[Any]: ...
|
| 610 |
+
|
| 611 |
+
def lexsort(
|
| 612 |
+
keys: ArrayLike,
|
| 613 |
+
axis: None | SupportsIndex = ...,
|
| 614 |
+
) -> Any: ...
|
| 615 |
+
|
| 616 |
+
def can_cast(
|
| 617 |
+
from_: ArrayLike | DTypeLike,
|
| 618 |
+
to: DTypeLike,
|
| 619 |
+
casting: None | _CastingKind = ...,
|
| 620 |
+
) -> bool: ...
|
| 621 |
+
|
| 622 |
+
def min_scalar_type(
|
| 623 |
+
a: ArrayLike, /,
|
| 624 |
+
) -> dtype[Any]: ...
|
| 625 |
+
|
| 626 |
+
def result_type(
|
| 627 |
+
*arrays_and_dtypes: ArrayLike | DTypeLike,
|
| 628 |
+
) -> dtype[Any]: ...
|
| 629 |
+
|
| 630 |
+
@overload
|
| 631 |
+
def dot(a: ArrayLike, b: ArrayLike, out: None = ...) -> Any: ...
|
| 632 |
+
@overload
|
| 633 |
+
def dot(a: ArrayLike, b: ArrayLike, out: _ArrayType) -> _ArrayType: ...
|
| 634 |
+
|
| 635 |
+
@overload
|
| 636 |
+
def vdot(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co, /) -> np.bool: ... # type: ignore[misc]
|
| 637 |
+
@overload
|
| 638 |
+
def vdot(a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co, /) -> unsignedinteger[Any]: ... # type: ignore[misc]
|
| 639 |
+
@overload
|
| 640 |
+
def vdot(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, /) -> signedinteger[Any]: ... # type: ignore[misc]
|
| 641 |
+
@overload
|
| 642 |
+
def vdot(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, /) -> floating[Any]: ... # type: ignore[misc]
|
| 643 |
+
@overload
|
| 644 |
+
def vdot(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, /) -> complexfloating[Any, Any]: ... # type: ignore[misc]
|
| 645 |
+
@overload
|
| 646 |
+
def vdot(a: _ArrayLikeTD64_co, b: _ArrayLikeTD64_co, /) -> timedelta64: ...
|
| 647 |
+
@overload
|
| 648 |
+
def vdot(a: _ArrayLikeObject_co, b: Any, /) -> Any: ...
|
| 649 |
+
@overload
|
| 650 |
+
def vdot(a: Any, b: _ArrayLikeObject_co, /) -> Any: ...
|
| 651 |
+
|
| 652 |
+
def bincount(
|
| 653 |
+
x: ArrayLike,
|
| 654 |
+
/,
|
| 655 |
+
weights: None | ArrayLike = ...,
|
| 656 |
+
minlength: SupportsIndex = ...,
|
| 657 |
+
) -> NDArray[intp]: ...
|
| 658 |
+
|
| 659 |
+
def copyto(
|
| 660 |
+
dst: NDArray[Any],
|
| 661 |
+
src: ArrayLike,
|
| 662 |
+
casting: None | _CastingKind = ...,
|
| 663 |
+
where: None | _ArrayLikeBool_co = ...,
|
| 664 |
+
) -> None: ...
|
| 665 |
+
|
| 666 |
+
def putmask(
|
| 667 |
+
a: NDArray[Any],
|
| 668 |
+
/,
|
| 669 |
+
mask: _ArrayLikeBool_co,
|
| 670 |
+
values: ArrayLike,
|
| 671 |
+
) -> None: ...
|
| 672 |
+
|
| 673 |
+
def packbits(
|
| 674 |
+
a: _ArrayLikeInt_co,
|
| 675 |
+
/,
|
| 676 |
+
axis: None | SupportsIndex = ...,
|
| 677 |
+
bitorder: L["big", "little"] = ...,
|
| 678 |
+
) -> NDArray[uint8]: ...
|
| 679 |
+
|
| 680 |
+
def unpackbits(
|
| 681 |
+
a: _ArrayLike[uint8],
|
| 682 |
+
/,
|
| 683 |
+
axis: None | SupportsIndex = ...,
|
| 684 |
+
count: None | SupportsIndex = ...,
|
| 685 |
+
bitorder: L["big", "little"] = ...,
|
| 686 |
+
) -> NDArray[uint8]: ...
|
| 687 |
+
|
| 688 |
+
def shares_memory(
|
| 689 |
+
a: object,
|
| 690 |
+
b: object,
|
| 691 |
+
/,
|
| 692 |
+
max_work: None | int = ...,
|
| 693 |
+
) -> bool: ...
|
| 694 |
+
|
| 695 |
+
def may_share_memory(
|
| 696 |
+
a: object,
|
| 697 |
+
b: object,
|
| 698 |
+
/,
|
| 699 |
+
max_work: None | int = ...,
|
| 700 |
+
) -> bool: ...
|
| 701 |
+
|
| 702 |
+
@overload
|
| 703 |
+
def asarray(
|
| 704 |
+
a: _ArrayLike[_SCT],
|
| 705 |
+
dtype: None = ...,
|
| 706 |
+
order: _OrderKACF = ...,
|
| 707 |
+
*,
|
| 708 |
+
device: None | L["cpu"] = ...,
|
| 709 |
+
copy: None | bool = ...,
|
| 710 |
+
like: None | _SupportsArrayFunc = ...,
|
| 711 |
+
) -> NDArray[_SCT]: ...
|
| 712 |
+
@overload
|
| 713 |
+
def asarray(
|
| 714 |
+
a: object,
|
| 715 |
+
dtype: None = ...,
|
| 716 |
+
order: _OrderKACF = ...,
|
| 717 |
+
*,
|
| 718 |
+
device: None | L["cpu"] = ...,
|
| 719 |
+
copy: None | bool = ...,
|
| 720 |
+
like: None | _SupportsArrayFunc = ...,
|
| 721 |
+
) -> NDArray[Any]: ...
|
| 722 |
+
@overload
|
| 723 |
+
def asarray(
|
| 724 |
+
a: Any,
|
| 725 |
+
dtype: _DTypeLike[_SCT],
|
| 726 |
+
order: _OrderKACF = ...,
|
| 727 |
+
*,
|
| 728 |
+
device: None | L["cpu"] = ...,
|
| 729 |
+
copy: None | bool = ...,
|
| 730 |
+
like: None | _SupportsArrayFunc = ...,
|
| 731 |
+
) -> NDArray[_SCT]: ...
|
| 732 |
+
@overload
|
| 733 |
+
def asarray(
|
| 734 |
+
a: Any,
|
| 735 |
+
dtype: DTypeLike,
|
| 736 |
+
order: _OrderKACF = ...,
|
| 737 |
+
*,
|
| 738 |
+
device: None | L["cpu"] = ...,
|
| 739 |
+
copy: None | bool = ...,
|
| 740 |
+
like: None | _SupportsArrayFunc = ...,
|
| 741 |
+
) -> NDArray[Any]: ...
|
| 742 |
+
|
| 743 |
+
@overload
|
| 744 |
+
def asanyarray(
|
| 745 |
+
a: _ArrayType, # Preserve subclass-information
|
| 746 |
+
dtype: None = ...,
|
| 747 |
+
order: _OrderKACF = ...,
|
| 748 |
+
*,
|
| 749 |
+
device: None | L["cpu"] = ...,
|
| 750 |
+
copy: None | bool = ...,
|
| 751 |
+
like: None | _SupportsArrayFunc = ...,
|
| 752 |
+
) -> _ArrayType: ...
|
| 753 |
+
@overload
|
| 754 |
+
def asanyarray(
|
| 755 |
+
a: _ArrayLike[_SCT],
|
| 756 |
+
dtype: None = ...,
|
| 757 |
+
order: _OrderKACF = ...,
|
| 758 |
+
*,
|
| 759 |
+
device: None | L["cpu"] = ...,
|
| 760 |
+
copy: None | bool = ...,
|
| 761 |
+
like: None | _SupportsArrayFunc = ...,
|
| 762 |
+
) -> NDArray[_SCT]: ...
|
| 763 |
+
@overload
|
| 764 |
+
def asanyarray(
|
| 765 |
+
a: object,
|
| 766 |
+
dtype: None = ...,
|
| 767 |
+
order: _OrderKACF = ...,
|
| 768 |
+
*,
|
| 769 |
+
device: None | L["cpu"] = ...,
|
| 770 |
+
copy: None | bool = ...,
|
| 771 |
+
like: None | _SupportsArrayFunc = ...,
|
| 772 |
+
) -> NDArray[Any]: ...
|
| 773 |
+
@overload
|
| 774 |
+
def asanyarray(
|
| 775 |
+
a: Any,
|
| 776 |
+
dtype: _DTypeLike[_SCT],
|
| 777 |
+
order: _OrderKACF = ...,
|
| 778 |
+
*,
|
| 779 |
+
device: None | L["cpu"] = ...,
|
| 780 |
+
copy: None | bool = ...,
|
| 781 |
+
like: None | _SupportsArrayFunc = ...,
|
| 782 |
+
) -> NDArray[_SCT]: ...
|
| 783 |
+
@overload
|
| 784 |
+
def asanyarray(
|
| 785 |
+
a: Any,
|
| 786 |
+
dtype: DTypeLike,
|
| 787 |
+
order: _OrderKACF = ...,
|
| 788 |
+
*,
|
| 789 |
+
device: None | L["cpu"] = ...,
|
| 790 |
+
copy: None | bool = ...,
|
| 791 |
+
like: None | _SupportsArrayFunc = ...,
|
| 792 |
+
) -> NDArray[Any]: ...
|
| 793 |
+
|
| 794 |
+
@overload
|
| 795 |
+
def ascontiguousarray(
|
| 796 |
+
a: _ArrayLike[_SCT],
|
| 797 |
+
dtype: None = ...,
|
| 798 |
+
*,
|
| 799 |
+
like: None | _SupportsArrayFunc = ...,
|
| 800 |
+
) -> NDArray[_SCT]: ...
|
| 801 |
+
@overload
|
| 802 |
+
def ascontiguousarray(
|
| 803 |
+
a: object,
|
| 804 |
+
dtype: None = ...,
|
| 805 |
+
*,
|
| 806 |
+
like: None | _SupportsArrayFunc = ...,
|
| 807 |
+
) -> NDArray[Any]: ...
|
| 808 |
+
@overload
|
| 809 |
+
def ascontiguousarray(
|
| 810 |
+
a: Any,
|
| 811 |
+
dtype: _DTypeLike[_SCT],
|
| 812 |
+
*,
|
| 813 |
+
like: None | _SupportsArrayFunc = ...,
|
| 814 |
+
) -> NDArray[_SCT]: ...
|
| 815 |
+
@overload
|
| 816 |
+
def ascontiguousarray(
|
| 817 |
+
a: Any,
|
| 818 |
+
dtype: DTypeLike,
|
| 819 |
+
*,
|
| 820 |
+
like: None | _SupportsArrayFunc = ...,
|
| 821 |
+
) -> NDArray[Any]: ...
|
| 822 |
+
|
| 823 |
+
@overload
|
| 824 |
+
def asfortranarray(
|
| 825 |
+
a: _ArrayLike[_SCT],
|
| 826 |
+
dtype: None = ...,
|
| 827 |
+
*,
|
| 828 |
+
like: None | _SupportsArrayFunc = ...,
|
| 829 |
+
) -> NDArray[_SCT]: ...
|
| 830 |
+
@overload
|
| 831 |
+
def asfortranarray(
|
| 832 |
+
a: object,
|
| 833 |
+
dtype: None = ...,
|
| 834 |
+
*,
|
| 835 |
+
like: None | _SupportsArrayFunc = ...,
|
| 836 |
+
) -> NDArray[Any]: ...
|
| 837 |
+
@overload
|
| 838 |
+
def asfortranarray(
|
| 839 |
+
a: Any,
|
| 840 |
+
dtype: _DTypeLike[_SCT],
|
| 841 |
+
*,
|
| 842 |
+
like: None | _SupportsArrayFunc = ...,
|
| 843 |
+
) -> NDArray[_SCT]: ...
|
| 844 |
+
@overload
|
| 845 |
+
def asfortranarray(
|
| 846 |
+
a: Any,
|
| 847 |
+
dtype: DTypeLike,
|
| 848 |
+
*,
|
| 849 |
+
like: None | _SupportsArrayFunc = ...,
|
| 850 |
+
) -> NDArray[Any]: ...
|
| 851 |
+
|
| 852 |
+
def promote_types(__type1: DTypeLike, __type2: DTypeLike) -> dtype[Any]: ...
|
| 853 |
+
|
| 854 |
+
# `sep` is a de facto mandatory argument, as its default value is deprecated
|
| 855 |
+
@overload
|
| 856 |
+
def fromstring(
|
| 857 |
+
string: str | bytes,
|
| 858 |
+
dtype: None = ...,
|
| 859 |
+
count: SupportsIndex = ...,
|
| 860 |
+
*,
|
| 861 |
+
sep: str,
|
| 862 |
+
like: None | _SupportsArrayFunc = ...,
|
| 863 |
+
) -> NDArray[float64]: ...
|
| 864 |
+
@overload
|
| 865 |
+
def fromstring(
|
| 866 |
+
string: str | bytes,
|
| 867 |
+
dtype: _DTypeLike[_SCT],
|
| 868 |
+
count: SupportsIndex = ...,
|
| 869 |
+
*,
|
| 870 |
+
sep: str,
|
| 871 |
+
like: None | _SupportsArrayFunc = ...,
|
| 872 |
+
) -> NDArray[_SCT]: ...
|
| 873 |
+
@overload
|
| 874 |
+
def fromstring(
|
| 875 |
+
string: str | bytes,
|
| 876 |
+
dtype: DTypeLike,
|
| 877 |
+
count: SupportsIndex = ...,
|
| 878 |
+
*,
|
| 879 |
+
sep: str,
|
| 880 |
+
like: None | _SupportsArrayFunc = ...,
|
| 881 |
+
) -> NDArray[Any]: ...
|
| 882 |
+
|
| 883 |
+
@overload
|
| 884 |
+
def frompyfunc( # type: ignore[overload-overlap]
|
| 885 |
+
func: Callable[[Any], _ReturnType], /,
|
| 886 |
+
nin: L[1],
|
| 887 |
+
nout: L[1],
|
| 888 |
+
*,
|
| 889 |
+
identity: None = ...,
|
| 890 |
+
) -> _PyFunc_Nin1_Nout1[_ReturnType, None]: ...
|
| 891 |
+
@overload
|
| 892 |
+
def frompyfunc( # type: ignore[overload-overlap]
|
| 893 |
+
func: Callable[[Any], _ReturnType], /,
|
| 894 |
+
nin: L[1],
|
| 895 |
+
nout: L[1],
|
| 896 |
+
*,
|
| 897 |
+
identity: _IDType,
|
| 898 |
+
) -> _PyFunc_Nin1_Nout1[_ReturnType, _IDType]: ...
|
| 899 |
+
@overload
|
| 900 |
+
def frompyfunc( # type: ignore[overload-overlap]
|
| 901 |
+
func: Callable[[Any, Any], _ReturnType], /,
|
| 902 |
+
nin: L[2],
|
| 903 |
+
nout: L[1],
|
| 904 |
+
*,
|
| 905 |
+
identity: None = ...,
|
| 906 |
+
) -> _PyFunc_Nin2_Nout1[_ReturnType, None]: ...
|
| 907 |
+
@overload
|
| 908 |
+
def frompyfunc( # type: ignore[overload-overlap]
|
| 909 |
+
func: Callable[[Any, Any], _ReturnType], /,
|
| 910 |
+
nin: L[2],
|
| 911 |
+
nout: L[1],
|
| 912 |
+
*,
|
| 913 |
+
identity: _IDType,
|
| 914 |
+
) -> _PyFunc_Nin2_Nout1[_ReturnType, _IDType]: ...
|
| 915 |
+
@overload
|
| 916 |
+
def frompyfunc( # type: ignore[overload-overlap]
|
| 917 |
+
func: Callable[..., _ReturnType], /,
|
| 918 |
+
nin: _Nin,
|
| 919 |
+
nout: L[1],
|
| 920 |
+
*,
|
| 921 |
+
identity: None = ...,
|
| 922 |
+
) -> _PyFunc_Nin3P_Nout1[_ReturnType, None, _Nin]: ...
|
| 923 |
+
@overload
|
| 924 |
+
def frompyfunc( # type: ignore[overload-overlap]
|
| 925 |
+
func: Callable[..., _ReturnType], /,
|
| 926 |
+
nin: _Nin,
|
| 927 |
+
nout: L[1],
|
| 928 |
+
*,
|
| 929 |
+
identity: _IDType,
|
| 930 |
+
) -> _PyFunc_Nin3P_Nout1[_ReturnType, _IDType, _Nin]: ...
|
| 931 |
+
@overload
|
| 932 |
+
def frompyfunc(
|
| 933 |
+
func: Callable[..., _2PTuple[_ReturnType]], /,
|
| 934 |
+
nin: _Nin,
|
| 935 |
+
nout: _Nout,
|
| 936 |
+
*,
|
| 937 |
+
identity: None = ...,
|
| 938 |
+
) -> _PyFunc_Nin1P_Nout2P[_ReturnType, None, _Nin, _Nout]: ...
|
| 939 |
+
@overload
|
| 940 |
+
def frompyfunc(
|
| 941 |
+
func: Callable[..., _2PTuple[_ReturnType]], /,
|
| 942 |
+
nin: _Nin,
|
| 943 |
+
nout: _Nout,
|
| 944 |
+
*,
|
| 945 |
+
identity: _IDType,
|
| 946 |
+
) -> _PyFunc_Nin1P_Nout2P[_ReturnType, _IDType, _Nin, _Nout]: ...
|
| 947 |
+
@overload
|
| 948 |
+
def frompyfunc(
|
| 949 |
+
func: Callable[..., Any], /,
|
| 950 |
+
nin: SupportsIndex,
|
| 951 |
+
nout: SupportsIndex,
|
| 952 |
+
*,
|
| 953 |
+
identity: None | object = ...,
|
| 954 |
+
) -> ufunc: ...
|
| 955 |
+
|
| 956 |
+
@overload
|
| 957 |
+
def fromfile(
|
| 958 |
+
file: StrOrBytesPath | _SupportsFileMethods,
|
| 959 |
+
dtype: None = ...,
|
| 960 |
+
count: SupportsIndex = ...,
|
| 961 |
+
sep: str = ...,
|
| 962 |
+
offset: SupportsIndex = ...,
|
| 963 |
+
*,
|
| 964 |
+
like: None | _SupportsArrayFunc = ...,
|
| 965 |
+
) -> NDArray[float64]: ...
|
| 966 |
+
@overload
|
| 967 |
+
def fromfile(
|
| 968 |
+
file: StrOrBytesPath | _SupportsFileMethods,
|
| 969 |
+
dtype: _DTypeLike[_SCT],
|
| 970 |
+
count: SupportsIndex = ...,
|
| 971 |
+
sep: str = ...,
|
| 972 |
+
offset: SupportsIndex = ...,
|
| 973 |
+
*,
|
| 974 |
+
like: None | _SupportsArrayFunc = ...,
|
| 975 |
+
) -> NDArray[_SCT]: ...
|
| 976 |
+
@overload
|
| 977 |
+
def fromfile(
|
| 978 |
+
file: StrOrBytesPath | _SupportsFileMethods,
|
| 979 |
+
dtype: DTypeLike,
|
| 980 |
+
count: SupportsIndex = ...,
|
| 981 |
+
sep: str = ...,
|
| 982 |
+
offset: SupportsIndex = ...,
|
| 983 |
+
*,
|
| 984 |
+
like: None | _SupportsArrayFunc = ...,
|
| 985 |
+
) -> NDArray[Any]: ...
|
| 986 |
+
|
| 987 |
+
@overload
|
| 988 |
+
def fromiter(
|
| 989 |
+
iter: Iterable[Any],
|
| 990 |
+
dtype: _DTypeLike[_SCT],
|
| 991 |
+
count: SupportsIndex = ...,
|
| 992 |
+
*,
|
| 993 |
+
like: None | _SupportsArrayFunc = ...,
|
| 994 |
+
) -> NDArray[_SCT]: ...
|
| 995 |
+
@overload
|
| 996 |
+
def fromiter(
|
| 997 |
+
iter: Iterable[Any],
|
| 998 |
+
dtype: DTypeLike,
|
| 999 |
+
count: SupportsIndex = ...,
|
| 1000 |
+
*,
|
| 1001 |
+
like: None | _SupportsArrayFunc = ...,
|
| 1002 |
+
) -> NDArray[Any]: ...
|
| 1003 |
+
|
| 1004 |
+
@overload
|
| 1005 |
+
def frombuffer(
|
| 1006 |
+
buffer: _SupportsBuffer,
|
| 1007 |
+
dtype: None = ...,
|
| 1008 |
+
count: SupportsIndex = ...,
|
| 1009 |
+
offset: SupportsIndex = ...,
|
| 1010 |
+
*,
|
| 1011 |
+
like: None | _SupportsArrayFunc = ...,
|
| 1012 |
+
) -> NDArray[float64]: ...
|
| 1013 |
+
@overload
|
| 1014 |
+
def frombuffer(
|
| 1015 |
+
buffer: _SupportsBuffer,
|
| 1016 |
+
dtype: _DTypeLike[_SCT],
|
| 1017 |
+
count: SupportsIndex = ...,
|
| 1018 |
+
offset: SupportsIndex = ...,
|
| 1019 |
+
*,
|
| 1020 |
+
like: None | _SupportsArrayFunc = ...,
|
| 1021 |
+
) -> NDArray[_SCT]: ...
|
| 1022 |
+
@overload
|
| 1023 |
+
def frombuffer(
|
| 1024 |
+
buffer: _SupportsBuffer,
|
| 1025 |
+
dtype: DTypeLike,
|
| 1026 |
+
count: SupportsIndex = ...,
|
| 1027 |
+
offset: SupportsIndex = ...,
|
| 1028 |
+
*,
|
| 1029 |
+
like: None | _SupportsArrayFunc = ...,
|
| 1030 |
+
) -> NDArray[Any]: ...
|
| 1031 |
+
|
| 1032 |
+
@overload
|
| 1033 |
+
def arange( # type: ignore[misc]
|
| 1034 |
+
stop: _IntLike_co,
|
| 1035 |
+
/, *,
|
| 1036 |
+
dtype: None = ...,
|
| 1037 |
+
device: None | L["cpu"] = ...,
|
| 1038 |
+
like: None | _SupportsArrayFunc = ...,
|
| 1039 |
+
) -> _1DArray[int, signedinteger[Any]]: ...
|
| 1040 |
+
@overload
|
| 1041 |
+
def arange( # type: ignore[misc]
|
| 1042 |
+
start: _IntLike_co,
|
| 1043 |
+
stop: _IntLike_co,
|
| 1044 |
+
step: _IntLike_co = ...,
|
| 1045 |
+
dtype: None = ...,
|
| 1046 |
+
*,
|
| 1047 |
+
device: None | L["cpu"] = ...,
|
| 1048 |
+
like: None | _SupportsArrayFunc = ...,
|
| 1049 |
+
) -> _1DArray[int, signedinteger[Any]]: ...
|
| 1050 |
+
@overload
|
| 1051 |
+
def arange( # type: ignore[misc]
|
| 1052 |
+
stop: _FloatLike_co,
|
| 1053 |
+
/, *,
|
| 1054 |
+
dtype: None = ...,
|
| 1055 |
+
device: None | L["cpu"] = ...,
|
| 1056 |
+
like: None | _SupportsArrayFunc = ...,
|
| 1057 |
+
) -> _1DArray[int, floating[Any]]: ...
|
| 1058 |
+
@overload
|
| 1059 |
+
def arange( # type: ignore[misc]
|
| 1060 |
+
start: _FloatLike_co,
|
| 1061 |
+
stop: _FloatLike_co,
|
| 1062 |
+
step: _FloatLike_co = ...,
|
| 1063 |
+
dtype: None = ...,
|
| 1064 |
+
*,
|
| 1065 |
+
device: None | L["cpu"] = ...,
|
| 1066 |
+
like: None | _SupportsArrayFunc = ...,
|
| 1067 |
+
) -> _1DArray[int, floating[Any]]: ...
|
| 1068 |
+
@overload
|
| 1069 |
+
def arange(
|
| 1070 |
+
stop: _TD64Like_co,
|
| 1071 |
+
/, *,
|
| 1072 |
+
dtype: None = ...,
|
| 1073 |
+
device: None | L["cpu"] = ...,
|
| 1074 |
+
like: None | _SupportsArrayFunc = ...,
|
| 1075 |
+
) -> _1DArray[int, timedelta64]: ...
|
| 1076 |
+
@overload
|
| 1077 |
+
def arange(
|
| 1078 |
+
start: _TD64Like_co,
|
| 1079 |
+
stop: _TD64Like_co,
|
| 1080 |
+
step: _TD64Like_co = ...,
|
| 1081 |
+
dtype: None = ...,
|
| 1082 |
+
*,
|
| 1083 |
+
device: None | L["cpu"] = ...,
|
| 1084 |
+
like: None | _SupportsArrayFunc = ...,
|
| 1085 |
+
) -> _1DArray[int, timedelta64]: ...
|
| 1086 |
+
@overload
|
| 1087 |
+
def arange( # both start and stop must always be specified for datetime64
|
| 1088 |
+
start: datetime64,
|
| 1089 |
+
stop: datetime64,
|
| 1090 |
+
step: datetime64 = ...,
|
| 1091 |
+
dtype: None = ...,
|
| 1092 |
+
*,
|
| 1093 |
+
device: None | L["cpu"] = ...,
|
| 1094 |
+
like: None | _SupportsArrayFunc = ...,
|
| 1095 |
+
) -> _1DArray[int, datetime64]: ...
|
| 1096 |
+
@overload
|
| 1097 |
+
def arange(
|
| 1098 |
+
stop: Any,
|
| 1099 |
+
/, *,
|
| 1100 |
+
dtype: _DTypeLike[_SCT],
|
| 1101 |
+
device: None | L["cpu"] = ...,
|
| 1102 |
+
like: None | _SupportsArrayFunc = ...,
|
| 1103 |
+
) -> _1DArray[int, _SCT]: ...
|
| 1104 |
+
@overload
|
| 1105 |
+
def arange(
|
| 1106 |
+
start: Any,
|
| 1107 |
+
stop: Any,
|
| 1108 |
+
step: Any = ...,
|
| 1109 |
+
dtype: _DTypeLike[_SCT] = ...,
|
| 1110 |
+
*,
|
| 1111 |
+
device: None | L["cpu"] = ...,
|
| 1112 |
+
like: None | _SupportsArrayFunc = ...,
|
| 1113 |
+
) -> _1DArray[int, _SCT]: ...
|
| 1114 |
+
@overload
|
| 1115 |
+
def arange(
|
| 1116 |
+
stop: Any, /,
|
| 1117 |
+
*,
|
| 1118 |
+
dtype: DTypeLike,
|
| 1119 |
+
device: None | L["cpu"] = ...,
|
| 1120 |
+
like: None | _SupportsArrayFunc = ...,
|
| 1121 |
+
) -> _1DArray[int, Any]: ...
|
| 1122 |
+
@overload
|
| 1123 |
+
def arange(
|
| 1124 |
+
start: Any,
|
| 1125 |
+
stop: Any,
|
| 1126 |
+
step: Any = ...,
|
| 1127 |
+
dtype: DTypeLike = ...,
|
| 1128 |
+
*,
|
| 1129 |
+
device: None | L["cpu"] = ...,
|
| 1130 |
+
like: None | _SupportsArrayFunc = ...,
|
| 1131 |
+
) -> _1DArray[int, Any]: ...
|
| 1132 |
+
|
| 1133 |
+
def datetime_data(
|
| 1134 |
+
dtype: str | _DTypeLike[datetime64] | _DTypeLike[timedelta64], /,
|
| 1135 |
+
) -> tuple[str, int]: ...
|
| 1136 |
+
|
| 1137 |
+
# The datetime functions perform unsafe casts to `datetime64[D]`,
|
| 1138 |
+
# so a lot of different argument types are allowed here
|
| 1139 |
+
|
| 1140 |
+
@overload
|
| 1141 |
+
def busday_count( # type: ignore[misc]
|
| 1142 |
+
begindates: _ScalarLike_co | dt.date,
|
| 1143 |
+
enddates: _ScalarLike_co | dt.date,
|
| 1144 |
+
weekmask: ArrayLike = ...,
|
| 1145 |
+
holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
|
| 1146 |
+
busdaycal: None | busdaycalendar = ...,
|
| 1147 |
+
out: None = ...,
|
| 1148 |
+
) -> int_: ...
|
| 1149 |
+
@overload
|
| 1150 |
+
def busday_count( # type: ignore[misc]
|
| 1151 |
+
begindates: ArrayLike | dt.date | _NestedSequence[dt.date],
|
| 1152 |
+
enddates: ArrayLike | dt.date | _NestedSequence[dt.date],
|
| 1153 |
+
weekmask: ArrayLike = ...,
|
| 1154 |
+
holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
|
| 1155 |
+
busdaycal: None | busdaycalendar = ...,
|
| 1156 |
+
out: None = ...,
|
| 1157 |
+
) -> NDArray[int_]: ...
|
| 1158 |
+
@overload
|
| 1159 |
+
def busday_count(
|
| 1160 |
+
begindates: ArrayLike | dt.date | _NestedSequence[dt.date],
|
| 1161 |
+
enddates: ArrayLike | dt.date | _NestedSequence[dt.date],
|
| 1162 |
+
weekmask: ArrayLike = ...,
|
| 1163 |
+
holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
|
| 1164 |
+
busdaycal: None | busdaycalendar = ...,
|
| 1165 |
+
out: _ArrayType = ...,
|
| 1166 |
+
) -> _ArrayType: ...
|
| 1167 |
+
|
| 1168 |
+
# `roll="raise"` is (more or less?) equivalent to `casting="safe"`
|
| 1169 |
+
@overload
|
| 1170 |
+
def busday_offset( # type: ignore[misc]
|
| 1171 |
+
dates: datetime64 | dt.date,
|
| 1172 |
+
offsets: _TD64Like_co | dt.timedelta,
|
| 1173 |
+
roll: L["raise"] = ...,
|
| 1174 |
+
weekmask: ArrayLike = ...,
|
| 1175 |
+
holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
|
| 1176 |
+
busdaycal: None | busdaycalendar = ...,
|
| 1177 |
+
out: None = ...,
|
| 1178 |
+
) -> datetime64: ...
|
| 1179 |
+
@overload
|
| 1180 |
+
def busday_offset( # type: ignore[misc]
|
| 1181 |
+
dates: _ArrayLike[datetime64] | dt.date | _NestedSequence[dt.date],
|
| 1182 |
+
offsets: _ArrayLikeTD64_co | dt.timedelta | _NestedSequence[dt.timedelta],
|
| 1183 |
+
roll: L["raise"] = ...,
|
| 1184 |
+
weekmask: ArrayLike = ...,
|
| 1185 |
+
holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
|
| 1186 |
+
busdaycal: None | busdaycalendar = ...,
|
| 1187 |
+
out: None = ...,
|
| 1188 |
+
) -> NDArray[datetime64]: ...
|
| 1189 |
+
@overload
|
| 1190 |
+
def busday_offset( # type: ignore[misc]
|
| 1191 |
+
dates: _ArrayLike[datetime64] | dt.date | _NestedSequence[dt.date],
|
| 1192 |
+
offsets: _ArrayLikeTD64_co | dt.timedelta | _NestedSequence[dt.timedelta],
|
| 1193 |
+
roll: L["raise"] = ...,
|
| 1194 |
+
weekmask: ArrayLike = ...,
|
| 1195 |
+
holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
|
| 1196 |
+
busdaycal: None | busdaycalendar = ...,
|
| 1197 |
+
out: _ArrayType = ...,
|
| 1198 |
+
) -> _ArrayType: ...
|
| 1199 |
+
@overload
|
| 1200 |
+
def busday_offset( # type: ignore[misc]
|
| 1201 |
+
dates: _ScalarLike_co | dt.date,
|
| 1202 |
+
offsets: _ScalarLike_co | dt.timedelta,
|
| 1203 |
+
roll: _RollKind,
|
| 1204 |
+
weekmask: ArrayLike = ...,
|
| 1205 |
+
holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
|
| 1206 |
+
busdaycal: None | busdaycalendar = ...,
|
| 1207 |
+
out: None = ...,
|
| 1208 |
+
) -> datetime64: ...
|
| 1209 |
+
@overload
|
| 1210 |
+
def busday_offset( # type: ignore[misc]
|
| 1211 |
+
dates: ArrayLike | dt.date | _NestedSequence[dt.date],
|
| 1212 |
+
offsets: ArrayLike | dt.timedelta | _NestedSequence[dt.timedelta],
|
| 1213 |
+
roll: _RollKind,
|
| 1214 |
+
weekmask: ArrayLike = ...,
|
| 1215 |
+
holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
|
| 1216 |
+
busdaycal: None | busdaycalendar = ...,
|
| 1217 |
+
out: None = ...,
|
| 1218 |
+
) -> NDArray[datetime64]: ...
|
| 1219 |
+
@overload
|
| 1220 |
+
def busday_offset(
|
| 1221 |
+
dates: ArrayLike | dt.date | _NestedSequence[dt.date],
|
| 1222 |
+
offsets: ArrayLike | dt.timedelta | _NestedSequence[dt.timedelta],
|
| 1223 |
+
roll: _RollKind,
|
| 1224 |
+
weekmask: ArrayLike = ...,
|
| 1225 |
+
holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
|
| 1226 |
+
busdaycal: None | busdaycalendar = ...,
|
| 1227 |
+
out: _ArrayType = ...,
|
| 1228 |
+
) -> _ArrayType: ...
|
| 1229 |
+
|
| 1230 |
+
@overload
|
| 1231 |
+
def is_busday( # type: ignore[misc]
|
| 1232 |
+
dates: _ScalarLike_co | dt.date,
|
| 1233 |
+
weekmask: ArrayLike = ...,
|
| 1234 |
+
holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
|
| 1235 |
+
busdaycal: None | busdaycalendar = ...,
|
| 1236 |
+
out: None = ...,
|
| 1237 |
+
) -> np.bool: ...
|
| 1238 |
+
@overload
|
| 1239 |
+
def is_busday( # type: ignore[misc]
|
| 1240 |
+
dates: ArrayLike | _NestedSequence[dt.date],
|
| 1241 |
+
weekmask: ArrayLike = ...,
|
| 1242 |
+
holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
|
| 1243 |
+
busdaycal: None | busdaycalendar = ...,
|
| 1244 |
+
out: None = ...,
|
| 1245 |
+
) -> NDArray[np.bool]: ...
|
| 1246 |
+
@overload
|
| 1247 |
+
def is_busday(
|
| 1248 |
+
dates: ArrayLike | _NestedSequence[dt.date],
|
| 1249 |
+
weekmask: ArrayLike = ...,
|
| 1250 |
+
holidays: None | ArrayLike | dt.date | _NestedSequence[dt.date] = ...,
|
| 1251 |
+
busdaycal: None | busdaycalendar = ...,
|
| 1252 |
+
out: _ArrayType = ...,
|
| 1253 |
+
) -> _ArrayType: ...
|
| 1254 |
+
|
| 1255 |
+
@overload
|
| 1256 |
+
def datetime_as_string( # type: ignore[misc]
|
| 1257 |
+
arr: datetime64 | dt.date,
|
| 1258 |
+
unit: None | L["auto"] | _UnitKind = ...,
|
| 1259 |
+
timezone: L["naive", "UTC", "local"] | dt.tzinfo = ...,
|
| 1260 |
+
casting: _CastingKind = ...,
|
| 1261 |
+
) -> str_: ...
|
| 1262 |
+
@overload
|
| 1263 |
+
def datetime_as_string(
|
| 1264 |
+
arr: _ArrayLikeDT64_co | _NestedSequence[dt.date],
|
| 1265 |
+
unit: None | L["auto"] | _UnitKind = ...,
|
| 1266 |
+
timezone: L["naive", "UTC", "local"] | dt.tzinfo = ...,
|
| 1267 |
+
casting: _CastingKind = ...,
|
| 1268 |
+
) -> NDArray[str_]: ...
|
| 1269 |
+
|
| 1270 |
+
@overload
|
| 1271 |
+
def compare_chararrays(
|
| 1272 |
+
a1: _ArrayLikeStr_co,
|
| 1273 |
+
a2: _ArrayLikeStr_co,
|
| 1274 |
+
cmp: L["<", "<=", "==", ">=", ">", "!="],
|
| 1275 |
+
rstrip: bool,
|
| 1276 |
+
) -> NDArray[np.bool]: ...
|
| 1277 |
+
@overload
|
| 1278 |
+
def compare_chararrays(
|
| 1279 |
+
a1: _ArrayLikeBytes_co,
|
| 1280 |
+
a2: _ArrayLikeBytes_co,
|
| 1281 |
+
cmp: L["<", "<=", "==", ">=", ">", "!="],
|
| 1282 |
+
rstrip: bool,
|
| 1283 |
+
) -> NDArray[np.bool]: ...
|
| 1284 |
+
|
| 1285 |
+
def add_docstring(obj: Callable[..., Any], docstring: str, /) -> None: ...
|
| 1286 |
+
|
| 1287 |
+
_GetItemKeys: TypeAlias = L[
|
| 1288 |
+
"C", "CONTIGUOUS", "C_CONTIGUOUS",
|
| 1289 |
+
"F", "FORTRAN", "F_CONTIGUOUS",
|
| 1290 |
+
"W", "WRITEABLE",
|
| 1291 |
+
"B", "BEHAVED",
|
| 1292 |
+
"O", "OWNDATA",
|
| 1293 |
+
"A", "ALIGNED",
|
| 1294 |
+
"X", "WRITEBACKIFCOPY",
|
| 1295 |
+
"CA", "CARRAY",
|
| 1296 |
+
"FA", "FARRAY",
|
| 1297 |
+
"FNC",
|
| 1298 |
+
"FORC",
|
| 1299 |
+
]
|
| 1300 |
+
_SetItemKeys: TypeAlias = L[
|
| 1301 |
+
"A", "ALIGNED",
|
| 1302 |
+
"W", "WRITEABLE",
|
| 1303 |
+
"X", "WRITEBACKIFCOPY",
|
| 1304 |
+
]
|
| 1305 |
+
|
| 1306 |
+
@final
|
| 1307 |
+
class flagsobj:
|
| 1308 |
+
__hash__: ClassVar[None] # type: ignore[assignment]
|
| 1309 |
+
aligned: bool
|
| 1310 |
+
# NOTE: deprecated
|
| 1311 |
+
# updateifcopy: bool
|
| 1312 |
+
writeable: bool
|
| 1313 |
+
writebackifcopy: bool
|
| 1314 |
+
@property
|
| 1315 |
+
def behaved(self) -> bool: ...
|
| 1316 |
+
@property
|
| 1317 |
+
def c_contiguous(self) -> bool: ...
|
| 1318 |
+
@property
|
| 1319 |
+
def carray(self) -> bool: ...
|
| 1320 |
+
@property
|
| 1321 |
+
def contiguous(self) -> bool: ...
|
| 1322 |
+
@property
|
| 1323 |
+
def f_contiguous(self) -> bool: ...
|
| 1324 |
+
@property
|
| 1325 |
+
def farray(self) -> bool: ...
|
| 1326 |
+
@property
|
| 1327 |
+
def fnc(self) -> bool: ...
|
| 1328 |
+
@property
|
| 1329 |
+
def forc(self) -> bool: ...
|
| 1330 |
+
@property
|
| 1331 |
+
def fortran(self) -> bool: ...
|
| 1332 |
+
@property
|
| 1333 |
+
def num(self) -> int: ...
|
| 1334 |
+
@property
|
| 1335 |
+
def owndata(self) -> bool: ...
|
| 1336 |
+
def __getitem__(self, key: _GetItemKeys) -> bool: ...
|
| 1337 |
+
def __setitem__(self, key: _SetItemKeys, value: bool) -> None: ...
|
| 1338 |
+
|
| 1339 |
+
def nested_iters(
|
| 1340 |
+
op: ArrayLike | Sequence[ArrayLike],
|
| 1341 |
+
axes: Sequence[Sequence[SupportsIndex]],
|
| 1342 |
+
flags: None | Sequence[_NDIterFlagsKind] = ...,
|
| 1343 |
+
op_flags: None | Sequence[Sequence[_NDIterFlagsOp]] = ...,
|
| 1344 |
+
op_dtypes: DTypeLike | Sequence[DTypeLike] = ...,
|
| 1345 |
+
order: _OrderKACF = ...,
|
| 1346 |
+
casting: _CastingKind = ...,
|
| 1347 |
+
buffersize: SupportsIndex = ...,
|
| 1348 |
+
) -> tuple[nditer, ...]: ...
|
janus/lib/python3.10/site-packages/numpy/_core/numeric.pyi
ADDED
|
@@ -0,0 +1,886 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from collections.abc import Callable, Sequence
|
| 2 |
+
from typing import (
|
| 3 |
+
Any,
|
| 4 |
+
Final,
|
| 5 |
+
TypeAlias,
|
| 6 |
+
overload,
|
| 7 |
+
TypeVar,
|
| 8 |
+
Literal as L,
|
| 9 |
+
SupportsAbs,
|
| 10 |
+
SupportsIndex,
|
| 11 |
+
NoReturn,
|
| 12 |
+
TypeGuard,
|
| 13 |
+
)
|
| 14 |
+
from typing_extensions import Unpack
|
| 15 |
+
|
| 16 |
+
import numpy as np
|
| 17 |
+
from numpy import (
|
| 18 |
+
# re-exports
|
| 19 |
+
bitwise_not,
|
| 20 |
+
False_,
|
| 21 |
+
True_,
|
| 22 |
+
broadcast,
|
| 23 |
+
dtype,
|
| 24 |
+
flatiter,
|
| 25 |
+
from_dlpack,
|
| 26 |
+
inf,
|
| 27 |
+
little_endian,
|
| 28 |
+
matmul,
|
| 29 |
+
vecdot,
|
| 30 |
+
nan,
|
| 31 |
+
ndarray,
|
| 32 |
+
nditer,
|
| 33 |
+
newaxis,
|
| 34 |
+
ufunc,
|
| 35 |
+
|
| 36 |
+
# other
|
| 37 |
+
generic,
|
| 38 |
+
unsignedinteger,
|
| 39 |
+
signedinteger,
|
| 40 |
+
floating,
|
| 41 |
+
complexfloating,
|
| 42 |
+
int_,
|
| 43 |
+
intp,
|
| 44 |
+
float64,
|
| 45 |
+
timedelta64,
|
| 46 |
+
object_,
|
| 47 |
+
_OrderKACF,
|
| 48 |
+
_OrderCF,
|
| 49 |
+
)
|
| 50 |
+
from .multiarray import (
|
| 51 |
+
# re-exports
|
| 52 |
+
arange,
|
| 53 |
+
array,
|
| 54 |
+
asarray,
|
| 55 |
+
asanyarray,
|
| 56 |
+
ascontiguousarray,
|
| 57 |
+
asfortranarray,
|
| 58 |
+
can_cast,
|
| 59 |
+
concatenate,
|
| 60 |
+
copyto,
|
| 61 |
+
dot,
|
| 62 |
+
empty,
|
| 63 |
+
empty_like,
|
| 64 |
+
frombuffer,
|
| 65 |
+
fromfile,
|
| 66 |
+
fromiter,
|
| 67 |
+
fromstring,
|
| 68 |
+
inner,
|
| 69 |
+
lexsort,
|
| 70 |
+
may_share_memory,
|
| 71 |
+
min_scalar_type,
|
| 72 |
+
nested_iters,
|
| 73 |
+
putmask,
|
| 74 |
+
promote_types,
|
| 75 |
+
result_type,
|
| 76 |
+
shares_memory,
|
| 77 |
+
vdot,
|
| 78 |
+
where,
|
| 79 |
+
zeros,
|
| 80 |
+
|
| 81 |
+
# other
|
| 82 |
+
_Array,
|
| 83 |
+
_ConstructorEmpty,
|
| 84 |
+
_KwargsEmpty,
|
| 85 |
+
)
|
| 86 |
+
|
| 87 |
+
from numpy._typing import (
|
| 88 |
+
ArrayLike,
|
| 89 |
+
NDArray,
|
| 90 |
+
DTypeLike,
|
| 91 |
+
_SupportsDType,
|
| 92 |
+
_ShapeLike,
|
| 93 |
+
_DTypeLike,
|
| 94 |
+
_ArrayLike,
|
| 95 |
+
_SupportsArrayFunc,
|
| 96 |
+
_ScalarLike_co,
|
| 97 |
+
_ArrayLikeBool_co,
|
| 98 |
+
_ArrayLikeUInt_co,
|
| 99 |
+
_ArrayLikeInt_co,
|
| 100 |
+
_ArrayLikeFloat_co,
|
| 101 |
+
_ArrayLikeComplex_co,
|
| 102 |
+
_ArrayLikeTD64_co,
|
| 103 |
+
_ArrayLikeObject_co,
|
| 104 |
+
_ArrayLikeUnknown,
|
| 105 |
+
)
|
| 106 |
+
|
| 107 |
+
__all__ = [
|
| 108 |
+
"newaxis",
|
| 109 |
+
"ndarray",
|
| 110 |
+
"flatiter",
|
| 111 |
+
"nditer",
|
| 112 |
+
"nested_iters",
|
| 113 |
+
"ufunc",
|
| 114 |
+
"arange",
|
| 115 |
+
"array",
|
| 116 |
+
"asarray",
|
| 117 |
+
"asanyarray",
|
| 118 |
+
"ascontiguousarray",
|
| 119 |
+
"asfortranarray",
|
| 120 |
+
"zeros",
|
| 121 |
+
"count_nonzero",
|
| 122 |
+
"empty",
|
| 123 |
+
"broadcast",
|
| 124 |
+
"dtype",
|
| 125 |
+
"fromstring",
|
| 126 |
+
"fromfile",
|
| 127 |
+
"frombuffer",
|
| 128 |
+
"from_dlpack",
|
| 129 |
+
"where",
|
| 130 |
+
"argwhere",
|
| 131 |
+
"copyto",
|
| 132 |
+
"concatenate",
|
| 133 |
+
"lexsort",
|
| 134 |
+
"astype",
|
| 135 |
+
"can_cast",
|
| 136 |
+
"promote_types",
|
| 137 |
+
"min_scalar_type",
|
| 138 |
+
"result_type",
|
| 139 |
+
"isfortran",
|
| 140 |
+
"empty_like",
|
| 141 |
+
"zeros_like",
|
| 142 |
+
"ones_like",
|
| 143 |
+
"correlate",
|
| 144 |
+
"convolve",
|
| 145 |
+
"inner",
|
| 146 |
+
"dot",
|
| 147 |
+
"outer",
|
| 148 |
+
"vdot",
|
| 149 |
+
"roll",
|
| 150 |
+
"rollaxis",
|
| 151 |
+
"moveaxis",
|
| 152 |
+
"cross",
|
| 153 |
+
"tensordot",
|
| 154 |
+
"little_endian",
|
| 155 |
+
"fromiter",
|
| 156 |
+
"array_equal",
|
| 157 |
+
"array_equiv",
|
| 158 |
+
"indices",
|
| 159 |
+
"fromfunction",
|
| 160 |
+
"isclose",
|
| 161 |
+
"isscalar",
|
| 162 |
+
"binary_repr",
|
| 163 |
+
"base_repr",
|
| 164 |
+
"ones",
|
| 165 |
+
"identity",
|
| 166 |
+
"allclose",
|
| 167 |
+
"putmask",
|
| 168 |
+
"flatnonzero",
|
| 169 |
+
"inf",
|
| 170 |
+
"nan",
|
| 171 |
+
"False_",
|
| 172 |
+
"True_",
|
| 173 |
+
"bitwise_not",
|
| 174 |
+
"full",
|
| 175 |
+
"full_like",
|
| 176 |
+
"matmul",
|
| 177 |
+
"vecdot",
|
| 178 |
+
"shares_memory",
|
| 179 |
+
"may_share_memory",
|
| 180 |
+
]
|
| 181 |
+
|
| 182 |
+
_T = TypeVar("_T")
|
| 183 |
+
_SCT = TypeVar("_SCT", bound=generic)
|
| 184 |
+
_DType = TypeVar("_DType", bound=np.dtype[Any])
|
| 185 |
+
_ArrayType = TypeVar("_ArrayType", bound=np.ndarray[Any, Any])
|
| 186 |
+
_SizeType = TypeVar("_SizeType", bound=int)
|
| 187 |
+
_ShapeType = TypeVar("_ShapeType", bound=tuple[int, ...])
|
| 188 |
+
|
| 189 |
+
_CorrelateMode: TypeAlias = L["valid", "same", "full"]
|
| 190 |
+
|
| 191 |
+
@overload
|
| 192 |
+
def zeros_like(
|
| 193 |
+
a: _ArrayType,
|
| 194 |
+
dtype: None = ...,
|
| 195 |
+
order: _OrderKACF = ...,
|
| 196 |
+
subok: L[True] = ...,
|
| 197 |
+
shape: None = ...,
|
| 198 |
+
*,
|
| 199 |
+
device: None | L["cpu"] = ...,
|
| 200 |
+
) -> _ArrayType: ...
|
| 201 |
+
@overload
|
| 202 |
+
def zeros_like(
|
| 203 |
+
a: _ArrayLike[_SCT],
|
| 204 |
+
dtype: None = ...,
|
| 205 |
+
order: _OrderKACF = ...,
|
| 206 |
+
subok: bool = ...,
|
| 207 |
+
shape: None | _ShapeLike = ...,
|
| 208 |
+
*,
|
| 209 |
+
device: None | L["cpu"] = ...,
|
| 210 |
+
) -> NDArray[_SCT]: ...
|
| 211 |
+
@overload
|
| 212 |
+
def zeros_like(
|
| 213 |
+
a: object,
|
| 214 |
+
dtype: None = ...,
|
| 215 |
+
order: _OrderKACF = ...,
|
| 216 |
+
subok: bool = ...,
|
| 217 |
+
shape: None | _ShapeLike= ...,
|
| 218 |
+
*,
|
| 219 |
+
device: None | L["cpu"] = ...,
|
| 220 |
+
) -> NDArray[Any]: ...
|
| 221 |
+
@overload
|
| 222 |
+
def zeros_like(
|
| 223 |
+
a: Any,
|
| 224 |
+
dtype: _DTypeLike[_SCT],
|
| 225 |
+
order: _OrderKACF = ...,
|
| 226 |
+
subok: bool = ...,
|
| 227 |
+
shape: None | _ShapeLike= ...,
|
| 228 |
+
*,
|
| 229 |
+
device: None | L["cpu"] = ...,
|
| 230 |
+
) -> NDArray[_SCT]: ...
|
| 231 |
+
@overload
|
| 232 |
+
def zeros_like(
|
| 233 |
+
a: Any,
|
| 234 |
+
dtype: DTypeLike,
|
| 235 |
+
order: _OrderKACF = ...,
|
| 236 |
+
subok: bool = ...,
|
| 237 |
+
shape: None | _ShapeLike= ...,
|
| 238 |
+
*,
|
| 239 |
+
device: None | L["cpu"] = ...,
|
| 240 |
+
) -> NDArray[Any]: ...
|
| 241 |
+
|
| 242 |
+
ones: Final[_ConstructorEmpty]
|
| 243 |
+
|
| 244 |
+
@overload
|
| 245 |
+
def ones_like(
|
| 246 |
+
a: _ArrayType,
|
| 247 |
+
dtype: None = ...,
|
| 248 |
+
order: _OrderKACF = ...,
|
| 249 |
+
subok: L[True] = ...,
|
| 250 |
+
shape: None = ...,
|
| 251 |
+
*,
|
| 252 |
+
device: None | L["cpu"] = ...,
|
| 253 |
+
) -> _ArrayType: ...
|
| 254 |
+
@overload
|
| 255 |
+
def ones_like(
|
| 256 |
+
a: _ArrayLike[_SCT],
|
| 257 |
+
dtype: None = ...,
|
| 258 |
+
order: _OrderKACF = ...,
|
| 259 |
+
subok: bool = ...,
|
| 260 |
+
shape: None | _ShapeLike = ...,
|
| 261 |
+
*,
|
| 262 |
+
device: None | L["cpu"] = ...,
|
| 263 |
+
) -> NDArray[_SCT]: ...
|
| 264 |
+
@overload
|
| 265 |
+
def ones_like(
|
| 266 |
+
a: object,
|
| 267 |
+
dtype: None = ...,
|
| 268 |
+
order: _OrderKACF = ...,
|
| 269 |
+
subok: bool = ...,
|
| 270 |
+
shape: None | _ShapeLike= ...,
|
| 271 |
+
*,
|
| 272 |
+
device: None | L["cpu"] = ...,
|
| 273 |
+
) -> NDArray[Any]: ...
|
| 274 |
+
@overload
|
| 275 |
+
def ones_like(
|
| 276 |
+
a: Any,
|
| 277 |
+
dtype: _DTypeLike[_SCT],
|
| 278 |
+
order: _OrderKACF = ...,
|
| 279 |
+
subok: bool = ...,
|
| 280 |
+
shape: None | _ShapeLike= ...,
|
| 281 |
+
*,
|
| 282 |
+
device: None | L["cpu"] = ...,
|
| 283 |
+
) -> NDArray[_SCT]: ...
|
| 284 |
+
@overload
|
| 285 |
+
def ones_like(
|
| 286 |
+
a: Any,
|
| 287 |
+
dtype: DTypeLike,
|
| 288 |
+
order: _OrderKACF = ...,
|
| 289 |
+
subok: bool = ...,
|
| 290 |
+
shape: None | _ShapeLike= ...,
|
| 291 |
+
*,
|
| 292 |
+
device: None | L["cpu"] = ...,
|
| 293 |
+
) -> NDArray[Any]: ...
|
| 294 |
+
|
| 295 |
+
# TODO: Add overloads for bool, int, float, complex, str, bytes, and memoryview
|
| 296 |
+
# 1-D shape
|
| 297 |
+
@overload
|
| 298 |
+
def full(
|
| 299 |
+
shape: _SizeType,
|
| 300 |
+
fill_value: _SCT,
|
| 301 |
+
dtype: None = ...,
|
| 302 |
+
order: _OrderCF = ...,
|
| 303 |
+
**kwargs: Unpack[_KwargsEmpty],
|
| 304 |
+
) -> _Array[tuple[_SizeType], _SCT]: ...
|
| 305 |
+
@overload
|
| 306 |
+
def full(
|
| 307 |
+
shape: _SizeType,
|
| 308 |
+
fill_value: Any,
|
| 309 |
+
dtype: _DType | _SupportsDType[_DType],
|
| 310 |
+
order: _OrderCF = ...,
|
| 311 |
+
**kwargs: Unpack[_KwargsEmpty],
|
| 312 |
+
) -> np.ndarray[tuple[_SizeType], _DType]: ...
|
| 313 |
+
@overload
|
| 314 |
+
def full(
|
| 315 |
+
shape: _SizeType,
|
| 316 |
+
fill_value: Any,
|
| 317 |
+
dtype: type[_SCT],
|
| 318 |
+
order: _OrderCF = ...,
|
| 319 |
+
**kwargs: Unpack[_KwargsEmpty],
|
| 320 |
+
) -> _Array[tuple[_SizeType], _SCT]: ...
|
| 321 |
+
@overload
|
| 322 |
+
def full(
|
| 323 |
+
shape: _SizeType,
|
| 324 |
+
fill_value: Any,
|
| 325 |
+
dtype: None | DTypeLike = ...,
|
| 326 |
+
order: _OrderCF = ...,
|
| 327 |
+
**kwargs: Unpack[_KwargsEmpty],
|
| 328 |
+
) -> _Array[tuple[_SizeType], Any]: ...
|
| 329 |
+
# known shape
|
| 330 |
+
@overload
|
| 331 |
+
def full(
|
| 332 |
+
shape: _ShapeType,
|
| 333 |
+
fill_value: _SCT,
|
| 334 |
+
dtype: None = ...,
|
| 335 |
+
order: _OrderCF = ...,
|
| 336 |
+
**kwargs: Unpack[_KwargsEmpty],
|
| 337 |
+
) -> _Array[_ShapeType, _SCT]: ...
|
| 338 |
+
@overload
|
| 339 |
+
def full(
|
| 340 |
+
shape: _ShapeType,
|
| 341 |
+
fill_value: Any,
|
| 342 |
+
dtype: _DType | _SupportsDType[_DType],
|
| 343 |
+
order: _OrderCF = ...,
|
| 344 |
+
**kwargs: Unpack[_KwargsEmpty],
|
| 345 |
+
) -> np.ndarray[_ShapeType, _DType]: ...
|
| 346 |
+
@overload
|
| 347 |
+
def full(
|
| 348 |
+
shape: _ShapeType,
|
| 349 |
+
fill_value: Any,
|
| 350 |
+
dtype: type[_SCT],
|
| 351 |
+
order: _OrderCF = ...,
|
| 352 |
+
**kwargs: Unpack[_KwargsEmpty],
|
| 353 |
+
) -> _Array[_ShapeType, _SCT]: ...
|
| 354 |
+
@overload
|
| 355 |
+
def full(
|
| 356 |
+
shape: _ShapeType,
|
| 357 |
+
fill_value: Any,
|
| 358 |
+
dtype: None | DTypeLike = ...,
|
| 359 |
+
order: _OrderCF = ...,
|
| 360 |
+
**kwargs: Unpack[_KwargsEmpty],
|
| 361 |
+
) -> _Array[_ShapeType, Any]: ...
|
| 362 |
+
# unknown shape
|
| 363 |
+
@overload
|
| 364 |
+
def full(
|
| 365 |
+
shape: _ShapeLike,
|
| 366 |
+
fill_value: _SCT,
|
| 367 |
+
dtype: None = ...,
|
| 368 |
+
order: _OrderCF = ...,
|
| 369 |
+
**kwargs: Unpack[_KwargsEmpty],
|
| 370 |
+
) -> NDArray[_SCT]: ...
|
| 371 |
+
@overload
|
| 372 |
+
def full(
|
| 373 |
+
shape: _ShapeLike,
|
| 374 |
+
fill_value: Any,
|
| 375 |
+
dtype: _DType | _SupportsDType[_DType],
|
| 376 |
+
order: _OrderCF = ...,
|
| 377 |
+
**kwargs: Unpack[_KwargsEmpty],
|
| 378 |
+
) -> np.ndarray[Any, _DType]: ...
|
| 379 |
+
@overload
|
| 380 |
+
def full(
|
| 381 |
+
shape: _ShapeLike,
|
| 382 |
+
fill_value: Any,
|
| 383 |
+
dtype: type[_SCT],
|
| 384 |
+
order: _OrderCF = ...,
|
| 385 |
+
**kwargs: Unpack[_KwargsEmpty],
|
| 386 |
+
) -> NDArray[_SCT]: ...
|
| 387 |
+
@overload
|
| 388 |
+
def full(
|
| 389 |
+
shape: _ShapeLike,
|
| 390 |
+
fill_value: Any,
|
| 391 |
+
dtype: None | DTypeLike = ...,
|
| 392 |
+
order: _OrderCF = ...,
|
| 393 |
+
**kwargs: Unpack[_KwargsEmpty],
|
| 394 |
+
) -> NDArray[Any]: ...
|
| 395 |
+
|
| 396 |
+
@overload
|
| 397 |
+
def full_like(
|
| 398 |
+
a: _ArrayType,
|
| 399 |
+
fill_value: Any,
|
| 400 |
+
dtype: None = ...,
|
| 401 |
+
order: _OrderKACF = ...,
|
| 402 |
+
subok: L[True] = ...,
|
| 403 |
+
shape: None = ...,
|
| 404 |
+
*,
|
| 405 |
+
device: None | L["cpu"] = ...,
|
| 406 |
+
) -> _ArrayType: ...
|
| 407 |
+
@overload
|
| 408 |
+
def full_like(
|
| 409 |
+
a: _ArrayLike[_SCT],
|
| 410 |
+
fill_value: Any,
|
| 411 |
+
dtype: None = ...,
|
| 412 |
+
order: _OrderKACF = ...,
|
| 413 |
+
subok: bool = ...,
|
| 414 |
+
shape: None | _ShapeLike = ...,
|
| 415 |
+
*,
|
| 416 |
+
device: None | L["cpu"] = ...,
|
| 417 |
+
) -> NDArray[_SCT]: ...
|
| 418 |
+
@overload
|
| 419 |
+
def full_like(
|
| 420 |
+
a: object,
|
| 421 |
+
fill_value: Any,
|
| 422 |
+
dtype: None = ...,
|
| 423 |
+
order: _OrderKACF = ...,
|
| 424 |
+
subok: bool = ...,
|
| 425 |
+
shape: None | _ShapeLike= ...,
|
| 426 |
+
*,
|
| 427 |
+
device: None | L["cpu"] = ...,
|
| 428 |
+
) -> NDArray[Any]: ...
|
| 429 |
+
@overload
|
| 430 |
+
def full_like(
|
| 431 |
+
a: Any,
|
| 432 |
+
fill_value: Any,
|
| 433 |
+
dtype: _DTypeLike[_SCT],
|
| 434 |
+
order: _OrderKACF = ...,
|
| 435 |
+
subok: bool = ...,
|
| 436 |
+
shape: None | _ShapeLike= ...,
|
| 437 |
+
*,
|
| 438 |
+
device: None | L["cpu"] = ...,
|
| 439 |
+
) -> NDArray[_SCT]: ...
|
| 440 |
+
@overload
|
| 441 |
+
def full_like(
|
| 442 |
+
a: Any,
|
| 443 |
+
fill_value: Any,
|
| 444 |
+
dtype: DTypeLike,
|
| 445 |
+
order: _OrderKACF = ...,
|
| 446 |
+
subok: bool = ...,
|
| 447 |
+
shape: None | _ShapeLike= ...,
|
| 448 |
+
*,
|
| 449 |
+
device: None | L["cpu"] = ...,
|
| 450 |
+
) -> NDArray[Any]: ...
|
| 451 |
+
|
| 452 |
+
@overload
|
| 453 |
+
def count_nonzero(
|
| 454 |
+
a: ArrayLike,
|
| 455 |
+
axis: None = ...,
|
| 456 |
+
*,
|
| 457 |
+
keepdims: L[False] = ...,
|
| 458 |
+
) -> int: ...
|
| 459 |
+
@overload
|
| 460 |
+
def count_nonzero(
|
| 461 |
+
a: ArrayLike,
|
| 462 |
+
axis: _ShapeLike = ...,
|
| 463 |
+
*,
|
| 464 |
+
keepdims: bool = ...,
|
| 465 |
+
) -> Any: ... # TODO: np.intp or ndarray[np.intp]
|
| 466 |
+
|
| 467 |
+
def isfortran(a: NDArray[Any] | generic) -> bool: ...
|
| 468 |
+
|
| 469 |
+
def argwhere(a: ArrayLike) -> NDArray[intp]: ...
|
| 470 |
+
|
| 471 |
+
def flatnonzero(a: ArrayLike) -> NDArray[intp]: ...
|
| 472 |
+
|
| 473 |
+
@overload
|
| 474 |
+
def correlate(
|
| 475 |
+
a: _ArrayLikeUnknown,
|
| 476 |
+
v: _ArrayLikeUnknown,
|
| 477 |
+
mode: _CorrelateMode = ...,
|
| 478 |
+
) -> NDArray[Any]: ...
|
| 479 |
+
@overload
|
| 480 |
+
def correlate(
|
| 481 |
+
a: _ArrayLikeBool_co,
|
| 482 |
+
v: _ArrayLikeBool_co,
|
| 483 |
+
mode: _CorrelateMode = ...,
|
| 484 |
+
) -> NDArray[np.bool]: ...
|
| 485 |
+
@overload
|
| 486 |
+
def correlate(
|
| 487 |
+
a: _ArrayLikeUInt_co,
|
| 488 |
+
v: _ArrayLikeUInt_co,
|
| 489 |
+
mode: _CorrelateMode = ...,
|
| 490 |
+
) -> NDArray[unsignedinteger[Any]]: ...
|
| 491 |
+
@overload
|
| 492 |
+
def correlate(
|
| 493 |
+
a: _ArrayLikeInt_co,
|
| 494 |
+
v: _ArrayLikeInt_co,
|
| 495 |
+
mode: _CorrelateMode = ...,
|
| 496 |
+
) -> NDArray[signedinteger[Any]]: ...
|
| 497 |
+
@overload
|
| 498 |
+
def correlate(
|
| 499 |
+
a: _ArrayLikeFloat_co,
|
| 500 |
+
v: _ArrayLikeFloat_co,
|
| 501 |
+
mode: _CorrelateMode = ...,
|
| 502 |
+
) -> NDArray[floating[Any]]: ...
|
| 503 |
+
@overload
|
| 504 |
+
def correlate(
|
| 505 |
+
a: _ArrayLikeComplex_co,
|
| 506 |
+
v: _ArrayLikeComplex_co,
|
| 507 |
+
mode: _CorrelateMode = ...,
|
| 508 |
+
) -> NDArray[complexfloating[Any, Any]]: ...
|
| 509 |
+
@overload
|
| 510 |
+
def correlate(
|
| 511 |
+
a: _ArrayLikeTD64_co,
|
| 512 |
+
v: _ArrayLikeTD64_co,
|
| 513 |
+
mode: _CorrelateMode = ...,
|
| 514 |
+
) -> NDArray[timedelta64]: ...
|
| 515 |
+
@overload
|
| 516 |
+
def correlate(
|
| 517 |
+
a: _ArrayLikeObject_co,
|
| 518 |
+
v: _ArrayLikeObject_co,
|
| 519 |
+
mode: _CorrelateMode = ...,
|
| 520 |
+
) -> NDArray[object_]: ...
|
| 521 |
+
|
| 522 |
+
@overload
|
| 523 |
+
def convolve(
|
| 524 |
+
a: _ArrayLikeUnknown,
|
| 525 |
+
v: _ArrayLikeUnknown,
|
| 526 |
+
mode: _CorrelateMode = ...,
|
| 527 |
+
) -> NDArray[Any]: ...
|
| 528 |
+
@overload
|
| 529 |
+
def convolve(
|
| 530 |
+
a: _ArrayLikeBool_co,
|
| 531 |
+
v: _ArrayLikeBool_co,
|
| 532 |
+
mode: _CorrelateMode = ...,
|
| 533 |
+
) -> NDArray[np.bool]: ...
|
| 534 |
+
@overload
|
| 535 |
+
def convolve(
|
| 536 |
+
a: _ArrayLikeUInt_co,
|
| 537 |
+
v: _ArrayLikeUInt_co,
|
| 538 |
+
mode: _CorrelateMode = ...,
|
| 539 |
+
) -> NDArray[unsignedinteger[Any]]: ...
|
| 540 |
+
@overload
|
| 541 |
+
def convolve(
|
| 542 |
+
a: _ArrayLikeInt_co,
|
| 543 |
+
v: _ArrayLikeInt_co,
|
| 544 |
+
mode: _CorrelateMode = ...,
|
| 545 |
+
) -> NDArray[signedinteger[Any]]: ...
|
| 546 |
+
@overload
|
| 547 |
+
def convolve(
|
| 548 |
+
a: _ArrayLikeFloat_co,
|
| 549 |
+
v: _ArrayLikeFloat_co,
|
| 550 |
+
mode: _CorrelateMode = ...,
|
| 551 |
+
) -> NDArray[floating[Any]]: ...
|
| 552 |
+
@overload
|
| 553 |
+
def convolve(
|
| 554 |
+
a: _ArrayLikeComplex_co,
|
| 555 |
+
v: _ArrayLikeComplex_co,
|
| 556 |
+
mode: _CorrelateMode = ...,
|
| 557 |
+
) -> NDArray[complexfloating[Any, Any]]: ...
|
| 558 |
+
@overload
|
| 559 |
+
def convolve(
|
| 560 |
+
a: _ArrayLikeTD64_co,
|
| 561 |
+
v: _ArrayLikeTD64_co,
|
| 562 |
+
mode: _CorrelateMode = ...,
|
| 563 |
+
) -> NDArray[timedelta64]: ...
|
| 564 |
+
@overload
|
| 565 |
+
def convolve(
|
| 566 |
+
a: _ArrayLikeObject_co,
|
| 567 |
+
v: _ArrayLikeObject_co,
|
| 568 |
+
mode: _CorrelateMode = ...,
|
| 569 |
+
) -> NDArray[object_]: ...
|
| 570 |
+
|
| 571 |
+
@overload
|
| 572 |
+
def outer(
|
| 573 |
+
a: _ArrayLikeUnknown,
|
| 574 |
+
b: _ArrayLikeUnknown,
|
| 575 |
+
out: None = ...,
|
| 576 |
+
) -> NDArray[Any]: ...
|
| 577 |
+
@overload
|
| 578 |
+
def outer(
|
| 579 |
+
a: _ArrayLikeBool_co,
|
| 580 |
+
b: _ArrayLikeBool_co,
|
| 581 |
+
out: None = ...,
|
| 582 |
+
) -> NDArray[np.bool]: ...
|
| 583 |
+
@overload
|
| 584 |
+
def outer(
|
| 585 |
+
a: _ArrayLikeUInt_co,
|
| 586 |
+
b: _ArrayLikeUInt_co,
|
| 587 |
+
out: None = ...,
|
| 588 |
+
) -> NDArray[unsignedinteger[Any]]: ...
|
| 589 |
+
@overload
|
| 590 |
+
def outer(
|
| 591 |
+
a: _ArrayLikeInt_co,
|
| 592 |
+
b: _ArrayLikeInt_co,
|
| 593 |
+
out: None = ...,
|
| 594 |
+
) -> NDArray[signedinteger[Any]]: ...
|
| 595 |
+
@overload
|
| 596 |
+
def outer(
|
| 597 |
+
a: _ArrayLikeFloat_co,
|
| 598 |
+
b: _ArrayLikeFloat_co,
|
| 599 |
+
out: None = ...,
|
| 600 |
+
) -> NDArray[floating[Any]]: ...
|
| 601 |
+
@overload
|
| 602 |
+
def outer(
|
| 603 |
+
a: _ArrayLikeComplex_co,
|
| 604 |
+
b: _ArrayLikeComplex_co,
|
| 605 |
+
out: None = ...,
|
| 606 |
+
) -> NDArray[complexfloating[Any, Any]]: ...
|
| 607 |
+
@overload
|
| 608 |
+
def outer(
|
| 609 |
+
a: _ArrayLikeTD64_co,
|
| 610 |
+
b: _ArrayLikeTD64_co,
|
| 611 |
+
out: None = ...,
|
| 612 |
+
) -> NDArray[timedelta64]: ...
|
| 613 |
+
@overload
|
| 614 |
+
def outer(
|
| 615 |
+
a: _ArrayLikeObject_co,
|
| 616 |
+
b: _ArrayLikeObject_co,
|
| 617 |
+
out: None = ...,
|
| 618 |
+
) -> NDArray[object_]: ...
|
| 619 |
+
@overload
|
| 620 |
+
def outer(
|
| 621 |
+
a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,
|
| 622 |
+
b: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,
|
| 623 |
+
out: _ArrayType,
|
| 624 |
+
) -> _ArrayType: ...
|
| 625 |
+
|
| 626 |
+
@overload
|
| 627 |
+
def tensordot(
|
| 628 |
+
a: _ArrayLikeUnknown,
|
| 629 |
+
b: _ArrayLikeUnknown,
|
| 630 |
+
axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
|
| 631 |
+
) -> NDArray[Any]: ...
|
| 632 |
+
@overload
|
| 633 |
+
def tensordot(
|
| 634 |
+
a: _ArrayLikeBool_co,
|
| 635 |
+
b: _ArrayLikeBool_co,
|
| 636 |
+
axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
|
| 637 |
+
) -> NDArray[np.bool]: ...
|
| 638 |
+
@overload
|
| 639 |
+
def tensordot(
|
| 640 |
+
a: _ArrayLikeUInt_co,
|
| 641 |
+
b: _ArrayLikeUInt_co,
|
| 642 |
+
axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
|
| 643 |
+
) -> NDArray[unsignedinteger[Any]]: ...
|
| 644 |
+
@overload
|
| 645 |
+
def tensordot(
|
| 646 |
+
a: _ArrayLikeInt_co,
|
| 647 |
+
b: _ArrayLikeInt_co,
|
| 648 |
+
axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
|
| 649 |
+
) -> NDArray[signedinteger[Any]]: ...
|
| 650 |
+
@overload
|
| 651 |
+
def tensordot(
|
| 652 |
+
a: _ArrayLikeFloat_co,
|
| 653 |
+
b: _ArrayLikeFloat_co,
|
| 654 |
+
axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
|
| 655 |
+
) -> NDArray[floating[Any]]: ...
|
| 656 |
+
@overload
|
| 657 |
+
def tensordot(
|
| 658 |
+
a: _ArrayLikeComplex_co,
|
| 659 |
+
b: _ArrayLikeComplex_co,
|
| 660 |
+
axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
|
| 661 |
+
) -> NDArray[complexfloating[Any, Any]]: ...
|
| 662 |
+
@overload
|
| 663 |
+
def tensordot(
|
| 664 |
+
a: _ArrayLikeTD64_co,
|
| 665 |
+
b: _ArrayLikeTD64_co,
|
| 666 |
+
axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
|
| 667 |
+
) -> NDArray[timedelta64]: ...
|
| 668 |
+
@overload
|
| 669 |
+
def tensordot(
|
| 670 |
+
a: _ArrayLikeObject_co,
|
| 671 |
+
b: _ArrayLikeObject_co,
|
| 672 |
+
axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
|
| 673 |
+
) -> NDArray[object_]: ...
|
| 674 |
+
|
| 675 |
+
@overload
|
| 676 |
+
def roll(
|
| 677 |
+
a: _ArrayLike[_SCT],
|
| 678 |
+
shift: _ShapeLike,
|
| 679 |
+
axis: None | _ShapeLike = ...,
|
| 680 |
+
) -> NDArray[_SCT]: ...
|
| 681 |
+
@overload
|
| 682 |
+
def roll(
|
| 683 |
+
a: ArrayLike,
|
| 684 |
+
shift: _ShapeLike,
|
| 685 |
+
axis: None | _ShapeLike = ...,
|
| 686 |
+
) -> NDArray[Any]: ...
|
| 687 |
+
|
| 688 |
+
def rollaxis(
|
| 689 |
+
a: NDArray[_SCT],
|
| 690 |
+
axis: int,
|
| 691 |
+
start: int = ...,
|
| 692 |
+
) -> NDArray[_SCT]: ...
|
| 693 |
+
|
| 694 |
+
def moveaxis(
|
| 695 |
+
a: NDArray[_SCT],
|
| 696 |
+
source: _ShapeLike,
|
| 697 |
+
destination: _ShapeLike,
|
| 698 |
+
) -> NDArray[_SCT]: ...
|
| 699 |
+
|
| 700 |
+
@overload
|
| 701 |
+
def cross(
|
| 702 |
+
x1: _ArrayLikeUnknown,
|
| 703 |
+
x2: _ArrayLikeUnknown,
|
| 704 |
+
axisa: int = ...,
|
| 705 |
+
axisb: int = ...,
|
| 706 |
+
axisc: int = ...,
|
| 707 |
+
axis: None | int = ...,
|
| 708 |
+
) -> NDArray[Any]: ...
|
| 709 |
+
@overload
|
| 710 |
+
def cross(
|
| 711 |
+
x1: _ArrayLikeBool_co,
|
| 712 |
+
x2: _ArrayLikeBool_co,
|
| 713 |
+
axisa: int = ...,
|
| 714 |
+
axisb: int = ...,
|
| 715 |
+
axisc: int = ...,
|
| 716 |
+
axis: None | int = ...,
|
| 717 |
+
) -> NoReturn: ...
|
| 718 |
+
@overload
|
| 719 |
+
def cross(
|
| 720 |
+
x1: _ArrayLikeUInt_co,
|
| 721 |
+
x2: _ArrayLikeUInt_co,
|
| 722 |
+
axisa: int = ...,
|
| 723 |
+
axisb: int = ...,
|
| 724 |
+
axisc: int = ...,
|
| 725 |
+
axis: None | int = ...,
|
| 726 |
+
) -> NDArray[unsignedinteger[Any]]: ...
|
| 727 |
+
@overload
|
| 728 |
+
def cross(
|
| 729 |
+
x1: _ArrayLikeInt_co,
|
| 730 |
+
x2: _ArrayLikeInt_co,
|
| 731 |
+
axisa: int = ...,
|
| 732 |
+
axisb: int = ...,
|
| 733 |
+
axisc: int = ...,
|
| 734 |
+
axis: None | int = ...,
|
| 735 |
+
) -> NDArray[signedinteger[Any]]: ...
|
| 736 |
+
@overload
|
| 737 |
+
def cross(
|
| 738 |
+
x1: _ArrayLikeFloat_co,
|
| 739 |
+
x2: _ArrayLikeFloat_co,
|
| 740 |
+
axisa: int = ...,
|
| 741 |
+
axisb: int = ...,
|
| 742 |
+
axisc: int = ...,
|
| 743 |
+
axis: None | int = ...,
|
| 744 |
+
) -> NDArray[floating[Any]]: ...
|
| 745 |
+
@overload
|
| 746 |
+
def cross(
|
| 747 |
+
x1: _ArrayLikeComplex_co,
|
| 748 |
+
x2: _ArrayLikeComplex_co,
|
| 749 |
+
axisa: int = ...,
|
| 750 |
+
axisb: int = ...,
|
| 751 |
+
axisc: int = ...,
|
| 752 |
+
axis: None | int = ...,
|
| 753 |
+
) -> NDArray[complexfloating[Any, Any]]: ...
|
| 754 |
+
@overload
|
| 755 |
+
def cross(
|
| 756 |
+
x1: _ArrayLikeObject_co,
|
| 757 |
+
x2: _ArrayLikeObject_co,
|
| 758 |
+
axisa: int = ...,
|
| 759 |
+
axisb: int = ...,
|
| 760 |
+
axisc: int = ...,
|
| 761 |
+
axis: None | int = ...,
|
| 762 |
+
) -> NDArray[object_]: ...
|
| 763 |
+
|
| 764 |
+
@overload
|
| 765 |
+
def indices(
|
| 766 |
+
dimensions: Sequence[int],
|
| 767 |
+
dtype: type[int] = ...,
|
| 768 |
+
sparse: L[False] = ...,
|
| 769 |
+
) -> NDArray[int_]: ...
|
| 770 |
+
@overload
|
| 771 |
+
def indices(
|
| 772 |
+
dimensions: Sequence[int],
|
| 773 |
+
dtype: type[int] = ...,
|
| 774 |
+
sparse: L[True] = ...,
|
| 775 |
+
) -> tuple[NDArray[int_], ...]: ...
|
| 776 |
+
@overload
|
| 777 |
+
def indices(
|
| 778 |
+
dimensions: Sequence[int],
|
| 779 |
+
dtype: _DTypeLike[_SCT],
|
| 780 |
+
sparse: L[False] = ...,
|
| 781 |
+
) -> NDArray[_SCT]: ...
|
| 782 |
+
@overload
|
| 783 |
+
def indices(
|
| 784 |
+
dimensions: Sequence[int],
|
| 785 |
+
dtype: _DTypeLike[_SCT],
|
| 786 |
+
sparse: L[True],
|
| 787 |
+
) -> tuple[NDArray[_SCT], ...]: ...
|
| 788 |
+
@overload
|
| 789 |
+
def indices(
|
| 790 |
+
dimensions: Sequence[int],
|
| 791 |
+
dtype: DTypeLike,
|
| 792 |
+
sparse: L[False] = ...,
|
| 793 |
+
) -> NDArray[Any]: ...
|
| 794 |
+
@overload
|
| 795 |
+
def indices(
|
| 796 |
+
dimensions: Sequence[int],
|
| 797 |
+
dtype: DTypeLike,
|
| 798 |
+
sparse: L[True],
|
| 799 |
+
) -> tuple[NDArray[Any], ...]: ...
|
| 800 |
+
|
| 801 |
+
def fromfunction(
|
| 802 |
+
function: Callable[..., _T],
|
| 803 |
+
shape: Sequence[int],
|
| 804 |
+
*,
|
| 805 |
+
dtype: DTypeLike = ...,
|
| 806 |
+
like: _SupportsArrayFunc = ...,
|
| 807 |
+
**kwargs: Any,
|
| 808 |
+
) -> _T: ...
|
| 809 |
+
|
| 810 |
+
def isscalar(element: object) -> TypeGuard[
|
| 811 |
+
generic | bool | int | float | complex | str | bytes | memoryview
|
| 812 |
+
]: ...
|
| 813 |
+
|
| 814 |
+
def binary_repr(num: SupportsIndex, width: None | int = ...) -> str: ...
|
| 815 |
+
|
| 816 |
+
def base_repr(
|
| 817 |
+
number: SupportsAbs[float],
|
| 818 |
+
base: float = ...,
|
| 819 |
+
padding: SupportsIndex = ...,
|
| 820 |
+
) -> str: ...
|
| 821 |
+
|
| 822 |
+
@overload
|
| 823 |
+
def identity(
|
| 824 |
+
n: int,
|
| 825 |
+
dtype: None = ...,
|
| 826 |
+
*,
|
| 827 |
+
like: _SupportsArrayFunc = ...,
|
| 828 |
+
) -> NDArray[float64]: ...
|
| 829 |
+
@overload
|
| 830 |
+
def identity(
|
| 831 |
+
n: int,
|
| 832 |
+
dtype: _DTypeLike[_SCT],
|
| 833 |
+
*,
|
| 834 |
+
like: _SupportsArrayFunc = ...,
|
| 835 |
+
) -> NDArray[_SCT]: ...
|
| 836 |
+
@overload
|
| 837 |
+
def identity(
|
| 838 |
+
n: int,
|
| 839 |
+
dtype: DTypeLike,
|
| 840 |
+
*,
|
| 841 |
+
like: _SupportsArrayFunc = ...,
|
| 842 |
+
) -> NDArray[Any]: ...
|
| 843 |
+
|
| 844 |
+
def allclose(
|
| 845 |
+
a: ArrayLike,
|
| 846 |
+
b: ArrayLike,
|
| 847 |
+
rtol: ArrayLike = ...,
|
| 848 |
+
atol: ArrayLike = ...,
|
| 849 |
+
equal_nan: bool = ...,
|
| 850 |
+
) -> bool: ...
|
| 851 |
+
|
| 852 |
+
@overload
|
| 853 |
+
def isclose(
|
| 854 |
+
a: _ScalarLike_co,
|
| 855 |
+
b: _ScalarLike_co,
|
| 856 |
+
rtol: ArrayLike = ...,
|
| 857 |
+
atol: ArrayLike = ...,
|
| 858 |
+
equal_nan: bool = ...,
|
| 859 |
+
) -> np.bool: ...
|
| 860 |
+
@overload
|
| 861 |
+
def isclose(
|
| 862 |
+
a: ArrayLike,
|
| 863 |
+
b: ArrayLike,
|
| 864 |
+
rtol: ArrayLike = ...,
|
| 865 |
+
atol: ArrayLike = ...,
|
| 866 |
+
equal_nan: bool = ...,
|
| 867 |
+
) -> NDArray[np.bool]: ...
|
| 868 |
+
|
| 869 |
+
def array_equal(a1: ArrayLike, a2: ArrayLike, equal_nan: bool = ...) -> bool: ...
|
| 870 |
+
|
| 871 |
+
def array_equiv(a1: ArrayLike, a2: ArrayLike) -> bool: ...
|
| 872 |
+
|
| 873 |
+
@overload
|
| 874 |
+
def astype(
|
| 875 |
+
x: ndarray[_ShapeType, dtype[Any]],
|
| 876 |
+
dtype: _DTypeLike[_SCT],
|
| 877 |
+
copy: bool = ...,
|
| 878 |
+
device: None | L["cpu"] = ...,
|
| 879 |
+
) -> ndarray[_ShapeType, dtype[_SCT]]: ...
|
| 880 |
+
@overload
|
| 881 |
+
def astype(
|
| 882 |
+
x: ndarray[_ShapeType, dtype[Any]],
|
| 883 |
+
dtype: DTypeLike,
|
| 884 |
+
copy: bool = ...,
|
| 885 |
+
device: None | L["cpu"] = ...,
|
| 886 |
+
) -> ndarray[_ShapeType, dtype[Any]]: ...
|
janus/lib/python3.10/site-packages/numpy/_core/numerictypes.py
ADDED
|
@@ -0,0 +1,629 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
numerictypes: Define the numeric type objects
|
| 3 |
+
|
| 4 |
+
This module is designed so "from numerictypes import \\*" is safe.
|
| 5 |
+
Exported symbols include:
|
| 6 |
+
|
| 7 |
+
Dictionary with all registered number types (including aliases):
|
| 8 |
+
sctypeDict
|
| 9 |
+
|
| 10 |
+
Type objects (not all will be available, depends on platform):
|
| 11 |
+
see variable sctypes for which ones you have
|
| 12 |
+
|
| 13 |
+
Bit-width names
|
| 14 |
+
|
| 15 |
+
int8 int16 int32 int64 int128
|
| 16 |
+
uint8 uint16 uint32 uint64 uint128
|
| 17 |
+
float16 float32 float64 float96 float128 float256
|
| 18 |
+
complex32 complex64 complex128 complex192 complex256 complex512
|
| 19 |
+
datetime64 timedelta64
|
| 20 |
+
|
| 21 |
+
c-based names
|
| 22 |
+
|
| 23 |
+
bool
|
| 24 |
+
|
| 25 |
+
object_
|
| 26 |
+
|
| 27 |
+
void, str_
|
| 28 |
+
|
| 29 |
+
byte, ubyte,
|
| 30 |
+
short, ushort
|
| 31 |
+
intc, uintc,
|
| 32 |
+
intp, uintp,
|
| 33 |
+
int_, uint,
|
| 34 |
+
longlong, ulonglong,
|
| 35 |
+
|
| 36 |
+
single, csingle,
|
| 37 |
+
double, cdouble,
|
| 38 |
+
longdouble, clongdouble,
|
| 39 |
+
|
| 40 |
+
As part of the type-hierarchy: xx -- is bit-width
|
| 41 |
+
|
| 42 |
+
generic
|
| 43 |
+
+-> bool (kind=b)
|
| 44 |
+
+-> number
|
| 45 |
+
| +-> integer
|
| 46 |
+
| | +-> signedinteger (intxx) (kind=i)
|
| 47 |
+
| | | byte
|
| 48 |
+
| | | short
|
| 49 |
+
| | | intc
|
| 50 |
+
| | | intp
|
| 51 |
+
| | | int_
|
| 52 |
+
| | | longlong
|
| 53 |
+
| | \\-> unsignedinteger (uintxx) (kind=u)
|
| 54 |
+
| | ubyte
|
| 55 |
+
| | ushort
|
| 56 |
+
| | uintc
|
| 57 |
+
| | uintp
|
| 58 |
+
| | uint
|
| 59 |
+
| | ulonglong
|
| 60 |
+
| +-> inexact
|
| 61 |
+
| +-> floating (floatxx) (kind=f)
|
| 62 |
+
| | half
|
| 63 |
+
| | single
|
| 64 |
+
| | double
|
| 65 |
+
| | longdouble
|
| 66 |
+
| \\-> complexfloating (complexxx) (kind=c)
|
| 67 |
+
| csingle
|
| 68 |
+
| cdouble
|
| 69 |
+
| clongdouble
|
| 70 |
+
+-> flexible
|
| 71 |
+
| +-> character
|
| 72 |
+
| | bytes_ (kind=S)
|
| 73 |
+
| | str_ (kind=U)
|
| 74 |
+
| |
|
| 75 |
+
| \\-> void (kind=V)
|
| 76 |
+
\\-> object_ (not used much) (kind=O)
|
| 77 |
+
|
| 78 |
+
"""
|
| 79 |
+
import numbers
|
| 80 |
+
import warnings
|
| 81 |
+
|
| 82 |
+
from . import multiarray as ma
|
| 83 |
+
from .multiarray import (
|
| 84 |
+
ndarray, dtype, datetime_data, datetime_as_string,
|
| 85 |
+
busday_offset, busday_count, is_busday, busdaycalendar
|
| 86 |
+
)
|
| 87 |
+
from .._utils import set_module
|
| 88 |
+
|
| 89 |
+
# we add more at the bottom
|
| 90 |
+
__all__ = [
|
| 91 |
+
'ScalarType', 'typecodes', 'issubdtype', 'datetime_data',
|
| 92 |
+
'datetime_as_string', 'busday_offset', 'busday_count',
|
| 93 |
+
'is_busday', 'busdaycalendar', 'isdtype'
|
| 94 |
+
]
|
| 95 |
+
|
| 96 |
+
# we don't need all these imports, but we need to keep them for compatibility
|
| 97 |
+
# for users using np._core.numerictypes.UPPER_TABLE
|
| 98 |
+
from ._string_helpers import ( # noqa: F401
|
| 99 |
+
english_lower, english_upper, english_capitalize, LOWER_TABLE, UPPER_TABLE
|
| 100 |
+
)
|
| 101 |
+
|
| 102 |
+
from ._type_aliases import (
|
| 103 |
+
sctypeDict, allTypes, sctypes
|
| 104 |
+
)
|
| 105 |
+
from ._dtype import _kind_name
|
| 106 |
+
|
| 107 |
+
# we don't export these for import *, but we do want them accessible
|
| 108 |
+
# as numerictypes.bool, etc.
|
| 109 |
+
from builtins import bool, int, float, complex, object, str, bytes # noqa: F401, UP029
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
# We use this later
|
| 113 |
+
generic = allTypes['generic']
|
| 114 |
+
|
| 115 |
+
# Names of scalar types ordered from lowest to highest precision/rank.
# Includes platform-dependent sizes (int128, float80/96/128/256, complex160/
# 192/256/512) that may not exist on every build; this is a name ranking,
# not a list of available types.
genericTypeRank = ['bool', 'int8', 'uint8', 'int16', 'uint16',
                   'int32', 'uint32', 'int64', 'uint64', 'int128',
                   'uint128', 'float16',
                   'float32', 'float64', 'float80', 'float96', 'float128',
                   'float256',
                   'complex32', 'complex64', 'complex128', 'complex160',
                   'complex192', 'complex256', 'complex512', 'object']
|
| 122 |
+
|
| 123 |
+
@set_module('numpy')
def maximum_sctype(t):
    """
    Return the scalar type of highest precision of the same kind as the input.

    .. deprecated:: 2.0
        Use an explicit dtype like int64 or float64 instead.

    Parameters
    ----------
    t : dtype or dtype specifier
        The input data type. This can be a `dtype` object or an object that
        is convertible to a `dtype`.

    Returns
    -------
    out : dtype
        The highest precision data type of the same kind (`dtype.kind`) as
        `t`; `t` itself when its kind cannot be resolved.

    See Also
    --------
    obj2sctype, mintypecode, sctype2char
    dtype

    Examples
    --------
    >>> from numpy._core.numerictypes import maximum_sctype
    >>> maximum_sctype(int)
    <class 'numpy.int64'>
    >>> maximum_sctype(np.uint8)
    <class 'numpy.uint64'>
    >>> maximum_sctype('f4')
    <class 'numpy.float128'>  # may vary

    """
    # Deprecated in NumPy 2.0, 2023-07-11
    warnings.warn(
        "`maximum_sctype` is deprecated. Use an explicit dtype like int64 "
        "or float64 instead. (deprecated in NumPy 2.0)",
        DeprecationWarning,
        stacklevel=2
    )

    scalar_type = obj2sctype(t)
    if scalar_type is None:
        # Could not be resolved to a scalar type at all; hand back unchanged.
        return t
    kind = _kind_name(dtype(scalar_type))
    if kind in sctypes:
        # Widest member of this kind is listed last.
        return sctypes[kind][-1]
    return scalar_type
|
| 184 |
+
|
| 185 |
+
|
| 186 |
+
@set_module('numpy')
def issctype(rep):
    """
    Determine whether the given object represents a scalar data-type.

    Parameters
    ----------
    rep : any
        If `rep` is an instance of a scalar dtype, True is returned. If not,
        False is returned.

    Returns
    -------
    out : bool
        Boolean result of check whether `rep` is a scalar dtype.

    See Also
    --------
    issubsctype, issubdtype, obj2sctype, sctype2char

    Examples
    --------
    >>> from numpy._core.numerictypes import issctype
    >>> issctype(np.int32)
    True
    >>> issctype(list)
    False
    >>> issctype(1.1)
    False

    Strings are also a scalar type:

    >>> issctype(np.dtype('str'))
    True

    """
    # Only type objects and dtype instances can represent a scalar dtype.
    if not isinstance(rep, (type, dtype)):
        return False
    try:
        candidate = obj2sctype(rep)
        # Anything that collapses to object_ (or nothing) is not a real
        # scalar dtype.
        return bool(candidate) and candidate != object_
    except Exception:
        return False
|
| 232 |
+
|
| 233 |
+
|
| 234 |
+
@set_module('numpy')
def obj2sctype(rep, default=None):
    """
    Return the scalar dtype or NumPy equivalent of Python type of an object.

    Parameters
    ----------
    rep : any
        The object of which the type is returned.
    default : any, optional
        If given, this is returned for objects whose types can not be
        determined. If not given, None is returned for those objects.

    Returns
    -------
    dtype : dtype or Python type
        The data type of `rep`.

    See Also
    --------
    sctype2char, issctype, issubsctype, issubdtype

    Examples
    --------
    >>> from numpy._core.numerictypes import obj2sctype
    >>> obj2sctype(np.int32)
    <class 'numpy.int32'>
    >>> obj2sctype(np.array([1., 2.]))
    <class 'numpy.float64'>
    >>> obj2sctype(dict)
    <class 'numpy.object_'>
    >>> obj2sctype(1, default=list)
    <class 'list'>

    """
    # NumPy scalar-type classes pass straight through; this also prevents
    # abstract classes (e.g. np.integer) from being upcast by dtype().
    if isinstance(rep, type) and issubclass(rep, generic):
        return rep
    # Arrays report the scalar type of their elements.
    if isinstance(rep, ndarray):
        return rep.dtype.type
    # Otherwise let the dtype constructor do the interpretation.
    try:
        converted = dtype(rep)
    except Exception:
        return default
    return converted.type
|
| 287 |
+
|
| 288 |
+
|
| 289 |
+
@set_module('numpy')
def issubclass_(arg1, arg2):
    """
    Determine if a class is a subclass of a second class.

    `issubclass_` behaves like the Python built-in ``issubclass``, except
    that it returns False instead of raising a TypeError when either
    argument is not a class.

    Parameters
    ----------
    arg1 : class
        Input class. True is returned if `arg1` is a subclass of `arg2`.
    arg2 : class or tuple of classes.
        Input class. If a tuple of classes, True is returned if `arg1` is a
        subclass of any of the tuple elements.

    Returns
    -------
    out : bool
        Whether `arg1` is a subclass of `arg2` or not.

    See Also
    --------
    issubsctype, issubdtype, issctype

    Examples
    --------
    >>> np.issubclass_(np.int32, int)
    False
    >>> np.issubclass_(np.float64, float)
    True

    """
    try:
        result = issubclass(arg1, arg2)
    except TypeError:
        # Non-class arguments: report "not a subclass" rather than raising.
        result = False
    return result
|
| 329 |
+
|
| 330 |
+
|
| 331 |
+
@set_module('numpy')
def issubsctype(arg1, arg2):
    """
    Determine if the first argument is a subclass of the second argument.

    Both arguments are first resolved to scalar types via `obj2sctype`,
    then compared with the built-in ``issubclass``.

    Parameters
    ----------
    arg1, arg2 : dtype or dtype specifier
        Data-types.

    Returns
    -------
    out : bool
        The result.

    See Also
    --------
    issctype, issubdtype, obj2sctype

    Examples
    --------
    >>> from numpy._core import issubsctype
    >>> issubsctype('S8', str)
    False
    >>> issubsctype(np.array([1]), int)
    True
    >>> issubsctype(np.array([1]), float)
    False

    """
    first = obj2sctype(arg1)
    second = obj2sctype(arg2)
    return issubclass(first, second)
|
| 362 |
+
|
| 363 |
+
|
| 364 |
+
class _PreprocessDTypeError(Exception):
    """Internal: raised by ``_preprocess_dtype`` when its argument is not a
    built-in NumPy scalar type; callers translate it into a TypeError."""
    pass
|
| 366 |
+
|
| 367 |
+
|
| 368 |
+
def _preprocess_dtype(dtype):
    """
    Normalize a dtype argument to its scalar type and validate it.

    1. A ``dtype`` instance is reduced to its scalar ``.type``.
    2. The result must be one of the built-in NumPy scalar types, otherwise
       ``_PreprocessDTypeError`` is raised (arrays are rejected explicitly,
       before the membership test, since comparing them is ambiguous).
    """
    if isinstance(dtype, ma.dtype):
        dtype = dtype.type
    # Short-circuit on ndarray first: ``in`` would trigger elementwise
    # comparison otherwise.
    is_builtin = (not isinstance(dtype, ndarray)
                  and dtype in allTypes.values())
    if not is_builtin:
        raise _PreprocessDTypeError
    return dtype
|
| 379 |
+
|
| 380 |
+
|
| 381 |
+
@set_module('numpy')
def isdtype(dtype, kind):
    """
    Determine if a provided dtype is of a specified data type ``kind``.

    This function only supports built-in NumPy's data types.
    Third-party dtypes are not yet supported.

    Parameters
    ----------
    dtype : dtype
        The input dtype.
    kind : dtype or str or tuple of dtypes/strs.
        dtype or dtype kind. Allowed dtype kinds are:

        * ``'bool'`` : boolean kind
        * ``'signed integer'`` : signed integer data types
        * ``'unsigned integer'`` : unsigned integer data types
        * ``'integral'`` : integer data types
        * ``'real floating'`` : real-valued floating-point data types
        * ``'complex floating'`` : complex floating-point data types
        * ``'numeric'`` : numeric data types

    Returns
    -------
    out : bool

    Raises
    ------
    TypeError
        If `dtype` (or an element of `kind`) is not a built-in NumPy dtype.
    ValueError
        If a string element of `kind` is not a known kind name.

    See Also
    --------
    issubdtype

    Examples
    --------
    >>> import numpy as np
    >>> np.isdtype(np.float32, np.float64)
    False
    >>> np.isdtype(np.float32, "real floating")
    True
    >>> np.isdtype(np.complex128, ("real floating", "complex floating"))
    True

    """
    try:
        dtype = _preprocess_dtype(dtype)
    except _PreprocessDTypeError:
        raise TypeError(
            "dtype argument must be a NumPy dtype, "
            f"but it is a {type(dtype)}."
        ) from None

    # Map each recognized kind name onto the scalar types it covers.
    kind_groups = {
        "bool": [allTypes["bool"]],
        "signed integer": sctypes["int"],
        "unsigned integer": sctypes["uint"],
        "integral": sctypes["int"] + sctypes["uint"],
        "real floating": sctypes["float"],
        "complex floating": sctypes["complex"],
        "numeric": (sctypes["int"] + sctypes["uint"]
                    + sctypes["float"] + sctypes["complex"]),
    }

    requested = kind if isinstance(kind, tuple) else (kind,)
    accepted = set()
    for entry in requested:
        if isinstance(entry, str):
            group = kind_groups.get(entry)
            if group is None:
                raise ValueError(
                    "kind argument is a string, but"
                    f" {entry!r} is not a known kind name."
                )
            accepted.update(group)
        else:
            # Non-string entries must themselves be built-in NumPy dtypes.
            try:
                accepted.add(_preprocess_dtype(entry))
            except _PreprocessDTypeError:
                raise TypeError(
                    "kind argument must be comprised of "
                    "NumPy dtypes or strings only, "
                    f"but is a {type(entry)}."
                ) from None

    return dtype in accepted
|
| 469 |
+
|
| 470 |
+
|
| 471 |
+
@set_module('numpy')
def issubdtype(arg1, arg2):
    r"""
    Returns True if first argument is a typecode lower/equal in type hierarchy.

    This is like the builtin :func:`issubclass`, but for `dtype`\ s.

    Parameters
    ----------
    arg1, arg2 : dtype_like
        `dtype` or object coercible to one

    Returns
    -------
    out : bool

    See Also
    --------
    :ref:`arrays.scalars` : Overview of the numpy type hierarchy.

    Examples
    --------
    >>> ints = np.array([1, 2, 3], dtype=np.int32)
    >>> np.issubdtype(ints.dtype, np.integer)
    True
    >>> np.issubdtype(ints.dtype, np.floating)
    False

    Similar types of different sizes are not subdtypes of each other:

    >>> np.issubdtype(np.float64, np.float32)
    False
    >>> np.issubdtype(np.float32, np.float64)
    False

    but both are subtypes of `floating`:

    >>> np.issubdtype(np.float64, np.floating)
    True
    >>> np.issubdtype(np.float32, np.floating)
    True

    For convenience, dtype-like objects are allowed too:

    >>> np.issubdtype('S1', np.bytes_)
    True
    >>> np.issubdtype('i4', np.signedinteger)
    True

    """
    def _as_scalar_type(arg):
        # Anything that is not already a NumPy scalar-type class is coerced
        # through the dtype constructor.
        if issubclass_(arg, generic):
            return arg
        return dtype(arg).type

    return issubclass(_as_scalar_type(arg1), _as_scalar_type(arg2))
|
| 535 |
+
|
| 536 |
+
|
| 537 |
+
@set_module('numpy')
def sctype2char(sctype):
    """
    Return the string representation of a scalar dtype.

    Parameters
    ----------
    sctype : scalar dtype or object
        If a scalar dtype, the corresponding string character is
        returned. If an object, `sctype2char` tries to infer its scalar type
        and then return the corresponding string character.

    Returns
    -------
    typechar : str
        The string character corresponding to the scalar type.

    Raises
    ------
    ValueError
        If `sctype` is an object for which the type can not be inferred.
    KeyError
        If the inferred type is not a registered scalar type.

    See Also
    --------
    obj2sctype, issctype, issubsctype, mintypecode

    Examples
    --------
    >>> from numpy._core.numerictypes import sctype2char
    >>> sctype2char(np.double)
    'd'
    >>> x = np.array([1., 2-1.j])
    >>> sctype2char(x)
    'D'
    >>> sctype2char(list)
    'O'

    """
    resolved = obj2sctype(sctype)
    if resolved is None:
        raise ValueError("unrecognized type")
    if resolved not in sctypeDict.values():
        # for compatibility
        raise KeyError(resolved)
    return dtype(resolved).char
|
| 588 |
+
|
| 589 |
+
|
| 590 |
+
def _scalar_type_key(typ):
    """A ``key`` function for `sorted`: (lower-cased dtype kind, item size)."""
    info = dtype(typ)
    return (info.kind.lower(), info.itemsize)
|
| 594 |
+
|
| 595 |
+
|
| 596 |
+
# Tuple of every Python and NumPy scalar type, builtins first, then the
# registered NumPy types sorted by (kind, itemsize).
ScalarType = [int, float, complex, bool, bytes, str, memoryview]
ScalarType += sorted(set(sctypeDict.values()), key=_scalar_type_key)
ScalarType = tuple(ScalarType)


# Now add the types we've determined to this module's namespace and public API
for key in allTypes:
    globals()[key] = allTypes[key]
    __all__.append(key)

del key

# Typecode characters grouped by category (see ``numpy.dtype.char``).
typecodes = {'Character': 'c',
             'Integer': 'bhilqnp',
             'UnsignedInteger': 'BHILQNP',
             'Float': 'efdg',
             'Complex': 'FDG',
             'AllInteger': 'bBhHiIlLqQnNpP',
             'AllFloat': 'efdgFDG',
             'Datetime': 'Mm',
             'All': '?bhilqnpBHILQNPefdgFDGSUVOMm'}

# backwards compatibility --- deprecated name
# Formal deprecation: Numpy 1.20.0, 2020-10-19 (see numpy/__init__.py)
typeDict = sctypeDict
|
| 621 |
+
|
| 622 |
+
def _register_types():
    """Register NumPy's abstract scalar base classes as virtual subclasses
    of the corresponding :mod:`numbers` ABCs."""
    abc_pairs = (
        (numbers.Integral, integer),
        (numbers.Complex, inexact),
        (numbers.Real, floating),
        (numbers.Number, number),
    )
    for abc, scalar_base in abc_pairs:
        abc.register(scalar_base)


_register_types()
|
janus/lib/python3.10/site-packages/numpy/_core/numerictypes.pyi
ADDED
|
@@ -0,0 +1,217 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import builtins
|
| 2 |
+
from typing import (
|
| 3 |
+
Any,
|
| 4 |
+
Literal as L,
|
| 5 |
+
TypedDict,
|
| 6 |
+
type_check_only,
|
| 7 |
+
)
|
| 8 |
+
|
| 9 |
+
import numpy as np
|
| 10 |
+
from numpy import (
|
| 11 |
+
dtype,
|
| 12 |
+
generic,
|
| 13 |
+
bool,
|
| 14 |
+
bool_,
|
| 15 |
+
uint8,
|
| 16 |
+
uint16,
|
| 17 |
+
uint32,
|
| 18 |
+
uint64,
|
| 19 |
+
ubyte,
|
| 20 |
+
ushort,
|
| 21 |
+
uintc,
|
| 22 |
+
ulong,
|
| 23 |
+
ulonglong,
|
| 24 |
+
uintp,
|
| 25 |
+
uint,
|
| 26 |
+
int8,
|
| 27 |
+
int16,
|
| 28 |
+
int32,
|
| 29 |
+
int64,
|
| 30 |
+
byte,
|
| 31 |
+
short,
|
| 32 |
+
intc,
|
| 33 |
+
long,
|
| 34 |
+
longlong,
|
| 35 |
+
intp,
|
| 36 |
+
int_,
|
| 37 |
+
float16,
|
| 38 |
+
float32,
|
| 39 |
+
float64,
|
| 40 |
+
half,
|
| 41 |
+
single,
|
| 42 |
+
double,
|
| 43 |
+
longdouble,
|
| 44 |
+
complex64,
|
| 45 |
+
complex128,
|
| 46 |
+
csingle,
|
| 47 |
+
cdouble,
|
| 48 |
+
clongdouble,
|
| 49 |
+
datetime64,
|
| 50 |
+
timedelta64,
|
| 51 |
+
object_,
|
| 52 |
+
str_,
|
| 53 |
+
bytes_,
|
| 54 |
+
void,
|
| 55 |
+
unsignedinteger,
|
| 56 |
+
character,
|
| 57 |
+
inexact,
|
| 58 |
+
number,
|
| 59 |
+
integer,
|
| 60 |
+
flexible,
|
| 61 |
+
complexfloating,
|
| 62 |
+
signedinteger,
|
| 63 |
+
floating,
|
| 64 |
+
)
|
| 65 |
+
from ._type_aliases import sctypeDict # noqa: F401
|
| 66 |
+
from .multiarray import (
|
| 67 |
+
busday_count,
|
| 68 |
+
busday_offset,
|
| 69 |
+
busdaycalendar,
|
| 70 |
+
datetime_as_string,
|
| 71 |
+
datetime_data,
|
| 72 |
+
is_busday,
|
| 73 |
+
)
|
| 74 |
+
|
| 75 |
+
from numpy._typing import DTypeLike
|
| 76 |
+
from numpy._typing._extended_precision import (
|
| 77 |
+
uint128,
|
| 78 |
+
uint256,
|
| 79 |
+
int128,
|
| 80 |
+
int256,
|
| 81 |
+
float80,
|
| 82 |
+
float96,
|
| 83 |
+
float128,
|
| 84 |
+
float256,
|
| 85 |
+
complex160,
|
| 86 |
+
complex192,
|
| 87 |
+
complex256,
|
| 88 |
+
complex512,
|
| 89 |
+
)
|
| 90 |
+
|
| 91 |
+
__all__ = [
|
| 92 |
+
"ScalarType",
|
| 93 |
+
"typecodes",
|
| 94 |
+
"issubdtype",
|
| 95 |
+
"datetime_data",
|
| 96 |
+
"datetime_as_string",
|
| 97 |
+
"busday_offset",
|
| 98 |
+
"busday_count",
|
| 99 |
+
"is_busday",
|
| 100 |
+
"busdaycalendar",
|
| 101 |
+
"isdtype",
|
| 102 |
+
"generic",
|
| 103 |
+
"unsignedinteger",
|
| 104 |
+
"character",
|
| 105 |
+
"inexact",
|
| 106 |
+
"number",
|
| 107 |
+
"integer",
|
| 108 |
+
"flexible",
|
| 109 |
+
"complexfloating",
|
| 110 |
+
"signedinteger",
|
| 111 |
+
"floating",
|
| 112 |
+
"bool",
|
| 113 |
+
"float16",
|
| 114 |
+
"float32",
|
| 115 |
+
"float64",
|
| 116 |
+
"longdouble",
|
| 117 |
+
"complex64",
|
| 118 |
+
"complex128",
|
| 119 |
+
"clongdouble",
|
| 120 |
+
"bytes_",
|
| 121 |
+
"str_",
|
| 122 |
+
"void",
|
| 123 |
+
"object_",
|
| 124 |
+
"datetime64",
|
| 125 |
+
"timedelta64",
|
| 126 |
+
"int8",
|
| 127 |
+
"byte",
|
| 128 |
+
"uint8",
|
| 129 |
+
"ubyte",
|
| 130 |
+
"int16",
|
| 131 |
+
"short",
|
| 132 |
+
"uint16",
|
| 133 |
+
"ushort",
|
| 134 |
+
"int32",
|
| 135 |
+
"intc",
|
| 136 |
+
"uint32",
|
| 137 |
+
"uintc",
|
| 138 |
+
"int64",
|
| 139 |
+
"long",
|
| 140 |
+
"uint64",
|
| 141 |
+
"ulong",
|
| 142 |
+
"longlong",
|
| 143 |
+
"ulonglong",
|
| 144 |
+
"intp",
|
| 145 |
+
"uintp",
|
| 146 |
+
"double",
|
| 147 |
+
"cdouble",
|
| 148 |
+
"single",
|
| 149 |
+
"csingle",
|
| 150 |
+
"half",
|
| 151 |
+
"bool_",
|
| 152 |
+
"int_",
|
| 153 |
+
"uint",
|
| 154 |
+
"uint128",
|
| 155 |
+
"uint256",
|
| 156 |
+
"int128",
|
| 157 |
+
"int256",
|
| 158 |
+
"float80",
|
| 159 |
+
"float96",
|
| 160 |
+
"float128",
|
| 161 |
+
"float256",
|
| 162 |
+
"complex160",
|
| 163 |
+
"complex192",
|
| 164 |
+
"complex256",
|
| 165 |
+
"complex512",
|
| 166 |
+
]
|
| 167 |
+
|
| 168 |
+
# Shape of the module-level ``typecodes`` mapping; values are pinned to the
# exact typecode strings defined in numerictypes.py.
@type_check_only
class _TypeCodes(TypedDict):
    Character: L['c']
    Integer: L['bhilqnp']
    UnsignedInteger: L['BHILQNP']
    Float: L['efdg']
    Complex: L['FDG']
    AllInteger: L['bBhHiIlLqQnNpP']
    AllFloat: L['efdgFDG']
    Datetime: L['Mm']
    All: L['?bhilqnpBHILQNPefdgFDGSUVOMm']

def isdtype(dtype: dtype[Any] | type[Any], kind: DTypeLike | tuple[DTypeLike, ...]) -> builtins.bool: ...

def issubdtype(arg1: DTypeLike, arg2: DTypeLike) -> builtins.bool: ...

typecodes: _TypeCodes
# Static view of the runtime ScalarType tuple: Python builtins followed by
# the NumPy scalar types in (kind, itemsize) sort order.
ScalarType: tuple[
    type[int],
    type[float],
    type[complex],
    type[builtins.bool],
    type[bytes],
    type[str],
    type[memoryview],
    type[np.bool],
    type[csingle],
    type[cdouble],
    type[clongdouble],
    type[half],
    type[single],
    type[double],
    type[longdouble],
    type[byte],
    type[short],
    type[intc],
    type[long],
    type[longlong],
    type[timedelta64],
    type[datetime64],
    type[object_],
    type[bytes_],
    type[str_],
    type[ubyte],
    type[ushort],
    type[uintc],
    type[ulong],
    type[ulonglong],
    type[void],
]
|
janus/lib/python3.10/site-packages/numpy/_core/printoptions.py
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Stores and defines the low-level format_options context variable.
|
| 3 |
+
|
| 4 |
+
This is defined in its own file outside of the arrayprint module
|
| 5 |
+
so we can import it from C while initializing the multiarray
|
| 6 |
+
C module during import without introducing circular dependencies.
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
import sys
|
| 10 |
+
from contextvars import ContextVar
|
| 11 |
+
|
| 12 |
+
__all__ = ["format_options"]
|
| 13 |
+
|
| 14 |
+
default_format_options_dict = {
|
| 15 |
+
"edgeitems": 3, # repr N leading and trailing items of each dimension
|
| 16 |
+
"threshold": 1000, # total items > triggers array summarization
|
| 17 |
+
"floatmode": "maxprec",
|
| 18 |
+
"precision": 8, # precision of floating point representations
|
| 19 |
+
"suppress": False, # suppress printing small floating values in exp format
|
| 20 |
+
"linewidth": 75,
|
| 21 |
+
"nanstr": "nan",
|
| 22 |
+
"infstr": "inf",
|
| 23 |
+
"sign": "-",
|
| 24 |
+
"formatter": None,
|
| 25 |
+
# Internally stored as an int to simplify comparisons; converted from/to
|
| 26 |
+
# str/False on the way in/out.
|
| 27 |
+
'legacy': sys.maxsize,
|
| 28 |
+
'override_repr': None,
|
| 29 |
+
}
|
| 30 |
+
|
| 31 |
+
format_options = ContextVar(
|
| 32 |
+
"format_options", default=default_format_options_dict.copy())
|
janus/lib/python3.10/site-packages/numpy/_core/records.pyi
ADDED
|
@@ -0,0 +1,347 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from _typeshed import StrOrBytesPath
|
| 2 |
+
from collections.abc import Sequence, Iterable
|
| 3 |
+
from types import EllipsisType
|
| 4 |
+
from typing import (
|
| 5 |
+
Any,
|
| 6 |
+
TypeAlias,
|
| 7 |
+
TypeVar,
|
| 8 |
+
overload,
|
| 9 |
+
Protocol,
|
| 10 |
+
SupportsIndex,
|
| 11 |
+
Literal,
|
| 12 |
+
type_check_only
|
| 13 |
+
)
|
| 14 |
+
|
| 15 |
+
from numpy import (
|
| 16 |
+
ndarray,
|
| 17 |
+
dtype,
|
| 18 |
+
generic,
|
| 19 |
+
void,
|
| 20 |
+
_ByteOrder,
|
| 21 |
+
_SupportsBuffer,
|
| 22 |
+
_OrderKACF,
|
| 23 |
+
)
|
| 24 |
+
|
| 25 |
+
from numpy._typing import (
|
| 26 |
+
ArrayLike,
|
| 27 |
+
DTypeLike,
|
| 28 |
+
NDArray,
|
| 29 |
+
_Shape,
|
| 30 |
+
_ShapeLike,
|
| 31 |
+
_ArrayLikeInt_co,
|
| 32 |
+
_ArrayLikeVoid_co,
|
| 33 |
+
_NestedSequence,
|
| 34 |
+
)
|
| 35 |
+
|
| 36 |
+
__all__ = [
|
| 37 |
+
"record",
|
| 38 |
+
"recarray",
|
| 39 |
+
"format_parser",
|
| 40 |
+
"fromarrays",
|
| 41 |
+
"fromrecords",
|
| 42 |
+
"fromstring",
|
| 43 |
+
"fromfile",
|
| 44 |
+
"array",
|
| 45 |
+
"find_duplicate",
|
| 46 |
+
]
|
| 47 |
+
|
| 48 |
+
_T = TypeVar("_T")
|
| 49 |
+
_SCT = TypeVar("_SCT", bound=generic)
|
| 50 |
+
_DType_co = TypeVar("_DType_co", bound=dtype[Any], covariant=True)
|
| 51 |
+
_ShapeT_co = TypeVar("_ShapeT_co", bound=tuple[int, ...], covariant=True)
|
| 52 |
+
|
| 53 |
+
_RecArray: TypeAlias = recarray[Any, dtype[_SCT]]
|
| 54 |
+
|
| 55 |
+
@type_check_only
|
| 56 |
+
class _SupportsReadInto(Protocol):
|
| 57 |
+
def seek(self, offset: int, whence: int, /) -> object: ...
|
| 58 |
+
def tell(self, /) -> int: ...
|
| 59 |
+
def readinto(self, buffer: memoryview, /) -> int: ...
|
| 60 |
+
|
| 61 |
+
class record(void):
|
| 62 |
+
def __getattribute__(self, attr: str) -> Any: ...
|
| 63 |
+
def __setattr__(self, attr: str, val: ArrayLike) -> None: ...
|
| 64 |
+
def pprint(self) -> str: ...
|
| 65 |
+
@overload
|
| 66 |
+
def __getitem__(self, key: str | SupportsIndex) -> Any: ...
|
| 67 |
+
@overload
|
| 68 |
+
def __getitem__(self, key: list[str]) -> record: ...
|
| 69 |
+
|
| 70 |
+
class recarray(ndarray[_ShapeT_co, _DType_co]):
|
| 71 |
+
# NOTE: While not strictly mandatory, we're demanding here that arguments
|
| 72 |
+
# for the `format_parser`- and `dtype`-based dtype constructors are
|
| 73 |
+
# mutually exclusive
|
| 74 |
+
@overload
|
| 75 |
+
def __new__(
|
| 76 |
+
subtype,
|
| 77 |
+
shape: _ShapeLike,
|
| 78 |
+
dtype: None = ...,
|
| 79 |
+
buf: None | _SupportsBuffer = ...,
|
| 80 |
+
offset: SupportsIndex = ...,
|
| 81 |
+
strides: None | _ShapeLike = ...,
|
| 82 |
+
*,
|
| 83 |
+
formats: DTypeLike,
|
| 84 |
+
names: None | str | Sequence[str] = ...,
|
| 85 |
+
titles: None | str | Sequence[str] = ...,
|
| 86 |
+
byteorder: None | _ByteOrder = ...,
|
| 87 |
+
aligned: bool = ...,
|
| 88 |
+
order: _OrderKACF = ...,
|
| 89 |
+
) -> recarray[Any, dtype[record]]: ...
|
| 90 |
+
@overload
|
| 91 |
+
def __new__(
|
| 92 |
+
subtype,
|
| 93 |
+
shape: _ShapeLike,
|
| 94 |
+
dtype: DTypeLike,
|
| 95 |
+
buf: None | _SupportsBuffer = ...,
|
| 96 |
+
offset: SupportsIndex = ...,
|
| 97 |
+
strides: None | _ShapeLike = ...,
|
| 98 |
+
formats: None = ...,
|
| 99 |
+
names: None = ...,
|
| 100 |
+
titles: None = ...,
|
| 101 |
+
byteorder: None = ...,
|
| 102 |
+
aligned: Literal[False] = ...,
|
| 103 |
+
order: _OrderKACF = ...,
|
| 104 |
+
) -> recarray[Any, dtype[Any]]: ...
|
| 105 |
+
def __array_finalize__(self, obj: object) -> None: ...
|
| 106 |
+
def __getattribute__(self, attr: str) -> Any: ...
|
| 107 |
+
def __setattr__(self, attr: str, val: ArrayLike) -> None: ...
|
| 108 |
+
@overload
|
| 109 |
+
def __getitem__(self, indx: (
|
| 110 |
+
SupportsIndex
|
| 111 |
+
| _ArrayLikeInt_co
|
| 112 |
+
| tuple[SupportsIndex | _ArrayLikeInt_co, ...]
|
| 113 |
+
)) -> Any: ...
|
| 114 |
+
@overload
|
| 115 |
+
def __getitem__(self: recarray[Any, dtype[void]], indx: (
|
| 116 |
+
None
|
| 117 |
+
| slice
|
| 118 |
+
| EllipsisType
|
| 119 |
+
| SupportsIndex
|
| 120 |
+
| _ArrayLikeInt_co
|
| 121 |
+
| tuple[None | slice | EllipsisType | _ArrayLikeInt_co | SupportsIndex, ...]
|
| 122 |
+
)) -> recarray[_Shape, _DType_co]: ...
|
| 123 |
+
@overload
|
| 124 |
+
def __getitem__(self, indx: (
|
| 125 |
+
None
|
| 126 |
+
| slice
|
| 127 |
+
| EllipsisType
|
| 128 |
+
| SupportsIndex
|
| 129 |
+
| _ArrayLikeInt_co
|
| 130 |
+
| tuple[None | slice | EllipsisType | _ArrayLikeInt_co | SupportsIndex, ...]
|
| 131 |
+
)) -> ndarray[_Shape, _DType_co]: ...
|
| 132 |
+
@overload
|
| 133 |
+
def __getitem__(self, indx: str) -> NDArray[Any]: ...
|
| 134 |
+
@overload
|
| 135 |
+
def __getitem__(self, indx: list[str]) -> recarray[_ShapeT_co, dtype[record]]: ...
|
| 136 |
+
@overload
|
| 137 |
+
def field(self, attr: int | str, val: None = ...) -> Any: ...
|
| 138 |
+
@overload
|
| 139 |
+
def field(self, attr: int | str, val: ArrayLike) -> None: ...
|
| 140 |
+
|
| 141 |
+
class format_parser:
|
| 142 |
+
dtype: dtype[void]
|
| 143 |
+
def __init__(
|
| 144 |
+
self,
|
| 145 |
+
formats: DTypeLike,
|
| 146 |
+
names: None | str | Sequence[str],
|
| 147 |
+
titles: None | str | Sequence[str],
|
| 148 |
+
aligned: bool = ...,
|
| 149 |
+
byteorder: None | _ByteOrder = ...,
|
| 150 |
+
) -> None: ...
|
| 151 |
+
|
| 152 |
+
@overload
|
| 153 |
+
def fromarrays(
|
| 154 |
+
arrayList: Iterable[ArrayLike],
|
| 155 |
+
dtype: DTypeLike = ...,
|
| 156 |
+
shape: None | _ShapeLike = ...,
|
| 157 |
+
formats: None = ...,
|
| 158 |
+
names: None = ...,
|
| 159 |
+
titles: None = ...,
|
| 160 |
+
aligned: bool = ...,
|
| 161 |
+
byteorder: None = ...,
|
| 162 |
+
) -> _RecArray[Any]: ...
|
| 163 |
+
@overload
|
| 164 |
+
def fromarrays(
|
| 165 |
+
arrayList: Iterable[ArrayLike],
|
| 166 |
+
dtype: None = ...,
|
| 167 |
+
shape: None | _ShapeLike = ...,
|
| 168 |
+
*,
|
| 169 |
+
formats: DTypeLike,
|
| 170 |
+
names: None | str | Sequence[str] = ...,
|
| 171 |
+
titles: None | str | Sequence[str] = ...,
|
| 172 |
+
aligned: bool = ...,
|
| 173 |
+
byteorder: None | _ByteOrder = ...,
|
| 174 |
+
) -> _RecArray[record]: ...
|
| 175 |
+
|
| 176 |
+
@overload
|
| 177 |
+
def fromrecords(
|
| 178 |
+
recList: _ArrayLikeVoid_co | tuple[Any, ...] | _NestedSequence[tuple[Any, ...]],
|
| 179 |
+
dtype: DTypeLike = ...,
|
| 180 |
+
shape: None | _ShapeLike = ...,
|
| 181 |
+
formats: None = ...,
|
| 182 |
+
names: None = ...,
|
| 183 |
+
titles: None = ...,
|
| 184 |
+
aligned: bool = ...,
|
| 185 |
+
byteorder: None = ...,
|
| 186 |
+
) -> _RecArray[record]: ...
|
| 187 |
+
@overload
|
| 188 |
+
def fromrecords(
|
| 189 |
+
recList: _ArrayLikeVoid_co | tuple[Any, ...] | _NestedSequence[tuple[Any, ...]],
|
| 190 |
+
dtype: None = ...,
|
| 191 |
+
shape: None | _ShapeLike = ...,
|
| 192 |
+
*,
|
| 193 |
+
formats: DTypeLike = ...,
|
| 194 |
+
names: None | str | Sequence[str] = ...,
|
| 195 |
+
titles: None | str | Sequence[str] = ...,
|
| 196 |
+
aligned: bool = ...,
|
| 197 |
+
byteorder: None | _ByteOrder = ...,
|
| 198 |
+
) -> _RecArray[record]: ...
|
| 199 |
+
|
| 200 |
+
@overload
|
| 201 |
+
def fromstring(
|
| 202 |
+
datastring: _SupportsBuffer,
|
| 203 |
+
dtype: DTypeLike,
|
| 204 |
+
shape: None | _ShapeLike = ...,
|
| 205 |
+
offset: int = ...,
|
| 206 |
+
formats: None = ...,
|
| 207 |
+
names: None = ...,
|
| 208 |
+
titles: None = ...,
|
| 209 |
+
aligned: bool = ...,
|
| 210 |
+
byteorder: None = ...,
|
| 211 |
+
) -> _RecArray[record]: ...
|
| 212 |
+
@overload
|
| 213 |
+
def fromstring(
|
| 214 |
+
datastring: _SupportsBuffer,
|
| 215 |
+
dtype: None = ...,
|
| 216 |
+
shape: None | _ShapeLike = ...,
|
| 217 |
+
offset: int = ...,
|
| 218 |
+
*,
|
| 219 |
+
formats: DTypeLike,
|
| 220 |
+
names: None | str | Sequence[str] = ...,
|
| 221 |
+
titles: None | str | Sequence[str] = ...,
|
| 222 |
+
aligned: bool = ...,
|
| 223 |
+
byteorder: None | _ByteOrder = ...,
|
| 224 |
+
) -> _RecArray[record]: ...
|
| 225 |
+
|
| 226 |
+
@overload
|
| 227 |
+
def fromfile(
|
| 228 |
+
fd: StrOrBytesPath | _SupportsReadInto,
|
| 229 |
+
dtype: DTypeLike,
|
| 230 |
+
shape: None | _ShapeLike = ...,
|
| 231 |
+
offset: int = ...,
|
| 232 |
+
formats: None = ...,
|
| 233 |
+
names: None = ...,
|
| 234 |
+
titles: None = ...,
|
| 235 |
+
aligned: bool = ...,
|
| 236 |
+
byteorder: None = ...,
|
| 237 |
+
) -> _RecArray[Any]: ...
|
| 238 |
+
@overload
|
| 239 |
+
def fromfile(
|
| 240 |
+
fd: StrOrBytesPath | _SupportsReadInto,
|
| 241 |
+
dtype: None = ...,
|
| 242 |
+
shape: None | _ShapeLike = ...,
|
| 243 |
+
offset: int = ...,
|
| 244 |
+
*,
|
| 245 |
+
formats: DTypeLike,
|
| 246 |
+
names: None | str | Sequence[str] = ...,
|
| 247 |
+
titles: None | str | Sequence[str] = ...,
|
| 248 |
+
aligned: bool = ...,
|
| 249 |
+
byteorder: None | _ByteOrder = ...,
|
| 250 |
+
) -> _RecArray[record]: ...
|
| 251 |
+
|
| 252 |
+
@overload
|
| 253 |
+
def array(
|
| 254 |
+
obj: _SCT | NDArray[_SCT],
|
| 255 |
+
dtype: None = ...,
|
| 256 |
+
shape: None | _ShapeLike = ...,
|
| 257 |
+
offset: int = ...,
|
| 258 |
+
formats: None = ...,
|
| 259 |
+
names: None = ...,
|
| 260 |
+
titles: None = ...,
|
| 261 |
+
aligned: bool = ...,
|
| 262 |
+
byteorder: None = ...,
|
| 263 |
+
copy: bool = ...,
|
| 264 |
+
) -> _RecArray[_SCT]: ...
|
| 265 |
+
@overload
|
| 266 |
+
def array(
|
| 267 |
+
obj: ArrayLike,
|
| 268 |
+
dtype: DTypeLike,
|
| 269 |
+
shape: None | _ShapeLike = ...,
|
| 270 |
+
offset: int = ...,
|
| 271 |
+
formats: None = ...,
|
| 272 |
+
names: None = ...,
|
| 273 |
+
titles: None = ...,
|
| 274 |
+
aligned: bool = ...,
|
| 275 |
+
byteorder: None = ...,
|
| 276 |
+
copy: bool = ...,
|
| 277 |
+
) -> _RecArray[Any]: ...
|
| 278 |
+
@overload
|
| 279 |
+
def array(
|
| 280 |
+
obj: ArrayLike,
|
| 281 |
+
dtype: None = ...,
|
| 282 |
+
shape: None | _ShapeLike = ...,
|
| 283 |
+
offset: int = ...,
|
| 284 |
+
*,
|
| 285 |
+
formats: DTypeLike,
|
| 286 |
+
names: None | str | Sequence[str] = ...,
|
| 287 |
+
titles: None | str | Sequence[str] = ...,
|
| 288 |
+
aligned: bool = ...,
|
| 289 |
+
byteorder: None | _ByteOrder = ...,
|
| 290 |
+
copy: bool = ...,
|
| 291 |
+
) -> _RecArray[record]: ...
|
| 292 |
+
@overload
|
| 293 |
+
def array(
|
| 294 |
+
obj: None,
|
| 295 |
+
dtype: DTypeLike,
|
| 296 |
+
shape: _ShapeLike,
|
| 297 |
+
offset: int = ...,
|
| 298 |
+
formats: None = ...,
|
| 299 |
+
names: None = ...,
|
| 300 |
+
titles: None = ...,
|
| 301 |
+
aligned: bool = ...,
|
| 302 |
+
byteorder: None = ...,
|
| 303 |
+
copy: bool = ...,
|
| 304 |
+
) -> _RecArray[Any]: ...
|
| 305 |
+
@overload
|
| 306 |
+
def array(
|
| 307 |
+
obj: None,
|
| 308 |
+
dtype: None = ...,
|
| 309 |
+
*,
|
| 310 |
+
shape: _ShapeLike,
|
| 311 |
+
offset: int = ...,
|
| 312 |
+
formats: DTypeLike,
|
| 313 |
+
names: None | str | Sequence[str] = ...,
|
| 314 |
+
titles: None | str | Sequence[str] = ...,
|
| 315 |
+
aligned: bool = ...,
|
| 316 |
+
byteorder: None | _ByteOrder = ...,
|
| 317 |
+
copy: bool = ...,
|
| 318 |
+
) -> _RecArray[record]: ...
|
| 319 |
+
@overload
|
| 320 |
+
def array(
|
| 321 |
+
obj: _SupportsReadInto,
|
| 322 |
+
dtype: DTypeLike,
|
| 323 |
+
shape: None | _ShapeLike = ...,
|
| 324 |
+
offset: int = ...,
|
| 325 |
+
formats: None = ...,
|
| 326 |
+
names: None = ...,
|
| 327 |
+
titles: None = ...,
|
| 328 |
+
aligned: bool = ...,
|
| 329 |
+
byteorder: None = ...,
|
| 330 |
+
copy: bool = ...,
|
| 331 |
+
) -> _RecArray[Any]: ...
|
| 332 |
+
@overload
|
| 333 |
+
def array(
|
| 334 |
+
obj: _SupportsReadInto,
|
| 335 |
+
dtype: None = ...,
|
| 336 |
+
shape: None | _ShapeLike = ...,
|
| 337 |
+
offset: int = ...,
|
| 338 |
+
*,
|
| 339 |
+
formats: DTypeLike,
|
| 340 |
+
names: None | str | Sequence[str] = ...,
|
| 341 |
+
titles: None | str | Sequence[str] = ...,
|
| 342 |
+
aligned: bool = ...,
|
| 343 |
+
byteorder: None | _ByteOrder = ...,
|
| 344 |
+
copy: bool = ...,
|
| 345 |
+
) -> _RecArray[record]: ...
|
| 346 |
+
|
| 347 |
+
def find_duplicate(list: Iterable[_T]) -> list[_T]: ...
|
janus/lib/python3.10/site-packages/numpy/_core/shape_base.py
ADDED
|
@@ -0,0 +1,1004 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
__all__ = ['atleast_1d', 'atleast_2d', 'atleast_3d', 'block', 'hstack',
|
| 2 |
+
'stack', 'unstack', 'vstack']
|
| 3 |
+
|
| 4 |
+
import functools
|
| 5 |
+
import itertools
|
| 6 |
+
import operator
|
| 7 |
+
|
| 8 |
+
from . import numeric as _nx
|
| 9 |
+
from . import overrides
|
| 10 |
+
from .multiarray import array, asanyarray, normalize_axis_index
|
| 11 |
+
from . import fromnumeric as _from_nx
|
| 12 |
+
|
| 13 |
+
array_function_dispatch = functools.partial(
|
| 14 |
+
overrides.array_function_dispatch, module='numpy')
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def _atleast_1d_dispatcher(*arys):
|
| 18 |
+
return arys
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
@array_function_dispatch(_atleast_1d_dispatcher)
|
| 22 |
+
def atleast_1d(*arys):
|
| 23 |
+
"""
|
| 24 |
+
Convert inputs to arrays with at least one dimension.
|
| 25 |
+
|
| 26 |
+
Scalar inputs are converted to 1-dimensional arrays, whilst
|
| 27 |
+
higher-dimensional inputs are preserved.
|
| 28 |
+
|
| 29 |
+
Parameters
|
| 30 |
+
----------
|
| 31 |
+
arys1, arys2, ... : array_like
|
| 32 |
+
One or more input arrays.
|
| 33 |
+
|
| 34 |
+
Returns
|
| 35 |
+
-------
|
| 36 |
+
ret : ndarray
|
| 37 |
+
An array, or tuple of arrays, each with ``a.ndim >= 1``.
|
| 38 |
+
Copies are made only if necessary.
|
| 39 |
+
|
| 40 |
+
See Also
|
| 41 |
+
--------
|
| 42 |
+
atleast_2d, atleast_3d
|
| 43 |
+
|
| 44 |
+
Examples
|
| 45 |
+
--------
|
| 46 |
+
>>> import numpy as np
|
| 47 |
+
>>> np.atleast_1d(1.0)
|
| 48 |
+
array([1.])
|
| 49 |
+
|
| 50 |
+
>>> x = np.arange(9.0).reshape(3,3)
|
| 51 |
+
>>> np.atleast_1d(x)
|
| 52 |
+
array([[0., 1., 2.],
|
| 53 |
+
[3., 4., 5.],
|
| 54 |
+
[6., 7., 8.]])
|
| 55 |
+
>>> np.atleast_1d(x) is x
|
| 56 |
+
True
|
| 57 |
+
|
| 58 |
+
>>> np.atleast_1d(1, [3, 4])
|
| 59 |
+
(array([1]), array([3, 4]))
|
| 60 |
+
|
| 61 |
+
"""
|
| 62 |
+
if len(arys) == 1:
|
| 63 |
+
result = asanyarray(arys[0])
|
| 64 |
+
if result.ndim == 0:
|
| 65 |
+
result = result.reshape(1)
|
| 66 |
+
return result
|
| 67 |
+
res = []
|
| 68 |
+
for ary in arys:
|
| 69 |
+
result = asanyarray(ary)
|
| 70 |
+
if result.ndim == 0:
|
| 71 |
+
result = result.reshape(1)
|
| 72 |
+
res.append(result)
|
| 73 |
+
return tuple(res)
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
def _atleast_2d_dispatcher(*arys):
|
| 77 |
+
return arys
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
@array_function_dispatch(_atleast_2d_dispatcher)
|
| 81 |
+
def atleast_2d(*arys):
|
| 82 |
+
"""
|
| 83 |
+
View inputs as arrays with at least two dimensions.
|
| 84 |
+
|
| 85 |
+
Parameters
|
| 86 |
+
----------
|
| 87 |
+
arys1, arys2, ... : array_like
|
| 88 |
+
One or more array-like sequences. Non-array inputs are converted
|
| 89 |
+
to arrays. Arrays that already have two or more dimensions are
|
| 90 |
+
preserved.
|
| 91 |
+
|
| 92 |
+
Returns
|
| 93 |
+
-------
|
| 94 |
+
res, res2, ... : ndarray
|
| 95 |
+
An array, or tuple of arrays, each with ``a.ndim >= 2``.
|
| 96 |
+
Copies are avoided where possible, and views with two or more
|
| 97 |
+
dimensions are returned.
|
| 98 |
+
|
| 99 |
+
See Also
|
| 100 |
+
--------
|
| 101 |
+
atleast_1d, atleast_3d
|
| 102 |
+
|
| 103 |
+
Examples
|
| 104 |
+
--------
|
| 105 |
+
>>> import numpy as np
|
| 106 |
+
>>> np.atleast_2d(3.0)
|
| 107 |
+
array([[3.]])
|
| 108 |
+
|
| 109 |
+
>>> x = np.arange(3.0)
|
| 110 |
+
>>> np.atleast_2d(x)
|
| 111 |
+
array([[0., 1., 2.]])
|
| 112 |
+
>>> np.atleast_2d(x).base is x
|
| 113 |
+
True
|
| 114 |
+
|
| 115 |
+
>>> np.atleast_2d(1, [1, 2], [[1, 2]])
|
| 116 |
+
(array([[1]]), array([[1, 2]]), array([[1, 2]]))
|
| 117 |
+
|
| 118 |
+
"""
|
| 119 |
+
res = []
|
| 120 |
+
for ary in arys:
|
| 121 |
+
ary = asanyarray(ary)
|
| 122 |
+
if ary.ndim == 0:
|
| 123 |
+
result = ary.reshape(1, 1)
|
| 124 |
+
elif ary.ndim == 1:
|
| 125 |
+
result = ary[_nx.newaxis, :]
|
| 126 |
+
else:
|
| 127 |
+
result = ary
|
| 128 |
+
res.append(result)
|
| 129 |
+
if len(res) == 1:
|
| 130 |
+
return res[0]
|
| 131 |
+
else:
|
| 132 |
+
return tuple(res)
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
def _atleast_3d_dispatcher(*arys):
|
| 136 |
+
return arys
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
@array_function_dispatch(_atleast_3d_dispatcher)
|
| 140 |
+
def atleast_3d(*arys):
|
| 141 |
+
"""
|
| 142 |
+
View inputs as arrays with at least three dimensions.
|
| 143 |
+
|
| 144 |
+
Parameters
|
| 145 |
+
----------
|
| 146 |
+
arys1, arys2, ... : array_like
|
| 147 |
+
One or more array-like sequences. Non-array inputs are converted to
|
| 148 |
+
arrays. Arrays that already have three or more dimensions are
|
| 149 |
+
preserved.
|
| 150 |
+
|
| 151 |
+
Returns
|
| 152 |
+
-------
|
| 153 |
+
res1, res2, ... : ndarray
|
| 154 |
+
An array, or tuple of arrays, each with ``a.ndim >= 3``. Copies are
|
| 155 |
+
avoided where possible, and views with three or more dimensions are
|
| 156 |
+
returned. For example, a 1-D array of shape ``(N,)`` becomes a view
|
| 157 |
+
of shape ``(1, N, 1)``, and a 2-D array of shape ``(M, N)`` becomes a
|
| 158 |
+
view of shape ``(M, N, 1)``.
|
| 159 |
+
|
| 160 |
+
See Also
|
| 161 |
+
--------
|
| 162 |
+
atleast_1d, atleast_2d
|
| 163 |
+
|
| 164 |
+
Examples
|
| 165 |
+
--------
|
| 166 |
+
>>> import numpy as np
|
| 167 |
+
>>> np.atleast_3d(3.0)
|
| 168 |
+
array([[[3.]]])
|
| 169 |
+
|
| 170 |
+
>>> x = np.arange(3.0)
|
| 171 |
+
>>> np.atleast_3d(x).shape
|
| 172 |
+
(1, 3, 1)
|
| 173 |
+
|
| 174 |
+
>>> x = np.arange(12.0).reshape(4,3)
|
| 175 |
+
>>> np.atleast_3d(x).shape
|
| 176 |
+
(4, 3, 1)
|
| 177 |
+
>>> np.atleast_3d(x).base is x.base # x is a reshape, so not base itself
|
| 178 |
+
True
|
| 179 |
+
|
| 180 |
+
>>> for arr in np.atleast_3d([1, 2], [[1, 2]], [[[1, 2]]]):
|
| 181 |
+
... print(arr, arr.shape) # doctest: +SKIP
|
| 182 |
+
...
|
| 183 |
+
[[[1]
|
| 184 |
+
[2]]] (1, 2, 1)
|
| 185 |
+
[[[1]
|
| 186 |
+
[2]]] (1, 2, 1)
|
| 187 |
+
[[[1 2]]] (1, 1, 2)
|
| 188 |
+
|
| 189 |
+
"""
|
| 190 |
+
res = []
|
| 191 |
+
for ary in arys:
|
| 192 |
+
ary = asanyarray(ary)
|
| 193 |
+
if ary.ndim == 0:
|
| 194 |
+
result = ary.reshape(1, 1, 1)
|
| 195 |
+
elif ary.ndim == 1:
|
| 196 |
+
result = ary[_nx.newaxis, :, _nx.newaxis]
|
| 197 |
+
elif ary.ndim == 2:
|
| 198 |
+
result = ary[:, :, _nx.newaxis]
|
| 199 |
+
else:
|
| 200 |
+
result = ary
|
| 201 |
+
res.append(result)
|
| 202 |
+
if len(res) == 1:
|
| 203 |
+
return res[0]
|
| 204 |
+
else:
|
| 205 |
+
return tuple(res)
|
| 206 |
+
|
| 207 |
+
|
| 208 |
+
def _arrays_for_stack_dispatcher(arrays):
|
| 209 |
+
if not hasattr(arrays, "__getitem__"):
|
| 210 |
+
raise TypeError('arrays to stack must be passed as a "sequence" type '
|
| 211 |
+
'such as list or tuple.')
|
| 212 |
+
|
| 213 |
+
return tuple(arrays)
|
| 214 |
+
|
| 215 |
+
|
| 216 |
+
def _vhstack_dispatcher(tup, *, dtype=None, casting=None):
|
| 217 |
+
return _arrays_for_stack_dispatcher(tup)
|
| 218 |
+
|
| 219 |
+
|
| 220 |
+
@array_function_dispatch(_vhstack_dispatcher)
|
| 221 |
+
def vstack(tup, *, dtype=None, casting="same_kind"):
|
| 222 |
+
"""
|
| 223 |
+
Stack arrays in sequence vertically (row wise).
|
| 224 |
+
|
| 225 |
+
This is equivalent to concatenation along the first axis after 1-D arrays
|
| 226 |
+
of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by
|
| 227 |
+
`vsplit`.
|
| 228 |
+
|
| 229 |
+
This function makes most sense for arrays with up to 3 dimensions. For
|
| 230 |
+
instance, for pixel-data with a height (first axis), width (second axis),
|
| 231 |
+
and r/g/b channels (third axis). The functions `concatenate`, `stack` and
|
| 232 |
+
`block` provide more general stacking and concatenation operations.
|
| 233 |
+
|
| 234 |
+
Parameters
|
| 235 |
+
----------
|
| 236 |
+
tup : sequence of ndarrays
|
| 237 |
+
The arrays must have the same shape along all but the first axis.
|
| 238 |
+
1-D arrays must have the same length. In the case of a single
|
| 239 |
+
array_like input, it will be treated as a sequence of arrays; i.e.,
|
| 240 |
+
each element along the zeroth axis is treated as a separate array.
|
| 241 |
+
|
| 242 |
+
dtype : str or dtype
|
| 243 |
+
If provided, the destination array will have this dtype. Cannot be
|
| 244 |
+
provided together with `out`.
|
| 245 |
+
|
| 246 |
+
.. versionadded:: 1.24
|
| 247 |
+
|
| 248 |
+
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
|
| 249 |
+
Controls what kind of data casting may occur. Defaults to 'same_kind'.
|
| 250 |
+
|
| 251 |
+
.. versionadded:: 1.24
|
| 252 |
+
|
| 253 |
+
Returns
|
| 254 |
+
-------
|
| 255 |
+
stacked : ndarray
|
| 256 |
+
The array formed by stacking the given arrays, will be at least 2-D.
|
| 257 |
+
|
| 258 |
+
See Also
|
| 259 |
+
--------
|
| 260 |
+
concatenate : Join a sequence of arrays along an existing axis.
|
| 261 |
+
stack : Join a sequence of arrays along a new axis.
|
| 262 |
+
block : Assemble an nd-array from nested lists of blocks.
|
| 263 |
+
hstack : Stack arrays in sequence horizontally (column wise).
|
| 264 |
+
dstack : Stack arrays in sequence depth wise (along third axis).
|
| 265 |
+
column_stack : Stack 1-D arrays as columns into a 2-D array.
|
| 266 |
+
vsplit : Split an array into multiple sub-arrays vertically (row-wise).
|
| 267 |
+
unstack : Split an array into a tuple of sub-arrays along an axis.
|
| 268 |
+
|
| 269 |
+
Examples
|
| 270 |
+
--------
|
| 271 |
+
>>> import numpy as np
|
| 272 |
+
>>> a = np.array([1, 2, 3])
|
| 273 |
+
>>> b = np.array([4, 5, 6])
|
| 274 |
+
>>> np.vstack((a,b))
|
| 275 |
+
array([[1, 2, 3],
|
| 276 |
+
[4, 5, 6]])
|
| 277 |
+
|
| 278 |
+
>>> a = np.array([[1], [2], [3]])
|
| 279 |
+
>>> b = np.array([[4], [5], [6]])
|
| 280 |
+
>>> np.vstack((a,b))
|
| 281 |
+
array([[1],
|
| 282 |
+
[2],
|
| 283 |
+
[3],
|
| 284 |
+
[4],
|
| 285 |
+
[5],
|
| 286 |
+
[6]])
|
| 287 |
+
|
| 288 |
+
"""
|
| 289 |
+
arrs = atleast_2d(*tup)
|
| 290 |
+
if not isinstance(arrs, tuple):
|
| 291 |
+
arrs = (arrs,)
|
| 292 |
+
return _nx.concatenate(arrs, 0, dtype=dtype, casting=casting)
|
| 293 |
+
|
| 294 |
+
|
| 295 |
+
@array_function_dispatch(_vhstack_dispatcher)
def hstack(tup, *, dtype=None, casting="same_kind"):
    """
    Stack arrays in sequence horizontally (column wise).

    This is equivalent to concatenation along the second axis, except for
    1-D arrays where it concatenates along the first axis. Rebuilds arrays
    divided by `hsplit`.

    This function makes most sense for arrays with up to 3 dimensions.
    The functions `concatenate`, `stack` and `block` provide more general
    stacking and concatenation operations.

    Parameters
    ----------
    tup : sequence of ndarrays
        The arrays must have the same shape along all but the second axis,
        except 1-D arrays which can be any length. A single array_like
        input is treated as a sequence of arrays.
    dtype : str or dtype
        If provided, the destination array will have this dtype. Cannot be
        provided together with `out`.

        .. versionadded:: 1.24

    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
        Controls what kind of data casting may occur. Defaults to
        'same_kind'.

        .. versionadded:: 1.24

    Returns
    -------
    stacked : ndarray
        The array formed by stacking the given arrays.

    See Also
    --------
    concatenate : Join a sequence of arrays along an existing axis.
    stack : Join a sequence of arrays along a new axis.
    block : Assemble an nd-array from nested lists of blocks.
    vstack : Stack arrays in sequence vertically (row wise).
    dstack : Stack arrays in sequence depth wise (along third axis).
    column_stack : Stack 1-D arrays as columns into a 2-D array.
    hsplit : Split an array into multiple sub-arrays horizontally
        (column-wise).
    unstack : Split an array into a tuple of sub-arrays along an axis.

    Examples
    --------
    >>> import numpy as np
    >>> a = np.array((1,2,3))
    >>> b = np.array((4,5,6))
    >>> np.hstack((a,b))
    array([1, 2, 3, 4, 5, 6])
    >>> a = np.array([[1],[2],[3]])
    >>> b = np.array([[4],[5],[6]])
    >>> np.hstack((a,b))
    array([[1, 4],
           [2, 5],
           [3, 6]])

    """
    promoted = atleast_1d(*tup)
    if not isinstance(promoted, tuple):
        promoted = (promoted,)
    # For 1-D inputs, axis 0 is the "horizontal" direction; otherwise
    # concatenate along the second axis.
    join_axis = 0 if promoted and promoted[0].ndim == 1 else 1
    return _nx.concatenate(promoted, join_axis, dtype=dtype, casting=casting)
|
| 368 |
+
|
| 369 |
+
|
| 370 |
+
def _stack_dispatcher(arrays, axis=None, out=None, *,
                      dtype=None, casting=None):
    # Dispatcher for stack(): the relevant arguments are the inputs plus
    # `out` when one is supplied.
    relevant = _arrays_for_stack_dispatcher(arrays)
    if out is None:
        # Typical case: only the input arrays participate in dispatch.
        return relevant
    return list(relevant) + [out]
|
| 378 |
+
|
| 379 |
+
|
| 380 |
+
@array_function_dispatch(_stack_dispatcher)
def stack(arrays, axis=0, out=None, *, dtype=None, casting="same_kind"):
    """
    Join a sequence of arrays along a new axis.

    The ``axis`` parameter specifies the index of the new axis in the
    dimensions of the result. For example, if ``axis=0`` it will be the
    first dimension and if ``axis=-1`` it will be the last dimension.

    Parameters
    ----------
    arrays : sequence of ndarrays
        Each array must have the same shape. A single array_like input is
        treated as a sequence of arrays.
    axis : int, optional
        The axis in the result array along which the input arrays are
        stacked.
    out : ndarray, optional
        If provided, the destination to place the result. The shape must
        be correct, matching that of what stack would have returned if no
        out argument were specified.
    dtype : str or dtype
        If provided, the destination array will have this dtype. Cannot be
        provided together with `out`.

        .. versionadded:: 1.24

    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
        Controls what kind of data casting may occur. Defaults to
        'same_kind'.

        .. versionadded:: 1.24

    Returns
    -------
    stacked : ndarray
        The stacked array has one more dimension than the input arrays.

    See Also
    --------
    concatenate : Join a sequence of arrays along an existing axis.
    block : Assemble an nd-array from nested lists of blocks.
    split : Split array into a list of multiple sub-arrays of equal size.
    unstack : Split an array into a tuple of sub-arrays along an axis.

    Examples
    --------
    >>> import numpy as np
    >>> a = np.array([1, 2, 3])
    >>> b = np.array([4, 5, 6])
    >>> np.stack((a, b))
    array([[1, 2, 3],
           [4, 5, 6]])

    >>> np.stack((a, b), axis=-1)
    array([[1, 4],
           [2, 5],
           [3, 6]])

    """
    converted = [asanyarray(a) for a in arrays]
    if not converted:
        raise ValueError('need at least one array to stack')

    if len({a.shape for a in converted}) != 1:
        raise ValueError('all input arrays must have the same shape')

    # The result gains one dimension; normalize `axis` against that rank
    # so negative values are accepted.
    out_ndim = converted[0].ndim + 1
    axis = normalize_axis_index(axis, out_ndim)

    # Insert a new length-1 axis at `axis` in each input, then concatenate
    # along that new axis.
    index = (slice(None),) * axis + (_nx.newaxis,)
    expanded = [a[index] for a in converted]
    return _nx.concatenate(expanded, axis=axis, out=out,
                           dtype=dtype, casting=casting)
|
| 469 |
+
|
| 470 |
+
def _unstack_dispatcher(x, /, *, axis=None):
|
| 471 |
+
return (x,)
|
| 472 |
+
|
| 473 |
+
@array_function_dispatch(_unstack_dispatcher)
def unstack(x, /, *, axis=0):
    """
    Split an array into a sequence of arrays along the given axis.

    The ``axis`` parameter specifies the dimension along which the array
    will be split. For example, if ``axis=0`` (the default) it will be the
    first dimension and if ``axis=-1`` it will be the last dimension.

    The result is a tuple of arrays split along ``axis``.

    .. versionadded:: 2.1.0

    Parameters
    ----------
    x : ndarray
        The array to be unstacked.
    axis : int, optional
        Axis along which the array will be split. Default: ``0``.

    Returns
    -------
    unstacked : tuple of ndarrays
        The unstacked arrays.

    See Also
    --------
    stack : Join a sequence of arrays along a new axis.
    concatenate : Join a sequence of arrays along an existing axis.
    block : Assemble an nd-array from nested lists of blocks.
    split : Split array into a list of multiple sub-arrays of equal size.

    Notes
    -----
    ``unstack`` serves as the reverse operation of :py:func:`stack`, i.e.,
    ``stack(unstack(x, axis=axis), axis=axis) == x``.

    This function is equivalent to ``tuple(np.moveaxis(x, axis, 0))``,
    since iterating on an array iterates along the first axis.

    Examples
    --------
    >>> arr = np.arange(24).reshape((2, 3, 4))
    >>> np.unstack(arr, axis=1)[0].shape
    (2, 4)
    >>> arr2 = np.stack(np.unstack(arr, axis=1), axis=1)
    >>> np.all(arr == arr2)
    np.True_

    """
    if x.ndim == 0:
        raise ValueError("Input array must be at least 1-d.")
    # Iterating an ndarray walks its first axis, so moving `axis` to the
    # front and tuple-unpacking splits along the requested axis.
    return tuple(_nx.moveaxis(x, axis, 0))
|
| 540 |
+
|
| 541 |
+
# Internal functions to eliminate the overhead of repeated dispatch in one of
# the two possible paths inside np.block.
# Use getattr to protect against __array_function__ being disabled.
# `__wrapped__` is the undecorated implementation behind the
# array_function_dispatch wrapper; falling back to the public function keeps
# this working when that attribute is absent.
_size = getattr(_from_nx.size, '__wrapped__', _from_nx.size)
_ndim = getattr(_from_nx.ndim, '__wrapped__', _from_nx.ndim)
_concatenate = getattr(_from_nx.concatenate,
                       '__wrapped__', _from_nx.concatenate)
|
| 548 |
+
|
| 549 |
+
|
| 550 |
+
def _block_format_index(index):
|
| 551 |
+
"""
|
| 552 |
+
Convert a list of indices ``[0, 1, 2]`` into ``"arrays[0][1][2]"``.
|
| 553 |
+
"""
|
| 554 |
+
idx_str = ''.join('[{}]'.format(i) for i in index if i is not None)
|
| 555 |
+
return 'arrays' + idx_str
|
| 556 |
+
|
| 557 |
+
|
| 558 |
+
def _block_check_depths_match(arrays, parent_index=[]):
    """
    Recursive function checking that the depths of nested lists in `arrays`
    all match. Mismatch raises a ValueError as described in the block
    docstring below.

    The entire index (rather than just the depth) needs to be calculated
    for each innermost list, in case an error needs to be raised, so that
    the index of the offending list can be printed as part of the error.

    Parameters
    ----------
    arrays : nested list of arrays
        The arrays to check
    parent_index : list of int
        The full index of `arrays` within the nested lists passed to
        `_block_check_depths_match` at the top of the recursion.

        .. note:: the mutable default is safe here: it is never mutated,
           only extended via ``parent_index + [i]``, which builds new lists.

    Returns
    -------
    first_index : list of int
        The full index of an element from the bottom of the nesting in
        `arrays`. If any element at the bottom is an empty list, this will
        refer to it, and the last index along the empty axis will be None.
    max_arr_ndim : int
        The maximum of the ndims of the arrays nested in `arrays`.
    final_size: int
        The number of elements in the final array. This is used to motivate
        the choice of algorithm using benchmarking wisdom.

    """
    if type(arrays) is tuple:
        # not strictly necessary, but saves us from:
        # - more than one way to do things - no point treating tuples like
        #   lists
        # - horribly confusing behaviour that results when tuples are
        #   treated like ndarray
        raise TypeError(
            '{} is a tuple. '
            'Only lists can be used to arrange blocks, and np.block does '
            'not allow implicit conversion from tuple to ndarray.'.format(
                _block_format_index(parent_index)
            )
        )
    elif type(arrays) is list and len(arrays) > 0:
        # Recurse into every child, tracking each child's full index.
        idxs_ndims = (_block_check_depths_match(arr, parent_index + [i])
                      for i, arr in enumerate(arrays))

        first_index, max_arr_ndim, final_size = next(idxs_ndims)
        for index, ndim, size in idxs_ndims:
            final_size += size
            if ndim > max_arr_ndim:
                max_arr_ndim = ndim
            if len(index) != len(first_index):
                raise ValueError(
                    "List depths are mismatched. First element was at depth "
                    "{}, but there is an element at depth {} ({})".format(
                        len(first_index),
                        len(index),
                        _block_format_index(index)
                    )
                )
            # propagate our flag that indicates an empty list at the bottom
            if index[-1] is None:
                first_index = index

        return first_index, max_arr_ndim, final_size
    elif type(arrays) is list and len(arrays) == 0:
        # We've 'bottomed out' on an empty list
        return parent_index + [None], 0, 0
    else:
        # We've 'bottomed out' - arrays is either a scalar or an array
        size = _size(arrays)
        return parent_index, _ndim(arrays), size
|
| 632 |
+
|
| 633 |
+
|
| 634 |
+
def _atleast_nd(a, ndim):
|
| 635 |
+
# Ensures `a` has at least `ndim` dimensions by prepending
|
| 636 |
+
# ones to `a.shape` as necessary
|
| 637 |
+
return array(a, ndmin=ndim, copy=None, subok=True)
|
| 638 |
+
|
| 639 |
+
|
| 640 |
+
def _accumulate(values):
|
| 641 |
+
return list(itertools.accumulate(values))
|
| 642 |
+
|
| 643 |
+
|
| 644 |
+
def _concatenate_shapes(shapes, axis):
|
| 645 |
+
"""Given array shapes, return the resulting shape and slices prefixes.
|
| 646 |
+
|
| 647 |
+
These help in nested concatenation.
|
| 648 |
+
|
| 649 |
+
Returns
|
| 650 |
+
-------
|
| 651 |
+
shape: tuple of int
|
| 652 |
+
This tuple satisfies::
|
| 653 |
+
|
| 654 |
+
shape, _ = _concatenate_shapes([arr.shape for shape in arrs], axis)
|
| 655 |
+
shape == concatenate(arrs, axis).shape
|
| 656 |
+
|
| 657 |
+
slice_prefixes: tuple of (slice(start, end), )
|
| 658 |
+
For a list of arrays being concatenated, this returns the slice
|
| 659 |
+
in the larger array at axis that needs to be sliced into.
|
| 660 |
+
|
| 661 |
+
For example, the following holds::
|
| 662 |
+
|
| 663 |
+
ret = concatenate([a, b, c], axis)
|
| 664 |
+
_, (sl_a, sl_b, sl_c) = concatenate_slices([a, b, c], axis)
|
| 665 |
+
|
| 666 |
+
ret[(slice(None),) * axis + sl_a] == a
|
| 667 |
+
ret[(slice(None),) * axis + sl_b] == b
|
| 668 |
+
ret[(slice(None),) * axis + sl_c] == c
|
| 669 |
+
|
| 670 |
+
These are called slice prefixes since they are used in the recursive
|
| 671 |
+
blocking algorithm to compute the left-most slices during the
|
| 672 |
+
recursion. Therefore, they must be prepended to rest of the slice
|
| 673 |
+
that was computed deeper in the recursion.
|
| 674 |
+
|
| 675 |
+
These are returned as tuples to ensure that they can quickly be added
|
| 676 |
+
to existing slice tuple without creating a new tuple every time.
|
| 677 |
+
|
| 678 |
+
"""
|
| 679 |
+
# Cache a result that will be reused.
|
| 680 |
+
shape_at_axis = [shape[axis] for shape in shapes]
|
| 681 |
+
|
| 682 |
+
# Take a shape, any shape
|
| 683 |
+
first_shape = shapes[0]
|
| 684 |
+
first_shape_pre = first_shape[:axis]
|
| 685 |
+
first_shape_post = first_shape[axis+1:]
|
| 686 |
+
|
| 687 |
+
if any(shape[:axis] != first_shape_pre or
|
| 688 |
+
shape[axis+1:] != first_shape_post for shape in shapes):
|
| 689 |
+
raise ValueError(
|
| 690 |
+
'Mismatched array shapes in block along axis {}.'.format(axis))
|
| 691 |
+
|
| 692 |
+
shape = (first_shape_pre + (sum(shape_at_axis),) + first_shape[axis+1:])
|
| 693 |
+
|
| 694 |
+
offsets_at_axis = _accumulate(shape_at_axis)
|
| 695 |
+
slice_prefixes = [(slice(start, end),)
|
| 696 |
+
for start, end in zip([0] + offsets_at_axis,
|
| 697 |
+
offsets_at_axis)]
|
| 698 |
+
return shape, slice_prefixes
|
| 699 |
+
|
| 700 |
+
|
| 701 |
+
def _block_info_recursion(arrays, max_depth, result_ndim, depth=0):
    """
    Returns the shape of the final array, along with a list
    of slices and a list of arrays that can be used for assignment inside the
    new array

    Parameters
    ----------
    arrays : nested list of arrays
        The arrays to check
    max_depth : list of int
        The number of nested lists
    result_ndim : int
        The number of dimensions in the final array.

    Returns
    -------
    shape : tuple of int
        The shape that the final array will take on.
    slices: list of tuple of slices
        The slices into the full array required for assignment. These are
        required to be prepended with ``(Ellipsis, )`` to obtain the correct
        final index.
    arrays: list of ndarray
        The data to assign to each slice of the full array

    """
    if depth < max_depth:
        # Recurse first, collecting each child's (shape, slices, arrays).
        shapes, slices, arrays = zip(
            *[_block_info_recursion(arr, max_depth, result_ndim, depth+1)
              for arr in arrays])

        # Nesting level `depth` concatenates along this axis (outermost
        # lists map to earlier axes of the result).
        axis = result_ndim - max_depth + depth
        shape, slice_prefixes = _concatenate_shapes(shapes, axis)

        # Prepend the slice prefix and flatten the slices
        slices = [slice_prefix + the_slice
                  for slice_prefix, inner_slices in zip(slice_prefixes, slices)
                  for the_slice in inner_slices]

        # Flatten the array list
        arrays = functools.reduce(operator.add, arrays)

        return shape, slices, arrays
    else:
        # We've 'bottomed out' - arrays is either a scalar or an array
        # type(arrays) is not list
        # Return the slice and the array inside a list to be consistent with
        # the recursive case.
        arr = _atleast_nd(arrays, result_ndim)
        return arr.shape, [()], [arr]
|
| 752 |
+
|
| 753 |
+
|
| 754 |
+
def _block(arrays, max_depth, result_ndim, depth=0):
    """
    Internal implementation of block based on repeated concatenation.
    `arrays` is the argument passed to
    block. `max_depth` is the depth of nested lists within `arrays` and
    `result_ndim` is the greatest of the dimensions of the arrays in
    `arrays` and the depth of the lists in `arrays` (see block docstring
    for details).
    """
    if depth >= max_depth:
        # We've 'bottomed out' - arrays is either a scalar or an array
        # (type(arrays) is not list).
        return _atleast_nd(arrays, result_ndim)
    pieces = [_block(sub, max_depth, result_ndim, depth + 1)
              for sub in arrays]
    # Deeper nesting levels concatenate along later (more negative) axes.
    return _concatenate(pieces, axis=-(max_depth - depth))
|
| 771 |
+
|
| 772 |
+
|
| 773 |
+
def _block_dispatcher(arrays):
|
| 774 |
+
# Use type(...) is list to match the behavior of np.block(), which special
|
| 775 |
+
# cases list specifically rather than allowing for generic iterables or
|
| 776 |
+
# tuple. Also, we know that list.__array_function__ will never exist.
|
| 777 |
+
if type(arrays) is list:
|
| 778 |
+
for subarrays in arrays:
|
| 779 |
+
yield from _block_dispatcher(subarrays)
|
| 780 |
+
else:
|
| 781 |
+
yield arrays
|
| 782 |
+
|
| 783 |
+
|
| 784 |
+
@array_function_dispatch(_block_dispatcher)
def block(arrays):
    """
    Assemble an nd-array from nested lists of blocks.

    Blocks in the innermost lists are concatenated (see `concatenate`)
    along the last dimension (-1), then these are concatenated along the
    second-last dimension (-2), and so on until the outermost list is
    reached.

    Blocks can be of any dimension, but will not be broadcasted using
    the normal rules. Instead, leading axes of size 1 are inserted,
    to make ``block.ndim`` the same for all blocks. This is primarily
    useful for working with scalars, and means that code like
    ``np.block([v, 1])`` is valid, where ``v.ndim == 1``.

    When the nested list is two levels deep, this allows block matrices
    to be constructed from their components.

    Parameters
    ----------
    arrays : nested list of array_like or scalars (but not tuples)
        If passed a single ndarray or scalar (a nested list of depth 0),
        this is returned unmodified (and not copied).

        Elements shapes must match along the appropriate axes (without
        broadcasting), but leading 1s will be prepended to the shape as
        necessary to make the dimensions match.

    Returns
    -------
    block_array : ndarray
        The array assembled from the given blocks.

        The dimensionality of the output is equal to the greatest of:

        * the dimensionality of all the inputs
        * the depth to which the input list is nested

    Raises
    ------
    ValueError
        * If list depths are mismatched - for instance, ``[[a, b], c]`` is
          illegal, and should be spelt ``[[a, b], [c]]``
        * If lists are empty - for instance, ``[[a, b], []]``

    See Also
    --------
    concatenate : Join a sequence of arrays along an existing axis.
    stack : Join a sequence of arrays along a new axis.
    vstack : Stack arrays in sequence vertically (row wise).
    hstack : Stack arrays in sequence horizontally (column wise).
    dstack : Stack arrays in sequence depth wise (along third axis).
    column_stack : Stack 1-D arrays as columns into a 2-D array.
    vsplit : Split an array into multiple sub-arrays vertically (row-wise).
    unstack : Split an array into a tuple of sub-arrays along an axis.

    Notes
    -----
    When called with only scalars, ``np.block`` is equivalent to an
    ndarray call. So ``np.block([[1, 2], [3, 4]])`` is equivalent to
    ``np.array([[1, 2], [3, 4]])``.

    Since concatenation happens along the last axis first, the blocks do
    not have to lie on a fixed grid. Matlab's "square bracket stacking",
    ``[A, B, ...; p, q, ...]``, is equivalent to
    ``np.block([[A, B, ...], [p, q, ...]])``.

    Examples
    --------
    The most common use of this function is to build a block matrix:

    >>> import numpy as np
    >>> A = np.eye(2) * 2
    >>> B = np.eye(3) * 3
    >>> np.block([
    ...     [A,               np.zeros((2, 3))],
    ...     [np.ones((3, 2)), B               ]
    ... ])
    array([[2., 0., 0., 0., 0.],
           [0., 2., 0., 0., 0.],
           [1., 1., 3., 0., 0.],
           [1., 1., 0., 3., 0.],
           [1., 1., 0., 0., 3.]])

    With a list of depth 1, `block` can be used as `hstack`; with a list
    of depth 2, in place of `vstack`:

    >>> a = np.array([1, 2, 3])
    >>> b = np.array([4, 5, 6])
    >>> np.block([a, b, 10])    # hstack([a, b, 10])
    array([ 1,  2,  3,  4,  5,  6, 10])
    >>> np.block([[a], [b]])    # vstack([a, b])
    array([[1, 2, 3],
           [4, 5, 6]])

    """
    arrays, list_ndim, result_ndim, final_size = _block_setup(arrays)

    # It was found through benchmarking that making an array of final size
    # around 256x256 was faster by straight concatenation on a
    # i7-7700HQ processor and dual channel ram 2400MHz.
    # It didn't seem to matter heavily on the dtype used.
    #
    # A 2D array using repeated concatenation requires 2 copies of the array.
    #
    # The fastest algorithm will depend on the ratio of CPU power to memory
    # speed.
    # One can monitor the results of the benchmark
    # https://pv.github.io/numpy-bench/#bench_shape_base.Block2D.time_block2d
    # to tune this parameter until a C version of the `_block_info_recursion`
    # algorithm is implemented which would likely be faster than the python
    # version.
    if list_ndim * final_size > (2 * 512 * 512):
        return _block_slicing(arrays, list_ndim, result_ndim)
    return _block_concatenate(arrays, list_ndim, result_ndim)
|
| 954 |
+
|
| 955 |
+
|
| 956 |
+
# These helper functions are mostly used for testing.
|
| 957 |
+
# They allow us to write tests that directly call `_block_slicing`
|
| 958 |
+
# or `_block_concatenate` without blocking large arrays to force the wisdom
|
| 959 |
+
# to trigger the desired path.
|
| 960 |
+
def _block_setup(arrays):
    """
    Returns
    (`arrays`, list_ndim, result_ndim, final_size)
    """
    bottom_index, arr_ndim, final_size = _block_check_depths_match(arrays)
    list_ndim = len(bottom_index)
    # A trailing None in the bottom index flags an empty list at the bottom
    # of the nesting, which np.block rejects.
    if bottom_index and bottom_index[-1] is None:
        raise ValueError(
            'List at {} cannot be empty'.format(
                _block_format_index(bottom_index)
            )
        )
    # The result rank is the larger of the array ranks and the list depth.
    return arrays, list_ndim, max(arr_ndim, list_ndim), final_size
|
| 975 |
+
|
| 976 |
+
|
| 977 |
+
def _block_slicing(arrays, list_ndim, result_ndim):
    # Compute the output shape together with the (slice, array) assignment
    # pairs, then allocate once and assign each piece into place.
    shape, slices, flat_arrays = _block_info_recursion(
        arrays, list_ndim, result_ndim)
    out_dtype = _nx.result_type(*[a.dtype for a in flat_arrays])

    # Prefer F order only when every input is F-contiguous and not all are
    # also C-contiguous.
    all_f = all(a.flags['F_CONTIGUOUS'] for a in flat_arrays)
    all_c = all(a.flags['C_CONTIGUOUS'] for a in flat_arrays)
    result = _nx.empty(shape=shape, dtype=out_dtype,
                       order='F' if all_f and not all_c else 'C')
    # Note: In a c implementation, the function
    # PyArray_CreateMultiSortedStridePerm could be used for more advanced
    # guessing of the desired order.

    for assignment_slice, piece in zip(slices, flat_arrays):
        result[(Ellipsis,) + assignment_slice] = piece
    return result
|
| 994 |
+
|
| 995 |
+
|
| 996 |
+
def _block_concatenate(arrays, list_ndim, result_ndim):
    joined = _block(arrays, list_ndim, result_ndim)
    if list_ndim != 0:
        return joined
    # Edge case: `arrays` was a single numpy array rather than a nested
    # list, so _block may have returned a view.  Copy so the caller owns
    # the data.  This might copy scalars or lists twice, but that isn't a
    # likely usecase for those interested in performance.
    return joined.copy()
|
janus/lib/python3.10/site-packages/numpy/_core/shape_base.pyi
ADDED
|
@@ -0,0 +1,147 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from collections.abc import Sequence
|
| 2 |
+
from typing import TypeVar, overload, Any, SupportsIndex
|
| 3 |
+
|
| 4 |
+
from numpy import generic, _CastingKind
|
| 5 |
+
from numpy._typing import (
|
| 6 |
+
NDArray,
|
| 7 |
+
ArrayLike,
|
| 8 |
+
DTypeLike,
|
| 9 |
+
_ArrayLike,
|
| 10 |
+
_DTypeLike,
|
| 11 |
+
)
|
| 12 |
+
|
| 13 |
+
__all__ = [
    "atleast_1d",
    "atleast_2d",
    "atleast_3d",
    "block",
    "hstack",
    "stack",
    "unstack",
    "vstack",
]

# Scalar-type variable used to propagate the element type through overloads.
_SCT = TypeVar("_SCT", bound=generic)
# Array-type variable; presumably used by `out=`-preserving overloads further
# down in this stub — confirm against the full file.
_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any])
|
| 26 |
+
|
| 27 |
+
# A known scalar type is preserved; otherwise the element type is Any.
# Multiple positional arguments yield a tuple of arrays.
@overload
def atleast_1d(arys: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ...
@overload
def atleast_1d(arys: ArrayLike, /) -> NDArray[Any]: ...
@overload
def atleast_1d(*arys: ArrayLike) -> tuple[NDArray[Any], ...]: ...
|
| 33 |
+
|
| 34 |
+
# Same overload pattern as atleast_1d: element type preserved when known,
# tuple return for multiple positional arguments.
@overload
def atleast_2d(arys: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ...
@overload
def atleast_2d(arys: ArrayLike, /) -> NDArray[Any]: ...
@overload
def atleast_2d(*arys: ArrayLike) -> tuple[NDArray[Any], ...]: ...
|
| 40 |
+
|
| 41 |
+
# Same overload pattern as atleast_1d/atleast_2d.
@overload
def atleast_3d(arys: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ...
@overload
def atleast_3d(arys: ArrayLike, /) -> NDArray[Any]: ...
@overload
def atleast_3d(*arys: ArrayLike) -> tuple[NDArray[Any], ...]: ...
|
| 47 |
+
|
| 48 |
+
@overload
|
| 49 |
+
def vstack(
|
| 50 |
+
tup: Sequence[_ArrayLike[_SCT]],
|
| 51 |
+
*,
|
| 52 |
+
dtype: None = ...,
|
| 53 |
+
casting: _CastingKind = ...
|
| 54 |
+
) -> NDArray[_SCT]: ...
|
| 55 |
+
@overload
|
| 56 |
+
def vstack(
|
| 57 |
+
tup: Sequence[ArrayLike],
|
| 58 |
+
*,
|
| 59 |
+
dtype: _DTypeLike[_SCT],
|
| 60 |
+
casting: _CastingKind = ...
|
| 61 |
+
) -> NDArray[_SCT]: ...
|
| 62 |
+
@overload
|
| 63 |
+
def vstack(
|
| 64 |
+
tup: Sequence[ArrayLike],
|
| 65 |
+
*,
|
| 66 |
+
dtype: DTypeLike = ...,
|
| 67 |
+
casting: _CastingKind = ...
|
| 68 |
+
) -> NDArray[Any]: ...
|
| 69 |
+
|
| 70 |
+
@overload
|
| 71 |
+
def hstack(
|
| 72 |
+
tup: Sequence[_ArrayLike[_SCT]],
|
| 73 |
+
*,
|
| 74 |
+
dtype: None = ...,
|
| 75 |
+
casting: _CastingKind = ...
|
| 76 |
+
) -> NDArray[_SCT]: ...
|
| 77 |
+
@overload
|
| 78 |
+
def hstack(
|
| 79 |
+
tup: Sequence[ArrayLike],
|
| 80 |
+
*,
|
| 81 |
+
dtype: _DTypeLike[_SCT],
|
| 82 |
+
casting: _CastingKind = ...
|
| 83 |
+
) -> NDArray[_SCT]: ...
|
| 84 |
+
@overload
|
| 85 |
+
def hstack(
|
| 86 |
+
tup: Sequence[ArrayLike],
|
| 87 |
+
*,
|
| 88 |
+
dtype: DTypeLike = ...,
|
| 89 |
+
casting: _CastingKind = ...
|
| 90 |
+
) -> NDArray[Any]: ...
|
| 91 |
+
|
| 92 |
+
@overload
|
| 93 |
+
def stack(
|
| 94 |
+
arrays: Sequence[_ArrayLike[_SCT]],
|
| 95 |
+
axis: SupportsIndex = ...,
|
| 96 |
+
out: None = ...,
|
| 97 |
+
*,
|
| 98 |
+
dtype: None = ...,
|
| 99 |
+
casting: _CastingKind = ...
|
| 100 |
+
) -> NDArray[_SCT]: ...
|
| 101 |
+
@overload
|
| 102 |
+
def stack(
|
| 103 |
+
arrays: Sequence[ArrayLike],
|
| 104 |
+
axis: SupportsIndex = ...,
|
| 105 |
+
out: None = ...,
|
| 106 |
+
*,
|
| 107 |
+
dtype: _DTypeLike[_SCT],
|
| 108 |
+
casting: _CastingKind = ...
|
| 109 |
+
) -> NDArray[_SCT]: ...
|
| 110 |
+
@overload
|
| 111 |
+
def stack(
|
| 112 |
+
arrays: Sequence[ArrayLike],
|
| 113 |
+
axis: SupportsIndex = ...,
|
| 114 |
+
out: None = ...,
|
| 115 |
+
*,
|
| 116 |
+
dtype: DTypeLike = ...,
|
| 117 |
+
casting: _CastingKind = ...
|
| 118 |
+
) -> NDArray[Any]: ...
|
| 119 |
+
@overload
|
| 120 |
+
def stack(
|
| 121 |
+
arrays: Sequence[ArrayLike],
|
| 122 |
+
axis: SupportsIndex = ...,
|
| 123 |
+
out: _ArrayType = ...,
|
| 124 |
+
*,
|
| 125 |
+
dtype: DTypeLike = ...,
|
| 126 |
+
casting: _CastingKind = ...
|
| 127 |
+
) -> _ArrayType: ...
|
| 128 |
+
|
| 129 |
+
@overload
|
| 130 |
+
def unstack(
|
| 131 |
+
array: _ArrayLike[_SCT],
|
| 132 |
+
/,
|
| 133 |
+
*,
|
| 134 |
+
axis: int = ...,
|
| 135 |
+
) -> tuple[NDArray[_SCT], ...]: ...
|
| 136 |
+
@overload
|
| 137 |
+
def unstack(
|
| 138 |
+
array: ArrayLike,
|
| 139 |
+
/,
|
| 140 |
+
*,
|
| 141 |
+
axis: int = ...,
|
| 142 |
+
) -> tuple[NDArray[Any], ...]: ...
|
| 143 |
+
|
| 144 |
+
@overload
|
| 145 |
+
def block(arrays: _ArrayLike[_SCT]) -> NDArray[_SCT]: ...
|
| 146 |
+
@overload
|
| 147 |
+
def block(arrays: ArrayLike) -> NDArray[Any]: ...
|
janus/lib/python3.10/site-packages/numpy/_core/strings.pyi
ADDED
|
@@ -0,0 +1,478 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any, overload, TypeAlias
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
from numpy._typing import (
|
| 5 |
+
NDArray,
|
| 6 |
+
_ArrayLikeStr_co as U_co,
|
| 7 |
+
_ArrayLikeBytes_co as S_co,
|
| 8 |
+
_ArrayLikeInt_co as i_co,
|
| 9 |
+
_ArrayLikeString_co as T_co,
|
| 10 |
+
_ArrayLikeAnyString_co as UST_co,
|
| 11 |
+
_Shape,
|
| 12 |
+
_SupportsArray,
|
| 13 |
+
)
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
_StringDTypeArray: TypeAlias = np.ndarray[_Shape, np.dtypes.StringDType]
|
| 17 |
+
_StringDTypeSupportsArray: TypeAlias = _SupportsArray[np.dtypes.StringDType]
|
| 18 |
+
_StringDTypeOrUnicodeArray: TypeAlias = np.ndarray[_Shape, np.dtype[np.str_]] | np.ndarray[_Shape, np.dtypes.StringDType]
|
| 19 |
+
|
| 20 |
+
@overload
|
| 21 |
+
def equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ...
|
| 22 |
+
@overload
|
| 23 |
+
def equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ...
|
| 24 |
+
@overload
|
| 25 |
+
def equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ...
|
| 26 |
+
|
| 27 |
+
@overload
|
| 28 |
+
def not_equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ...
|
| 29 |
+
@overload
|
| 30 |
+
def not_equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ...
|
| 31 |
+
@overload
|
| 32 |
+
def not_equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ...
|
| 33 |
+
|
| 34 |
+
@overload
|
| 35 |
+
def greater_equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ...
|
| 36 |
+
@overload
|
| 37 |
+
def greater_equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ...
|
| 38 |
+
@overload
|
| 39 |
+
def greater_equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ...
|
| 40 |
+
|
| 41 |
+
@overload
|
| 42 |
+
def less_equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ...
|
| 43 |
+
@overload
|
| 44 |
+
def less_equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ...
|
| 45 |
+
@overload
|
| 46 |
+
def less_equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ...
|
| 47 |
+
|
| 48 |
+
@overload
|
| 49 |
+
def greater(x1: U_co, x2: U_co) -> NDArray[np.bool]: ...
|
| 50 |
+
@overload
|
| 51 |
+
def greater(x1: S_co, x2: S_co) -> NDArray[np.bool]: ...
|
| 52 |
+
@overload
|
| 53 |
+
def greater(x1: T_co, x2: T_co) -> NDArray[np.bool]: ...
|
| 54 |
+
|
| 55 |
+
@overload
|
| 56 |
+
def less(x1: U_co, x2: U_co) -> NDArray[np.bool]: ...
|
| 57 |
+
@overload
|
| 58 |
+
def less(x1: S_co, x2: S_co) -> NDArray[np.bool]: ...
|
| 59 |
+
@overload
|
| 60 |
+
def less(x1: T_co, x2: T_co) -> NDArray[np.bool]: ...
|
| 61 |
+
|
| 62 |
+
# Element-wise string concatenation; the result type follows the inputs
# (str arrays -> str_, bytes arrays -> bytes_, StringDType -> StringDType).
# Fix: the final overload's second parameter was an unnamed bare `T_co`
# (a parameter literally named "T_co" with no annotation); name it `x2`
# to match the other overloads and allow keyword calls.
@overload
def add(x1: U_co, x2: U_co) -> NDArray[np.str_]: ...
@overload
def add(x1: S_co, x2: S_co) -> NDArray[np.bytes_]: ...
@overload
def add(x1: _StringDTypeSupportsArray, x2: _StringDTypeSupportsArray) -> _StringDTypeArray: ...
@overload
def add(x1: T_co, x2: T_co) -> _StringDTypeOrUnicodeArray: ...
|
| 70 |
+
|
| 71 |
+
@overload
|
| 72 |
+
def multiply(a: U_co, i: i_co) -> NDArray[np.str_]: ...
|
| 73 |
+
@overload
|
| 74 |
+
def multiply(a: S_co, i: i_co) -> NDArray[np.bytes_]: ...
|
| 75 |
+
@overload
|
| 76 |
+
def multiply(a: _StringDTypeSupportsArray, i: i_co) -> _StringDTypeArray: ...
|
| 77 |
+
@overload
|
| 78 |
+
def multiply(a: T_co, i: i_co) -> _StringDTypeOrUnicodeArray: ...
|
| 79 |
+
|
| 80 |
+
@overload
|
| 81 |
+
def mod(a: U_co, value: Any) -> NDArray[np.str_]: ...
|
| 82 |
+
@overload
|
| 83 |
+
def mod(a: S_co, value: Any) -> NDArray[np.bytes_]: ...
|
| 84 |
+
@overload
|
| 85 |
+
def mod(a: _StringDTypeSupportsArray, value: Any) -> _StringDTypeArray: ...
|
| 86 |
+
@overload
|
| 87 |
+
def mod(a: T_co, value: Any) -> _StringDTypeOrUnicodeArray: ...
|
| 88 |
+
|
| 89 |
+
def isalpha(x: UST_co) -> NDArray[np.bool]: ...
|
| 90 |
+
def isalnum(a: UST_co) -> NDArray[np.bool]: ...
|
| 91 |
+
def isdigit(x: UST_co) -> NDArray[np.bool]: ...
|
| 92 |
+
def isspace(x: UST_co) -> NDArray[np.bool]: ...
|
| 93 |
+
def isdecimal(x: U_co | T_co) -> NDArray[np.bool]: ...
|
| 94 |
+
def isnumeric(x: U_co | T_co) -> NDArray[np.bool]: ...
|
| 95 |
+
def islower(a: UST_co) -> NDArray[np.bool]: ...
|
| 96 |
+
def istitle(a: UST_co) -> NDArray[np.bool]: ...
|
| 97 |
+
def isupper(a: UST_co) -> NDArray[np.bool]: ...
|
| 98 |
+
|
| 99 |
+
def str_len(x: UST_co) -> NDArray[np.int_]: ...
|
| 100 |
+
|
| 101 |
+
@overload
|
| 102 |
+
def find(
|
| 103 |
+
a: U_co,
|
| 104 |
+
sub: U_co,
|
| 105 |
+
start: i_co = ...,
|
| 106 |
+
end: i_co | None = ...,
|
| 107 |
+
) -> NDArray[np.int_]: ...
|
| 108 |
+
@overload
|
| 109 |
+
def find(
|
| 110 |
+
a: S_co,
|
| 111 |
+
sub: S_co,
|
| 112 |
+
start: i_co = ...,
|
| 113 |
+
end: i_co | None = ...,
|
| 114 |
+
) -> NDArray[np.int_]: ...
|
| 115 |
+
@overload
|
| 116 |
+
def find(
|
| 117 |
+
a: T_co,
|
| 118 |
+
sub: T_co,
|
| 119 |
+
start: i_co = ...,
|
| 120 |
+
end: i_co | None = ...,
|
| 121 |
+
) -> NDArray[np.int_]: ...
|
| 122 |
+
|
| 123 |
+
@overload
|
| 124 |
+
def rfind(
|
| 125 |
+
a: U_co,
|
| 126 |
+
sub: U_co,
|
| 127 |
+
start: i_co = ...,
|
| 128 |
+
end: i_co | None = ...,
|
| 129 |
+
) -> NDArray[np.int_]: ...
|
| 130 |
+
@overload
|
| 131 |
+
def rfind(
|
| 132 |
+
a: S_co,
|
| 133 |
+
sub: S_co,
|
| 134 |
+
start: i_co = ...,
|
| 135 |
+
end: i_co | None = ...,
|
| 136 |
+
) -> NDArray[np.int_]: ...
|
| 137 |
+
@overload
|
| 138 |
+
def rfind(
|
| 139 |
+
a: T_co,
|
| 140 |
+
sub: T_co,
|
| 141 |
+
start: i_co = ...,
|
| 142 |
+
end: i_co | None = ...,
|
| 143 |
+
) -> NDArray[np.int_]: ...
|
| 144 |
+
|
| 145 |
+
@overload
|
| 146 |
+
def index(
|
| 147 |
+
a: U_co,
|
| 148 |
+
sub: U_co,
|
| 149 |
+
start: i_co = ...,
|
| 150 |
+
end: None | i_co = ...,
|
| 151 |
+
) -> NDArray[np.int_]: ...
|
| 152 |
+
@overload
|
| 153 |
+
def index(
|
| 154 |
+
a: S_co,
|
| 155 |
+
sub: S_co,
|
| 156 |
+
start: i_co = ...,
|
| 157 |
+
end: None | i_co = ...,
|
| 158 |
+
) -> NDArray[np.int_]: ...
|
| 159 |
+
@overload
|
| 160 |
+
def index(
|
| 161 |
+
a: T_co,
|
| 162 |
+
sub: T_co,
|
| 163 |
+
start: i_co = ...,
|
| 164 |
+
end: i_co | None = ...,
|
| 165 |
+
) -> NDArray[np.int_]: ...
|
| 166 |
+
|
| 167 |
+
@overload
|
| 168 |
+
def rindex(
|
| 169 |
+
a: U_co,
|
| 170 |
+
sub: U_co,
|
| 171 |
+
start: i_co = ...,
|
| 172 |
+
end: None | i_co = ...,
|
| 173 |
+
) -> NDArray[np.int_]: ...
|
| 174 |
+
@overload
|
| 175 |
+
def rindex(
|
| 176 |
+
a: S_co,
|
| 177 |
+
sub: S_co,
|
| 178 |
+
start: i_co = ...,
|
| 179 |
+
end: None | i_co = ...,
|
| 180 |
+
) -> NDArray[np.int_]: ...
|
| 181 |
+
@overload
|
| 182 |
+
def rindex(
|
| 183 |
+
a: T_co,
|
| 184 |
+
sub: T_co,
|
| 185 |
+
start: i_co = ...,
|
| 186 |
+
end: i_co | None = ...,
|
| 187 |
+
) -> NDArray[np.int_]: ...
|
| 188 |
+
|
| 189 |
+
@overload
|
| 190 |
+
def count(
|
| 191 |
+
a: U_co,
|
| 192 |
+
sub: U_co,
|
| 193 |
+
start: i_co = ...,
|
| 194 |
+
end: i_co | None = ...,
|
| 195 |
+
) -> NDArray[np.int_]: ...
|
| 196 |
+
@overload
|
| 197 |
+
def count(
|
| 198 |
+
a: S_co,
|
| 199 |
+
sub: S_co,
|
| 200 |
+
start: i_co = ...,
|
| 201 |
+
end: i_co | None = ...,
|
| 202 |
+
) -> NDArray[np.int_]: ...
|
| 203 |
+
@overload
|
| 204 |
+
def count(
|
| 205 |
+
a: T_co,
|
| 206 |
+
sub: T_co,
|
| 207 |
+
start: i_co = ...,
|
| 208 |
+
end: i_co | None = ...,
|
| 209 |
+
) -> NDArray[np.int_]: ...
|
| 210 |
+
|
| 211 |
+
# Element-wise test whether each string starts with `prefix`, optionally
# restricted to the [start, end) slice of each element.
# Fix: the StringDType overload named its second parameter `suffix`
# (copy-paste from `endswith`); the runtime function and the other
# overloads use `prefix`, so keyword calls failed to match this overload.
@overload
def startswith(
    a: U_co,
    prefix: U_co,
    start: i_co = ...,
    end: i_co | None = ...,
) -> NDArray[np.bool]: ...
@overload
def startswith(
    a: S_co,
    prefix: S_co,
    start: i_co = ...,
    end: i_co | None = ...,
) -> NDArray[np.bool]: ...
@overload
def startswith(
    a: T_co,
    prefix: T_co,
    start: i_co = ...,
    end: i_co | None = ...,
) -> NDArray[np.bool]: ...
|
| 232 |
+
|
| 233 |
+
@overload
|
| 234 |
+
def endswith(
|
| 235 |
+
a: U_co,
|
| 236 |
+
suffix: U_co,
|
| 237 |
+
start: i_co = ...,
|
| 238 |
+
end: i_co | None = ...,
|
| 239 |
+
) -> NDArray[np.bool]: ...
|
| 240 |
+
@overload
|
| 241 |
+
def endswith(
|
| 242 |
+
a: S_co,
|
| 243 |
+
suffix: S_co,
|
| 244 |
+
start: i_co = ...,
|
| 245 |
+
end: i_co | None = ...,
|
| 246 |
+
) -> NDArray[np.bool]: ...
|
| 247 |
+
@overload
|
| 248 |
+
def endswith(
|
| 249 |
+
a: T_co,
|
| 250 |
+
suffix: T_co,
|
| 251 |
+
start: i_co = ...,
|
| 252 |
+
end: i_co | None = ...,
|
| 253 |
+
) -> NDArray[np.bool]: ...
|
| 254 |
+
|
| 255 |
+
def decode(
|
| 256 |
+
a: S_co,
|
| 257 |
+
encoding: None | str = ...,
|
| 258 |
+
errors: None | str = ...,
|
| 259 |
+
) -> NDArray[np.str_]: ...
|
| 260 |
+
def encode(
|
| 261 |
+
a: U_co | T_co,
|
| 262 |
+
encoding: None | str = ...,
|
| 263 |
+
errors: None | str = ...,
|
| 264 |
+
) -> NDArray[np.bytes_]: ...
|
| 265 |
+
|
| 266 |
+
@overload
|
| 267 |
+
def expandtabs(a: U_co, tabsize: i_co = ...) -> NDArray[np.str_]: ...
|
| 268 |
+
@overload
|
| 269 |
+
def expandtabs(a: S_co, tabsize: i_co = ...) -> NDArray[np.bytes_]: ...
|
| 270 |
+
@overload
|
| 271 |
+
def expandtabs(a: _StringDTypeSupportsArray, tabsize: i_co = ...) -> _StringDTypeArray: ...
|
| 272 |
+
@overload
|
| 273 |
+
def expandtabs(a: T_co, tabsize: i_co = ...) -> _StringDTypeOrUnicodeArray: ...
|
| 274 |
+
|
| 275 |
+
@overload
|
| 276 |
+
def center(a: U_co, width: i_co, fillchar: U_co = ...) -> NDArray[np.str_]: ...
|
| 277 |
+
@overload
|
| 278 |
+
def center(a: S_co, width: i_co, fillchar: S_co = ...) -> NDArray[np.bytes_]: ...
|
| 279 |
+
@overload
|
| 280 |
+
def center(a: _StringDTypeSupportsArray, width: i_co, fillchar: _StringDTypeSupportsArray = ...) -> _StringDTypeArray: ...
|
| 281 |
+
@overload
|
| 282 |
+
def center(a: T_co, width: i_co, fillchar: T_co = ...) -> _StringDTypeOrUnicodeArray: ...
|
| 283 |
+
|
| 284 |
+
@overload
|
| 285 |
+
def ljust(a: U_co, width: i_co, fillchar: U_co = ...) -> NDArray[np.str_]: ...
|
| 286 |
+
@overload
|
| 287 |
+
def ljust(a: S_co, width: i_co, fillchar: S_co = ...) -> NDArray[np.bytes_]: ...
|
| 288 |
+
@overload
|
| 289 |
+
def ljust(a: _StringDTypeSupportsArray, width: i_co, fillchar: _StringDTypeSupportsArray = ...) -> _StringDTypeArray: ...
|
| 290 |
+
@overload
|
| 291 |
+
def ljust(a: T_co, width: i_co, fillchar: T_co = ...) -> _StringDTypeOrUnicodeArray: ...
|
| 292 |
+
|
| 293 |
+
@overload
|
| 294 |
+
def rjust(
|
| 295 |
+
a: U_co,
|
| 296 |
+
width: i_co,
|
| 297 |
+
fillchar: U_co = ...,
|
| 298 |
+
) -> NDArray[np.str_]: ...
|
| 299 |
+
@overload
|
| 300 |
+
def rjust(
|
| 301 |
+
a: S_co,
|
| 302 |
+
width: i_co,
|
| 303 |
+
fillchar: S_co = ...,
|
| 304 |
+
) -> NDArray[np.bytes_]: ...
|
| 305 |
+
@overload
|
| 306 |
+
def rjust(
|
| 307 |
+
a: _StringDTypeSupportsArray,
|
| 308 |
+
width: i_co,
|
| 309 |
+
fillchar: _StringDTypeSupportsArray = ...,
|
| 310 |
+
) -> _StringDTypeArray: ...
|
| 311 |
+
@overload
|
| 312 |
+
def rjust(
|
| 313 |
+
a: T_co,
|
| 314 |
+
width: i_co,
|
| 315 |
+
fillchar: T_co = ...,
|
| 316 |
+
) -> _StringDTypeOrUnicodeArray: ...
|
| 317 |
+
|
| 318 |
+
@overload
|
| 319 |
+
def lstrip(a: U_co, chars: None | U_co = ...) -> NDArray[np.str_]: ...
|
| 320 |
+
@overload
|
| 321 |
+
def lstrip(a: S_co, chars: None | S_co = ...) -> NDArray[np.bytes_]: ...
|
| 322 |
+
@overload
|
| 323 |
+
def lstrip(a: _StringDTypeSupportsArray, chars: None | _StringDTypeSupportsArray = ...) -> _StringDTypeArray: ...
|
| 324 |
+
@overload
|
| 325 |
+
def lstrip(a: T_co, chars: None | T_co = ...) -> _StringDTypeOrUnicodeArray: ...
|
| 326 |
+
|
| 327 |
+
# Element-wise right-strip of the characters in `chars` (whitespace when None).
# Fix: the first two overloads named the parameter `char` while the last two
# (and the sibling `lstrip`/`strip` stubs) use `chars`; unified on `chars`
# so keyword calls type-check consistently across all overloads.
@overload
def rstrip(a: U_co, chars: None | U_co = ...) -> NDArray[np.str_]: ...
@overload
def rstrip(a: S_co, chars: None | S_co = ...) -> NDArray[np.bytes_]: ...
@overload
def rstrip(a: _StringDTypeSupportsArray, chars: None | _StringDTypeSupportsArray = ...) -> _StringDTypeArray: ...
@overload
def rstrip(a: T_co, chars: None | T_co = ...) -> _StringDTypeOrUnicodeArray: ...
|
| 335 |
+
|
| 336 |
+
@overload
|
| 337 |
+
def strip(a: U_co, chars: None | U_co = ...) -> NDArray[np.str_]: ...
|
| 338 |
+
@overload
|
| 339 |
+
def strip(a: S_co, chars: None | S_co = ...) -> NDArray[np.bytes_]: ...
|
| 340 |
+
@overload
|
| 341 |
+
def strip(a: _StringDTypeSupportsArray, chars: None | _StringDTypeSupportsArray = ...) -> _StringDTypeArray: ...
|
| 342 |
+
@overload
|
| 343 |
+
def strip(a: T_co, chars: None | T_co = ...) -> _StringDTypeOrUnicodeArray: ...
|
| 344 |
+
|
| 345 |
+
@overload
|
| 346 |
+
def zfill(a: U_co, width: i_co) -> NDArray[np.str_]: ...
|
| 347 |
+
@overload
|
| 348 |
+
def zfill(a: S_co, width: i_co) -> NDArray[np.bytes_]: ...
|
| 349 |
+
@overload
|
| 350 |
+
def zfill(a: _StringDTypeSupportsArray, width: i_co) -> _StringDTypeArray: ...
|
| 351 |
+
@overload
|
| 352 |
+
def zfill(a: T_co, width: i_co) -> _StringDTypeOrUnicodeArray: ...
|
| 353 |
+
|
| 354 |
+
@overload
|
| 355 |
+
def upper(a: U_co) -> NDArray[np.str_]: ...
|
| 356 |
+
@overload
|
| 357 |
+
def upper(a: S_co) -> NDArray[np.bytes_]: ...
|
| 358 |
+
@overload
|
| 359 |
+
def upper(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ...
|
| 360 |
+
@overload
|
| 361 |
+
def upper(a: T_co) -> _StringDTypeOrUnicodeArray: ...
|
| 362 |
+
|
| 363 |
+
@overload
|
| 364 |
+
def lower(a: U_co) -> NDArray[np.str_]: ...
|
| 365 |
+
@overload
|
| 366 |
+
def lower(a: S_co) -> NDArray[np.bytes_]: ...
|
| 367 |
+
@overload
|
| 368 |
+
def lower(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ...
|
| 369 |
+
@overload
|
| 370 |
+
def lower(a: T_co) -> _StringDTypeOrUnicodeArray: ...
|
| 371 |
+
|
| 372 |
+
@overload
|
| 373 |
+
def swapcase(a: U_co) -> NDArray[np.str_]: ...
|
| 374 |
+
@overload
|
| 375 |
+
def swapcase(a: S_co) -> NDArray[np.bytes_]: ...
|
| 376 |
+
@overload
|
| 377 |
+
def swapcase(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ...
|
| 378 |
+
@overload
|
| 379 |
+
def swapcase(a: T_co) -> _StringDTypeOrUnicodeArray: ...
|
| 380 |
+
|
| 381 |
+
@overload
|
| 382 |
+
def capitalize(a: U_co) -> NDArray[np.str_]: ...
|
| 383 |
+
@overload
|
| 384 |
+
def capitalize(a: S_co) -> NDArray[np.bytes_]: ...
|
| 385 |
+
@overload
|
| 386 |
+
def capitalize(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ...
|
| 387 |
+
@overload
|
| 388 |
+
def capitalize(a: T_co) -> _StringDTypeOrUnicodeArray: ...
|
| 389 |
+
|
| 390 |
+
@overload
|
| 391 |
+
def title(a: U_co) -> NDArray[np.str_]: ...
|
| 392 |
+
@overload
|
| 393 |
+
def title(a: S_co) -> NDArray[np.bytes_]: ...
|
| 394 |
+
@overload
|
| 395 |
+
def title(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ...
|
| 396 |
+
@overload
|
| 397 |
+
def title(a: T_co) -> _StringDTypeOrUnicodeArray: ...
|
| 398 |
+
|
| 399 |
+
@overload
|
| 400 |
+
def replace(
|
| 401 |
+
a: U_co,
|
| 402 |
+
old: U_co,
|
| 403 |
+
new: U_co,
|
| 404 |
+
count: i_co = ...,
|
| 405 |
+
) -> NDArray[np.str_]: ...
|
| 406 |
+
@overload
|
| 407 |
+
def replace(
|
| 408 |
+
a: S_co,
|
| 409 |
+
old: S_co,
|
| 410 |
+
new: S_co,
|
| 411 |
+
count: i_co = ...,
|
| 412 |
+
) -> NDArray[np.bytes_]: ...
|
| 413 |
+
@overload
|
| 414 |
+
def replace(
|
| 415 |
+
a: _StringDTypeSupportsArray,
|
| 416 |
+
old: _StringDTypeSupportsArray,
|
| 417 |
+
new: _StringDTypeSupportsArray,
|
| 418 |
+
count: i_co = ...,
|
| 419 |
+
) -> _StringDTypeArray: ...
|
| 420 |
+
@overload
|
| 421 |
+
def replace(
|
| 422 |
+
a: T_co,
|
| 423 |
+
old: T_co,
|
| 424 |
+
new: T_co,
|
| 425 |
+
count: i_co = ...,
|
| 426 |
+
) -> _StringDTypeOrUnicodeArray: ...
|
| 427 |
+
|
| 428 |
+
@overload
|
| 429 |
+
def join(sep: U_co, seq: U_co) -> NDArray[np.str_]: ...
|
| 430 |
+
@overload
|
| 431 |
+
def join(sep: S_co, seq: S_co) -> NDArray[np.bytes_]: ...
|
| 432 |
+
@overload
|
| 433 |
+
def join(sep: _StringDTypeSupportsArray, seq: _StringDTypeSupportsArray) -> _StringDTypeArray: ...
|
| 434 |
+
@overload
|
| 435 |
+
def join(sep: T_co, seq: T_co) -> _StringDTypeOrUnicodeArray: ...
|
| 436 |
+
|
| 437 |
+
@overload
|
| 438 |
+
def partition(a: U_co, sep: U_co) -> NDArray[np.str_]: ...
|
| 439 |
+
@overload
|
| 440 |
+
def partition(a: S_co, sep: S_co) -> NDArray[np.bytes_]: ...
|
| 441 |
+
@overload
|
| 442 |
+
def partition(a: _StringDTypeSupportsArray, sep: _StringDTypeSupportsArray) -> _StringDTypeArray: ...
|
| 443 |
+
@overload
|
| 444 |
+
def partition(a: T_co, sep: T_co) -> _StringDTypeOrUnicodeArray: ...
|
| 445 |
+
|
| 446 |
+
@overload
|
| 447 |
+
def rpartition(a: U_co, sep: U_co) -> NDArray[np.str_]: ...
|
| 448 |
+
@overload
|
| 449 |
+
def rpartition(a: S_co, sep: S_co) -> NDArray[np.bytes_]: ...
|
| 450 |
+
@overload
|
| 451 |
+
def rpartition(a: _StringDTypeSupportsArray, sep: _StringDTypeSupportsArray) -> _StringDTypeArray: ...
|
| 452 |
+
@overload
|
| 453 |
+
def rpartition(a: T_co, sep: T_co) -> _StringDTypeOrUnicodeArray: ...
|
| 454 |
+
|
| 455 |
+
@overload
|
| 456 |
+
def translate(
|
| 457 |
+
a: U_co,
|
| 458 |
+
table: str,
|
| 459 |
+
deletechars: None | str = ...,
|
| 460 |
+
) -> NDArray[np.str_]: ...
|
| 461 |
+
@overload
|
| 462 |
+
def translate(
|
| 463 |
+
a: S_co,
|
| 464 |
+
table: str,
|
| 465 |
+
deletechars: None | str = ...,
|
| 466 |
+
) -> NDArray[np.bytes_]: ...
|
| 467 |
+
@overload
|
| 468 |
+
def translate(
|
| 469 |
+
a: _StringDTypeSupportsArray,
|
| 470 |
+
table: str,
|
| 471 |
+
deletechars: None | str = ...,
|
| 472 |
+
) -> _StringDTypeArray: ...
|
| 473 |
+
@overload
|
| 474 |
+
def translate(
|
| 475 |
+
a: T_co,
|
| 476 |
+
table: str,
|
| 477 |
+
deletechars: None | str = ...,
|
| 478 |
+
) -> _StringDTypeOrUnicodeArray: ...
|
janus/lib/python3.10/site-packages/numpy/_core/tests/_locales.py
ADDED
|
@@ -0,0 +1,72 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Provide class for testing in French locale
|
| 2 |
+
|
| 3 |
+
"""
|
| 4 |
+
import sys
|
| 5 |
+
import locale
|
| 6 |
+
|
| 7 |
+
import pytest
|
| 8 |
+
|
| 9 |
+
# Fix: must be lowercase ``__all__`` — the import machinery ignores
# ``__ALL__``, so ``from _locales import *`` did not restrict exports.
__all__ = ['CommaDecimalPointLocale']
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def find_comma_decimal_point_locale():
    """See if platform has a decimal point as comma locale.

    Find a locale that uses a comma instead of a period as the
    decimal point.

    Returns
    -------
    old_locale: str
        Locale when the function was called.
    new_locale: {str, None}
        First French locale found, None if none found.

    """
    # Candidate names differ per platform: Windows has its own naming
    # scheme, POSIX systems use language_COUNTRY[.encoding] names.
    if sys.platform == 'win32':
        locales = ['FRENCH']
    else:
        locales = ['fr_FR', 'fr_FR.UTF-8', 'fi_FI', 'fi_FI.UTF-8']

    old_locale = locale.getlocale(locale.LC_NUMERIC)
    new_locale = None
    try:
        # Probe candidates by actually setting them; unavailable locales
        # raise locale.Error and are skipped.
        for loc in locales:
            try:
                locale.setlocale(locale.LC_NUMERIC, loc)
                new_locale = loc
                break
            except locale.Error:
                pass
    finally:
        # Always restore the caller's locale, even when a probe succeeded.
        locale.setlocale(locale.LC_NUMERIC, locale=old_locale)
    return old_locale, new_locale
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
class CommaDecimalPointLocale:
    """Sets LC_NUMERIC to a locale with comma as decimal point.

    Classes derived from this class have setup and teardown methods that run
    tests with locale.LC_NUMERIC set to a locale where commas (',') are used as
    the decimal point instead of periods ('.'). On exit the locale is restored
    to the initial locale. It also serves as context manager with the same
    effect. If no such locale is available, the test is skipped.

    """
    # Probed once at class-definition (import) time: cur_locale is the
    # locale in effect then; tst_locale is a comma-decimal locale or None.
    (cur_locale, tst_locale) = find_comma_decimal_point_locale()

    def setup_method(self):
        # Skip (rather than fail) on platforms without a suitable locale.
        if self.tst_locale is None:
            pytest.skip("No French locale available")
        locale.setlocale(locale.LC_NUMERIC, locale=self.tst_locale)

    def teardown_method(self):
        locale.setlocale(locale.LC_NUMERIC, locale=self.cur_locale)

    def __enter__(self):
        # NOTE(review): returns None, so `with CommaDecimalPointLocale():`
        # works but an `as` target would bind None.
        if self.tst_locale is None:
            pytest.skip("No French locale available")
        locale.setlocale(locale.LC_NUMERIC, locale=self.tst_locale)

    def __exit__(self, type, value, traceback):
        # Restore the original locale regardless of exceptions in the body.
        locale.setlocale(locale.LC_NUMERIC, locale=self.cur_locale)
|
janus/lib/python3.10/site-packages/numpy/_core/tests/test__exceptions.py
ADDED
|
@@ -0,0 +1,89 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Tests of the ._exceptions module. Primarily for exercising the __str__ methods.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
import pickle
|
| 6 |
+
|
| 7 |
+
import pytest
|
| 8 |
+
import numpy as np
|
| 9 |
+
from numpy.exceptions import AxisError
|
| 10 |
+
|
| 11 |
+
_ArrayMemoryError = np._core._exceptions._ArrayMemoryError
|
| 12 |
+
_UFuncNoLoopError = np._core._exceptions._UFuncNoLoopError
|
| 13 |
+
|
| 14 |
+
class TestArrayMemoryError:
    def test_pickling(self):
        """ Test that _ArrayMemoryError can be pickled """
        error = _ArrayMemoryError((1023,), np.dtype(np.uint8))
        res = pickle.loads(pickle.dumps(error))
        # the computed total size must survive the pickle round-trip
        assert res._total_size == error._total_size

    def test_str(self):
        # only checks __str__ does not raise; the exact text is not pinned
        e = _ArrayMemoryError((1023,), np.dtype(np.uint8))
        str(e) # not crashing is enough

    # testing these properties is easier than testing the full string repr
    def test__size_to_string(self):
        """ Test e._size_to_string """
        f = _ArrayMemoryError._size_to_string
        Ki = 1024
        # below 1 KiB the size is reported in raw bytes
        # (note the formatting quirk: '1 bytes', not '1 byte')
        assert f(0) == '0 bytes'
        assert f(1) == '1 bytes'
        assert f(1023) == '1023 bytes'
        # binary (IEC) prefixes, three significant digits
        assert f(Ki) == '1.00 KiB'
        assert f(Ki+1) == '1.00 KiB'
        assert f(10*Ki) == '10.0 KiB'
        assert f(int(999.4*Ki)) == '999. KiB'
        assert f(int(1023.4*Ki)) == '1023. KiB'
        assert f(int(1023.5*Ki)) == '1.00 MiB'
        assert f(Ki*Ki) == '1.00 MiB'

        # 1023.9999 Mib should round to 1 GiB
        assert f(int(Ki*Ki*Ki*0.9999)) == '1.00 GiB'
        assert f(Ki*Ki*Ki*Ki*Ki*Ki) == '1.00 EiB'
        # larger than sys.maxsize, adding larger prefixes isn't going to help
        # anyway.
        assert f(Ki*Ki*Ki*Ki*Ki*Ki*123456) == '123456. EiB'

    def test__total_size(self):
        """ Test e._total_size """
        # 1 element * 1-byte itemsize
        e = _ArrayMemoryError((1,), np.dtype(np.uint8))
        assert e._total_size == 1

        # 2*4 = 8 elements; sub-array dtype (uint64 x 16) -> 128-byte itemsize
        e = _ArrayMemoryError((2, 4), np.dtype((np.uint64, 16)))
        assert e._total_size == 1024
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
class TestUFuncNoLoopError:
|
| 58 |
+
def test_pickling(self):
|
| 59 |
+
""" Test that _UFuncNoLoopError can be pickled """
|
| 60 |
+
assert isinstance(pickle.dumps(_UFuncNoLoopError), bytes)
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
@pytest.mark.parametrize("args", [
    # (axis, ndim, msg_prefix) constructor forms
    (2, 1, None),
    (2, 1, "test_prefix"),
    # single-argument form: a plain message, no axis/ndim recorded
    ("test message",),
])
class TestAxisError:
    def test_attr(self, args):
        """Validate attribute types."""
        exc = AxisError(*args)
        if len(args) == 1:
            # message-only construction leaves axis/ndim unset
            assert exc.axis is None
            assert exc.ndim is None
        else:
            axis, ndim, *_ = args
            assert exc.axis == axis
            assert exc.ndim == ndim

    def test_pickling(self, args):
        """Test that `AxisError` can be pickled."""
        exc = AxisError(*args)
        exc2 = pickle.loads(pickle.dumps(exc))

        # type and every documented attribute must survive the round-trip
        assert type(exc) is type(exc2)
        for name in ("axis", "ndim", "args"):
            attr1 = getattr(exc, name)
            attr2 = getattr(exc2, name)
            assert attr1 == attr2, name
|
janus/lib/python3.10/site-packages/numpy/_core/tests/test_api.py
ADDED
|
@@ -0,0 +1,616 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sys
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
import numpy._core.umath as ncu
|
| 5 |
+
from numpy._core._rational_tests import rational
|
| 6 |
+
import pytest
|
| 7 |
+
from numpy.testing import (
|
| 8 |
+
assert_, assert_equal, assert_array_equal, assert_raises, assert_warns,
|
| 9 |
+
HAS_REFCOUNT
|
| 10 |
+
)
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def test_array_array():
|
| 14 |
+
tobj = type(object)
|
| 15 |
+
ones11 = np.ones((1, 1), np.float64)
|
| 16 |
+
tndarray = type(ones11)
|
| 17 |
+
# Test is_ndarray
|
| 18 |
+
assert_equal(np.array(ones11, dtype=np.float64), ones11)
|
| 19 |
+
if HAS_REFCOUNT:
|
| 20 |
+
old_refcount = sys.getrefcount(tndarray)
|
| 21 |
+
np.array(ones11)
|
| 22 |
+
assert_equal(old_refcount, sys.getrefcount(tndarray))
|
| 23 |
+
|
| 24 |
+
# test None
|
| 25 |
+
assert_equal(np.array(None, dtype=np.float64),
|
| 26 |
+
np.array(np.nan, dtype=np.float64))
|
| 27 |
+
if HAS_REFCOUNT:
|
| 28 |
+
old_refcount = sys.getrefcount(tobj)
|
| 29 |
+
np.array(None, dtype=np.float64)
|
| 30 |
+
assert_equal(old_refcount, sys.getrefcount(tobj))
|
| 31 |
+
|
| 32 |
+
# test scalar
|
| 33 |
+
assert_equal(np.array(1.0, dtype=np.float64),
|
| 34 |
+
np.ones((), dtype=np.float64))
|
| 35 |
+
if HAS_REFCOUNT:
|
| 36 |
+
old_refcount = sys.getrefcount(np.float64)
|
| 37 |
+
np.array(np.array(1.0, dtype=np.float64), dtype=np.float64)
|
| 38 |
+
assert_equal(old_refcount, sys.getrefcount(np.float64))
|
| 39 |
+
|
| 40 |
+
# test string
|
| 41 |
+
S2 = np.dtype((bytes, 2))
|
| 42 |
+
S3 = np.dtype((bytes, 3))
|
| 43 |
+
S5 = np.dtype((bytes, 5))
|
| 44 |
+
assert_equal(np.array(b"1.0", dtype=np.float64),
|
| 45 |
+
np.ones((), dtype=np.float64))
|
| 46 |
+
assert_equal(np.array(b"1.0").dtype, S3)
|
| 47 |
+
assert_equal(np.array(b"1.0", dtype=bytes).dtype, S3)
|
| 48 |
+
assert_equal(np.array(b"1.0", dtype=S2), np.array(b"1."))
|
| 49 |
+
assert_equal(np.array(b"1", dtype=S5), np.ones((), dtype=S5))
|
| 50 |
+
|
| 51 |
+
# test string
|
| 52 |
+
U2 = np.dtype((str, 2))
|
| 53 |
+
U3 = np.dtype((str, 3))
|
| 54 |
+
U5 = np.dtype((str, 5))
|
| 55 |
+
assert_equal(np.array("1.0", dtype=np.float64),
|
| 56 |
+
np.ones((), dtype=np.float64))
|
| 57 |
+
assert_equal(np.array("1.0").dtype, U3)
|
| 58 |
+
assert_equal(np.array("1.0", dtype=str).dtype, U3)
|
| 59 |
+
assert_equal(np.array("1.0", dtype=U2), np.array(str("1.")))
|
| 60 |
+
assert_equal(np.array("1", dtype=U5), np.ones((), dtype=U5))
|
| 61 |
+
|
| 62 |
+
builtins = getattr(__builtins__, '__dict__', __builtins__)
|
| 63 |
+
assert_(hasattr(builtins, 'get'))
|
| 64 |
+
|
| 65 |
+
# test memoryview
|
| 66 |
+
dat = np.array(memoryview(b'1.0'), dtype=np.float64)
|
| 67 |
+
assert_equal(dat, [49.0, 46.0, 48.0])
|
| 68 |
+
assert_(dat.dtype.type is np.float64)
|
| 69 |
+
|
| 70 |
+
dat = np.array(memoryview(b'1.0'))
|
| 71 |
+
assert_equal(dat, [49, 46, 48])
|
| 72 |
+
assert_(dat.dtype.type is np.uint8)
|
| 73 |
+
|
| 74 |
+
# test array interface
|
| 75 |
+
a = np.array(100.0, dtype=np.float64)
|
| 76 |
+
o = type("o", (object,),
|
| 77 |
+
dict(__array_interface__=a.__array_interface__))
|
| 78 |
+
assert_equal(np.array(o, dtype=np.float64), a)
|
| 79 |
+
|
| 80 |
+
# test array_struct interface
|
| 81 |
+
a = np.array([(1, 4.0, 'Hello'), (2, 6.0, 'World')],
|
| 82 |
+
dtype=[('f0', int), ('f1', float), ('f2', str)])
|
| 83 |
+
o = type("o", (object,),
|
| 84 |
+
dict(__array_struct__=a.__array_struct__))
|
| 85 |
+
## wasn't what I expected... is np.array(o) supposed to equal a ?
|
| 86 |
+
## instead we get a array([...], dtype=">V18")
|
| 87 |
+
assert_equal(bytes(np.array(o).data), bytes(a.data))
|
| 88 |
+
|
| 89 |
+
# test array
|
| 90 |
+
def custom__array__(self, dtype=None, copy=None):
|
| 91 |
+
return np.array(100.0, dtype=dtype, copy=copy)
|
| 92 |
+
|
| 93 |
+
o = type("o", (object,), dict(__array__=custom__array__))()
|
| 94 |
+
assert_equal(np.array(o, dtype=np.float64), np.array(100.0, np.float64))
|
| 95 |
+
|
| 96 |
+
# test recursion
|
| 97 |
+
nested = 1.5
|
| 98 |
+
for i in range(ncu.MAXDIMS):
|
| 99 |
+
nested = [nested]
|
| 100 |
+
|
| 101 |
+
# no error
|
| 102 |
+
np.array(nested)
|
| 103 |
+
|
| 104 |
+
# Exceeds recursion limit
|
| 105 |
+
assert_raises(ValueError, np.array, [nested], dtype=np.float64)
|
| 106 |
+
|
| 107 |
+
# Try with lists...
|
| 108 |
+
# float32
|
| 109 |
+
assert_equal(np.array([None] * 10, dtype=np.float32),
|
| 110 |
+
np.full((10,), np.nan, dtype=np.float32))
|
| 111 |
+
assert_equal(np.array([[None]] * 10, dtype=np.float32),
|
| 112 |
+
np.full((10, 1), np.nan, dtype=np.float32))
|
| 113 |
+
assert_equal(np.array([[None] * 10], dtype=np.float32),
|
| 114 |
+
np.full((1, 10), np.nan, dtype=np.float32))
|
| 115 |
+
assert_equal(np.array([[None] * 10] * 10, dtype=np.float32),
|
| 116 |
+
np.full((10, 10), np.nan, dtype=np.float32))
|
| 117 |
+
# float64
|
| 118 |
+
assert_equal(np.array([None] * 10, dtype=np.float64),
|
| 119 |
+
np.full((10,), np.nan, dtype=np.float64))
|
| 120 |
+
assert_equal(np.array([[None]] * 10, dtype=np.float64),
|
| 121 |
+
np.full((10, 1), np.nan, dtype=np.float64))
|
| 122 |
+
assert_equal(np.array([[None] * 10], dtype=np.float64),
|
| 123 |
+
np.full((1, 10), np.nan, dtype=np.float64))
|
| 124 |
+
assert_equal(np.array([[None] * 10] * 10, dtype=np.float64),
|
| 125 |
+
np.full((10, 10), np.nan, dtype=np.float64))
|
| 126 |
+
|
| 127 |
+
assert_equal(np.array([1.0] * 10, dtype=np.float64),
|
| 128 |
+
np.ones((10,), dtype=np.float64))
|
| 129 |
+
assert_equal(np.array([[1.0]] * 10, dtype=np.float64),
|
| 130 |
+
np.ones((10, 1), dtype=np.float64))
|
| 131 |
+
assert_equal(np.array([[1.0] * 10], dtype=np.float64),
|
| 132 |
+
np.ones((1, 10), dtype=np.float64))
|
| 133 |
+
assert_equal(np.array([[1.0] * 10] * 10, dtype=np.float64),
|
| 134 |
+
np.ones((10, 10), dtype=np.float64))
|
| 135 |
+
|
| 136 |
+
# Try with tuples
|
| 137 |
+
assert_equal(np.array((None,) * 10, dtype=np.float64),
|
| 138 |
+
np.full((10,), np.nan, dtype=np.float64))
|
| 139 |
+
assert_equal(np.array([(None,)] * 10, dtype=np.float64),
|
| 140 |
+
np.full((10, 1), np.nan, dtype=np.float64))
|
| 141 |
+
assert_equal(np.array([(None,) * 10], dtype=np.float64),
|
| 142 |
+
np.full((1, 10), np.nan, dtype=np.float64))
|
| 143 |
+
assert_equal(np.array([(None,) * 10] * 10, dtype=np.float64),
|
| 144 |
+
np.full((10, 10), np.nan, dtype=np.float64))
|
| 145 |
+
|
| 146 |
+
assert_equal(np.array((1.0,) * 10, dtype=np.float64),
|
| 147 |
+
np.ones((10,), dtype=np.float64))
|
| 148 |
+
assert_equal(np.array([(1.0,)] * 10, dtype=np.float64),
|
| 149 |
+
np.ones((10, 1), dtype=np.float64))
|
| 150 |
+
assert_equal(np.array([(1.0,) * 10], dtype=np.float64),
|
| 151 |
+
np.ones((1, 10), dtype=np.float64))
|
| 152 |
+
assert_equal(np.array([(1.0,) * 10] * 10, dtype=np.float64),
|
| 153 |
+
np.ones((10, 10), dtype=np.float64))
|
| 154 |
+
|
| 155 |
+
@pytest.mark.parametrize("array", [True, False])
|
| 156 |
+
def test_array_impossible_casts(array):
|
| 157 |
+
# All builtin types can be forcibly cast, at least theoretically,
|
| 158 |
+
# but user dtypes cannot necessarily.
|
| 159 |
+
rt = rational(1, 2)
|
| 160 |
+
if array:
|
| 161 |
+
rt = np.array(rt)
|
| 162 |
+
with assert_raises(TypeError):
|
| 163 |
+
np.array(rt, dtype="M8")
|
| 164 |
+
|
| 165 |
+
|
| 166 |
+
def test_array_astype():
|
| 167 |
+
a = np.arange(6, dtype='f4').reshape(2, 3)
|
| 168 |
+
# Default behavior: allows unsafe casts, keeps memory layout,
|
| 169 |
+
# always copies.
|
| 170 |
+
b = a.astype('i4')
|
| 171 |
+
assert_equal(a, b)
|
| 172 |
+
assert_equal(b.dtype, np.dtype('i4'))
|
| 173 |
+
assert_equal(a.strides, b.strides)
|
| 174 |
+
b = a.T.astype('i4')
|
| 175 |
+
assert_equal(a.T, b)
|
| 176 |
+
assert_equal(b.dtype, np.dtype('i4'))
|
| 177 |
+
assert_equal(a.T.strides, b.strides)
|
| 178 |
+
b = a.astype('f4')
|
| 179 |
+
assert_equal(a, b)
|
| 180 |
+
assert_(not (a is b))
|
| 181 |
+
|
| 182 |
+
# copy=False parameter skips a copy
|
| 183 |
+
b = a.astype('f4', copy=False)
|
| 184 |
+
assert_(a is b)
|
| 185 |
+
|
| 186 |
+
# order parameter allows overriding of the memory layout,
|
| 187 |
+
# forcing a copy if the layout is wrong
|
| 188 |
+
b = a.astype('f4', order='F', copy=False)
|
| 189 |
+
assert_equal(a, b)
|
| 190 |
+
assert_(not (a is b))
|
| 191 |
+
assert_(b.flags.f_contiguous)
|
| 192 |
+
|
| 193 |
+
b = a.astype('f4', order='C', copy=False)
|
| 194 |
+
assert_equal(a, b)
|
| 195 |
+
assert_(a is b)
|
| 196 |
+
assert_(b.flags.c_contiguous)
|
| 197 |
+
|
| 198 |
+
# casting parameter allows catching bad casts
|
| 199 |
+
b = a.astype('c8', casting='safe')
|
| 200 |
+
assert_equal(a, b)
|
| 201 |
+
assert_equal(b.dtype, np.dtype('c8'))
|
| 202 |
+
|
| 203 |
+
assert_raises(TypeError, a.astype, 'i4', casting='safe')
|
| 204 |
+
|
| 205 |
+
# subok=False passes through a non-subclassed array
|
| 206 |
+
b = a.astype('f4', subok=0, copy=False)
|
| 207 |
+
assert_(a is b)
|
| 208 |
+
|
| 209 |
+
class MyNDArray(np.ndarray):
|
| 210 |
+
pass
|
| 211 |
+
|
| 212 |
+
a = np.array([[0, 1, 2], [3, 4, 5]], dtype='f4').view(MyNDArray)
|
| 213 |
+
|
| 214 |
+
# subok=True passes through a subclass
|
| 215 |
+
b = a.astype('f4', subok=True, copy=False)
|
| 216 |
+
assert_(a is b)
|
| 217 |
+
|
| 218 |
+
# subok=True is default, and creates a subtype on a cast
|
| 219 |
+
b = a.astype('i4', copy=False)
|
| 220 |
+
assert_equal(a, b)
|
| 221 |
+
assert_equal(type(b), MyNDArray)
|
| 222 |
+
|
| 223 |
+
# subok=False never returns a subclass
|
| 224 |
+
b = a.astype('f4', subok=False, copy=False)
|
| 225 |
+
assert_equal(a, b)
|
| 226 |
+
assert_(not (a is b))
|
| 227 |
+
assert_(type(b) is not MyNDArray)
|
| 228 |
+
|
| 229 |
+
# Make sure converting from string object to fixed length string
|
| 230 |
+
# does not truncate.
|
| 231 |
+
a = np.array([b'a'*100], dtype='O')
|
| 232 |
+
b = a.astype('S')
|
| 233 |
+
assert_equal(a, b)
|
| 234 |
+
assert_equal(b.dtype, np.dtype('S100'))
|
| 235 |
+
a = np.array(['a'*100], dtype='O')
|
| 236 |
+
b = a.astype('U')
|
| 237 |
+
assert_equal(a, b)
|
| 238 |
+
assert_equal(b.dtype, np.dtype('U100'))
|
| 239 |
+
|
| 240 |
+
# Same test as above but for strings shorter than 64 characters
|
| 241 |
+
a = np.array([b'a'*10], dtype='O')
|
| 242 |
+
b = a.astype('S')
|
| 243 |
+
assert_equal(a, b)
|
| 244 |
+
assert_equal(b.dtype, np.dtype('S10'))
|
| 245 |
+
a = np.array(['a'*10], dtype='O')
|
| 246 |
+
b = a.astype('U')
|
| 247 |
+
assert_equal(a, b)
|
| 248 |
+
assert_equal(b.dtype, np.dtype('U10'))
|
| 249 |
+
|
| 250 |
+
a = np.array(123456789012345678901234567890, dtype='O').astype('S')
|
| 251 |
+
assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30'))
|
| 252 |
+
a = np.array(123456789012345678901234567890, dtype='O').astype('U')
|
| 253 |
+
assert_array_equal(a, np.array('1234567890' * 3, dtype='U30'))
|
| 254 |
+
|
| 255 |
+
a = np.array([123456789012345678901234567890], dtype='O').astype('S')
|
| 256 |
+
assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30'))
|
| 257 |
+
a = np.array([123456789012345678901234567890], dtype='O').astype('U')
|
| 258 |
+
assert_array_equal(a, np.array('1234567890' * 3, dtype='U30'))
|
| 259 |
+
|
| 260 |
+
a = np.array(123456789012345678901234567890, dtype='S')
|
| 261 |
+
assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30'))
|
| 262 |
+
a = np.array(123456789012345678901234567890, dtype='U')
|
| 263 |
+
assert_array_equal(a, np.array('1234567890' * 3, dtype='U30'))
|
| 264 |
+
|
| 265 |
+
a = np.array('a\u0140', dtype='U')
|
| 266 |
+
b = np.ndarray(buffer=a, dtype='uint32', shape=2)
|
| 267 |
+
assert_(b.size == 2)
|
| 268 |
+
|
| 269 |
+
a = np.array([1000], dtype='i4')
|
| 270 |
+
assert_raises(TypeError, a.astype, 'S1', casting='safe')
|
| 271 |
+
|
| 272 |
+
a = np.array(1000, dtype='i4')
|
| 273 |
+
assert_raises(TypeError, a.astype, 'U1', casting='safe')
|
| 274 |
+
|
| 275 |
+
# gh-24023
|
| 276 |
+
assert_raises(TypeError, a.astype)
|
| 277 |
+
|
| 278 |
+
@pytest.mark.parametrize("dt", ["S", "U"])
|
| 279 |
+
def test_array_astype_to_string_discovery_empty(dt):
|
| 280 |
+
# See also gh-19085
|
| 281 |
+
arr = np.array([""], dtype=object)
|
| 282 |
+
# Note, the itemsize is the `0 -> 1` logic, which should change.
|
| 283 |
+
# The important part the test is rather that it does not error.
|
| 284 |
+
assert arr.astype(dt).dtype.itemsize == np.dtype(f"{dt}1").itemsize
|
| 285 |
+
|
| 286 |
+
# check the same thing for `np.can_cast` (since it accepts arrays)
|
| 287 |
+
assert np.can_cast(arr, dt, casting="unsafe")
|
| 288 |
+
assert not np.can_cast(arr, dt, casting="same_kind")
|
| 289 |
+
# as well as for the object as a descriptor:
|
| 290 |
+
assert np.can_cast("O", dt, casting="unsafe")
|
| 291 |
+
|
| 292 |
+
@pytest.mark.parametrize("dt", ["d", "f", "S13", "U32"])
|
| 293 |
+
def test_array_astype_to_void(dt):
|
| 294 |
+
dt = np.dtype(dt)
|
| 295 |
+
arr = np.array([], dtype=dt)
|
| 296 |
+
assert arr.astype("V").dtype.itemsize == dt.itemsize
|
| 297 |
+
|
| 298 |
+
def test_object_array_astype_to_void():
|
| 299 |
+
# This is different to `test_array_astype_to_void` as object arrays
|
| 300 |
+
# are inspected. The default void is "V8" (8 is the length of double)
|
| 301 |
+
arr = np.array([], dtype="O").astype("V")
|
| 302 |
+
assert arr.dtype == "V8"
|
| 303 |
+
|
| 304 |
+
@pytest.mark.parametrize("t",
|
| 305 |
+
np._core.sctypes['uint'] +
|
| 306 |
+
np._core.sctypes['int'] +
|
| 307 |
+
np._core.sctypes['float']
|
| 308 |
+
)
|
| 309 |
+
def test_array_astype_warning(t):
|
| 310 |
+
# test ComplexWarning when casting from complex to float or int
|
| 311 |
+
a = np.array(10, dtype=np.complex128)
|
| 312 |
+
assert_warns(np.exceptions.ComplexWarning, a.astype, t)
|
| 313 |
+
|
| 314 |
+
@pytest.mark.parametrize(["dtype", "out_dtype"],
|
| 315 |
+
[(np.bytes_, np.bool),
|
| 316 |
+
(np.str_, np.bool),
|
| 317 |
+
(np.dtype("S10,S9"), np.dtype("?,?")),
|
| 318 |
+
# The following also checks unaligned unicode access:
|
| 319 |
+
(np.dtype("S7,U9"), np.dtype("?,?"))])
|
| 320 |
+
def test_string_to_boolean_cast(dtype, out_dtype):
|
| 321 |
+
# Only the last two (empty) strings are falsy (the `\0` is stripped):
|
| 322 |
+
arr = np.array(
|
| 323 |
+
["10", "10\0\0\0", "0\0\0", "0", "False", " ", "", "\0"],
|
| 324 |
+
dtype=dtype)
|
| 325 |
+
expected = np.array(
|
| 326 |
+
[True, True, True, True, True, True, False, False],
|
| 327 |
+
dtype=out_dtype)
|
| 328 |
+
assert_array_equal(arr.astype(out_dtype), expected)
|
| 329 |
+
# As it's similar, check that nonzero behaves the same (structs are
|
| 330 |
+
# nonzero if all entries are)
|
| 331 |
+
assert_array_equal(np.nonzero(arr), np.nonzero(expected))
|
| 332 |
+
|
| 333 |
+
@pytest.mark.parametrize("str_type", [str, bytes, np.str_])
|
| 334 |
+
@pytest.mark.parametrize("scalar_type",
|
| 335 |
+
[np.complex64, np.complex128, np.clongdouble])
|
| 336 |
+
def test_string_to_complex_cast(str_type, scalar_type):
|
| 337 |
+
value = scalar_type(b"1+3j")
|
| 338 |
+
assert scalar_type(value) == 1+3j
|
| 339 |
+
assert np.array([value], dtype=object).astype(scalar_type)[()] == 1+3j
|
| 340 |
+
assert np.array(value).astype(scalar_type)[()] == 1+3j
|
| 341 |
+
arr = np.zeros(1, dtype=scalar_type)
|
| 342 |
+
arr[0] = value
|
| 343 |
+
assert arr[0] == 1+3j
|
| 344 |
+
|
| 345 |
+
@pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
|
| 346 |
+
def test_none_to_nan_cast(dtype):
|
| 347 |
+
# Note that at the time of writing this test, the scalar constructors
|
| 348 |
+
# reject None
|
| 349 |
+
arr = np.zeros(1, dtype=dtype)
|
| 350 |
+
arr[0] = None
|
| 351 |
+
assert np.isnan(arr)[0]
|
| 352 |
+
assert np.isnan(np.array(None, dtype=dtype))[()]
|
| 353 |
+
assert np.isnan(np.array([None], dtype=dtype))[0]
|
| 354 |
+
assert np.isnan(np.array(None).astype(dtype))[()]
|
| 355 |
+
|
| 356 |
+
def test_copyto_fromscalar():
|
| 357 |
+
a = np.arange(6, dtype='f4').reshape(2, 3)
|
| 358 |
+
|
| 359 |
+
# Simple copy
|
| 360 |
+
np.copyto(a, 1.5)
|
| 361 |
+
assert_equal(a, 1.5)
|
| 362 |
+
np.copyto(a.T, 2.5)
|
| 363 |
+
assert_equal(a, 2.5)
|
| 364 |
+
|
| 365 |
+
# Where-masked copy
|
| 366 |
+
mask = np.array([[0, 1, 0], [0, 0, 1]], dtype='?')
|
| 367 |
+
np.copyto(a, 3.5, where=mask)
|
| 368 |
+
assert_equal(a, [[2.5, 3.5, 2.5], [2.5, 2.5, 3.5]])
|
| 369 |
+
mask = np.array([[0, 1], [1, 1], [1, 0]], dtype='?')
|
| 370 |
+
np.copyto(a.T, 4.5, where=mask)
|
| 371 |
+
assert_equal(a, [[2.5, 4.5, 4.5], [4.5, 4.5, 3.5]])
|
| 372 |
+
|
| 373 |
+
def test_copyto():
|
| 374 |
+
a = np.arange(6, dtype='i4').reshape(2, 3)
|
| 375 |
+
|
| 376 |
+
# Simple copy
|
| 377 |
+
np.copyto(a, [[3, 1, 5], [6, 2, 1]])
|
| 378 |
+
assert_equal(a, [[3, 1, 5], [6, 2, 1]])
|
| 379 |
+
|
| 380 |
+
# Overlapping copy should work
|
| 381 |
+
np.copyto(a[:, :2], a[::-1, 1::-1])
|
| 382 |
+
assert_equal(a, [[2, 6, 5], [1, 3, 1]])
|
| 383 |
+
|
| 384 |
+
# Defaults to 'same_kind' casting
|
| 385 |
+
assert_raises(TypeError, np.copyto, a, 1.5)
|
| 386 |
+
|
| 387 |
+
# Force a copy with 'unsafe' casting, truncating 1.5 to 1
|
| 388 |
+
np.copyto(a, 1.5, casting='unsafe')
|
| 389 |
+
assert_equal(a, 1)
|
| 390 |
+
|
| 391 |
+
# Copying with a mask
|
| 392 |
+
np.copyto(a, 3, where=[True, False, True])
|
| 393 |
+
assert_equal(a, [[3, 1, 3], [3, 1, 3]])
|
| 394 |
+
|
| 395 |
+
# Casting rule still applies with a mask
|
| 396 |
+
assert_raises(TypeError, np.copyto, a, 3.5, where=[True, False, True])
|
| 397 |
+
|
| 398 |
+
# Lists of integer 0's and 1's is ok too
|
| 399 |
+
np.copyto(a, 4.0, casting='unsafe', where=[[0, 1, 1], [1, 0, 0]])
|
| 400 |
+
assert_equal(a, [[3, 4, 4], [4, 1, 3]])
|
| 401 |
+
|
| 402 |
+
# Overlapping copy with mask should work
|
| 403 |
+
np.copyto(a[:, :2], a[::-1, 1::-1], where=[[0, 1], [1, 1]])
|
| 404 |
+
assert_equal(a, [[3, 4, 4], [4, 3, 3]])
|
| 405 |
+
|
| 406 |
+
# 'dst' must be an array
|
| 407 |
+
assert_raises(TypeError, np.copyto, [1, 2, 3], [2, 3, 4])
|
| 408 |
+
|
| 409 |
+
|
| 410 |
+
def test_copyto_cast_safety():
|
| 411 |
+
with pytest.raises(TypeError):
|
| 412 |
+
np.copyto(np.arange(3), 3., casting="safe")
|
| 413 |
+
|
| 414 |
+
# Can put integer and float scalars safely (and equiv):
|
| 415 |
+
np.copyto(np.arange(3), 3, casting="equiv")
|
| 416 |
+
np.copyto(np.arange(3.), 3., casting="equiv")
|
| 417 |
+
# And also with less precision safely:
|
| 418 |
+
np.copyto(np.arange(3, dtype="uint8"), 3, casting="safe")
|
| 419 |
+
np.copyto(np.arange(3., dtype="float32"), 3., casting="safe")
|
| 420 |
+
|
| 421 |
+
# But not equiv:
|
| 422 |
+
with pytest.raises(TypeError):
|
| 423 |
+
np.copyto(np.arange(3, dtype="uint8"), 3, casting="equiv")
|
| 424 |
+
|
| 425 |
+
with pytest.raises(TypeError):
|
| 426 |
+
np.copyto(np.arange(3., dtype="float32"), 3., casting="equiv")
|
| 427 |
+
|
| 428 |
+
# As a special thing, object is equiv currently:
|
| 429 |
+
np.copyto(np.arange(3, dtype=object), 3, casting="equiv")
|
| 430 |
+
|
| 431 |
+
# The following raises an overflow error/gives a warning but not
|
| 432 |
+
# type error (due to casting), though:
|
| 433 |
+
with pytest.raises(OverflowError):
|
| 434 |
+
np.copyto(np.arange(3), 2**80, casting="safe")
|
| 435 |
+
|
| 436 |
+
with pytest.warns(RuntimeWarning):
|
| 437 |
+
np.copyto(np.arange(3, dtype=np.float32), 2e300, casting="safe")
|
| 438 |
+
|
| 439 |
+
|
| 440 |
+
def test_copyto_permut():
|
| 441 |
+
# test explicit overflow case
|
| 442 |
+
pad = 500
|
| 443 |
+
l = [True] * pad + [True, True, True, True]
|
| 444 |
+
r = np.zeros(len(l)-pad)
|
| 445 |
+
d = np.ones(len(l)-pad)
|
| 446 |
+
mask = np.array(l)[pad:]
|
| 447 |
+
np.copyto(r, d, where=mask[::-1])
|
| 448 |
+
|
| 449 |
+
# test all permutation of possible masks, 9 should be sufficient for
|
| 450 |
+
# current 4 byte unrolled code
|
| 451 |
+
power = 9
|
| 452 |
+
d = np.ones(power)
|
| 453 |
+
for i in range(2**power):
|
| 454 |
+
r = np.zeros(power)
|
| 455 |
+
l = [(i & x) != 0 for x in range(power)]
|
| 456 |
+
mask = np.array(l)
|
| 457 |
+
np.copyto(r, d, where=mask)
|
| 458 |
+
assert_array_equal(r == 1, l)
|
| 459 |
+
assert_equal(r.sum(), sum(l))
|
| 460 |
+
|
| 461 |
+
r = np.zeros(power)
|
| 462 |
+
np.copyto(r, d, where=mask[::-1])
|
| 463 |
+
assert_array_equal(r == 1, l[::-1])
|
| 464 |
+
assert_equal(r.sum(), sum(l))
|
| 465 |
+
|
| 466 |
+
r = np.zeros(power)
|
| 467 |
+
np.copyto(r[::2], d[::2], where=mask[::2])
|
| 468 |
+
assert_array_equal(r[::2] == 1, l[::2])
|
| 469 |
+
assert_equal(r[::2].sum(), sum(l[::2]))
|
| 470 |
+
|
| 471 |
+
r = np.zeros(power)
|
| 472 |
+
np.copyto(r[::2], d[::2], where=mask[::-2])
|
| 473 |
+
assert_array_equal(r[::2] == 1, l[::-2])
|
| 474 |
+
assert_equal(r[::2].sum(), sum(l[::-2]))
|
| 475 |
+
|
| 476 |
+
for c in [0xFF, 0x7F, 0x02, 0x10]:
|
| 477 |
+
r = np.zeros(power)
|
| 478 |
+
mask = np.array(l)
|
| 479 |
+
imask = np.array(l).view(np.uint8)
|
| 480 |
+
imask[mask != 0] = c
|
| 481 |
+
np.copyto(r, d, where=mask)
|
| 482 |
+
assert_array_equal(r == 1, l)
|
| 483 |
+
assert_equal(r.sum(), sum(l))
|
| 484 |
+
|
| 485 |
+
r = np.zeros(power)
|
| 486 |
+
np.copyto(r, d, where=True)
|
| 487 |
+
assert_equal(r.sum(), r.size)
|
| 488 |
+
r = np.ones(power)
|
| 489 |
+
d = np.zeros(power)
|
| 490 |
+
np.copyto(r, d, where=False)
|
| 491 |
+
assert_equal(r.sum(), r.size)
|
| 492 |
+
|
| 493 |
+
def test_copy_order():
|
| 494 |
+
a = np.arange(24).reshape(2, 1, 3, 4)
|
| 495 |
+
b = a.copy(order='F')
|
| 496 |
+
c = np.arange(24).reshape(2, 1, 4, 3).swapaxes(2, 3)
|
| 497 |
+
|
| 498 |
+
def check_copy_result(x, y, ccontig, fcontig, strides=False):
|
| 499 |
+
assert_(not (x is y))
|
| 500 |
+
assert_equal(x, y)
|
| 501 |
+
assert_equal(res.flags.c_contiguous, ccontig)
|
| 502 |
+
assert_equal(res.flags.f_contiguous, fcontig)
|
| 503 |
+
|
| 504 |
+
# Validate the initial state of a, b, and c
|
| 505 |
+
assert_(a.flags.c_contiguous)
|
| 506 |
+
assert_(not a.flags.f_contiguous)
|
| 507 |
+
assert_(not b.flags.c_contiguous)
|
| 508 |
+
assert_(b.flags.f_contiguous)
|
| 509 |
+
assert_(not c.flags.c_contiguous)
|
| 510 |
+
assert_(not c.flags.f_contiguous)
|
| 511 |
+
|
| 512 |
+
# Copy with order='C'
|
| 513 |
+
res = a.copy(order='C')
|
| 514 |
+
check_copy_result(res, a, ccontig=True, fcontig=False, strides=True)
|
| 515 |
+
res = b.copy(order='C')
|
| 516 |
+
check_copy_result(res, b, ccontig=True, fcontig=False, strides=False)
|
| 517 |
+
res = c.copy(order='C')
|
| 518 |
+
check_copy_result(res, c, ccontig=True, fcontig=False, strides=False)
|
| 519 |
+
res = np.copy(a, order='C')
|
| 520 |
+
check_copy_result(res, a, ccontig=True, fcontig=False, strides=True)
|
| 521 |
+
res = np.copy(b, order='C')
|
| 522 |
+
check_copy_result(res, b, ccontig=True, fcontig=False, strides=False)
|
| 523 |
+
res = np.copy(c, order='C')
|
| 524 |
+
check_copy_result(res, c, ccontig=True, fcontig=False, strides=False)
|
| 525 |
+
|
| 526 |
+
# Copy with order='F'
|
| 527 |
+
res = a.copy(order='F')
|
| 528 |
+
check_copy_result(res, a, ccontig=False, fcontig=True, strides=False)
|
| 529 |
+
res = b.copy(order='F')
|
| 530 |
+
check_copy_result(res, b, ccontig=False, fcontig=True, strides=True)
|
| 531 |
+
res = c.copy(order='F')
|
| 532 |
+
check_copy_result(res, c, ccontig=False, fcontig=True, strides=False)
|
| 533 |
+
res = np.copy(a, order='F')
|
| 534 |
+
check_copy_result(res, a, ccontig=False, fcontig=True, strides=False)
|
| 535 |
+
res = np.copy(b, order='F')
|
| 536 |
+
check_copy_result(res, b, ccontig=False, fcontig=True, strides=True)
|
| 537 |
+
res = np.copy(c, order='F')
|
| 538 |
+
check_copy_result(res, c, ccontig=False, fcontig=True, strides=False)
|
| 539 |
+
|
| 540 |
+
# Copy with order='K'
|
| 541 |
+
res = a.copy(order='K')
|
| 542 |
+
check_copy_result(res, a, ccontig=True, fcontig=False, strides=True)
|
| 543 |
+
res = b.copy(order='K')
|
| 544 |
+
check_copy_result(res, b, ccontig=False, fcontig=True, strides=True)
|
| 545 |
+
res = c.copy(order='K')
|
| 546 |
+
check_copy_result(res, c, ccontig=False, fcontig=False, strides=True)
|
| 547 |
+
res = np.copy(a, order='K')
|
| 548 |
+
check_copy_result(res, a, ccontig=True, fcontig=False, strides=True)
|
| 549 |
+
res = np.copy(b, order='K')
|
| 550 |
+
check_copy_result(res, b, ccontig=False, fcontig=True, strides=True)
|
| 551 |
+
res = np.copy(c, order='K')
|
| 552 |
+
check_copy_result(res, c, ccontig=False, fcontig=False, strides=True)
|
| 553 |
+
|
| 554 |
+
def test_contiguous_flags():
|
| 555 |
+
a = np.ones((4, 4, 1))[::2,:,:]
|
| 556 |
+
a.strides = a.strides[:2] + (-123,)
|
| 557 |
+
b = np.ones((2, 2, 1, 2, 2)).swapaxes(3, 4)
|
| 558 |
+
|
| 559 |
+
def check_contig(a, ccontig, fcontig):
|
| 560 |
+
assert_(a.flags.c_contiguous == ccontig)
|
| 561 |
+
assert_(a.flags.f_contiguous == fcontig)
|
| 562 |
+
|
| 563 |
+
# Check if new arrays are correct:
|
| 564 |
+
check_contig(a, False, False)
|
| 565 |
+
check_contig(b, False, False)
|
| 566 |
+
check_contig(np.empty((2, 2, 0, 2, 2)), True, True)
|
| 567 |
+
check_contig(np.array([[[1], [2]]], order='F'), True, True)
|
| 568 |
+
check_contig(np.empty((2, 2)), True, False)
|
| 569 |
+
check_contig(np.empty((2, 2), order='F'), False, True)
|
| 570 |
+
|
| 571 |
+
# Check that np.array creates correct contiguous flags:
|
| 572 |
+
check_contig(np.array(a, copy=None), False, False)
|
| 573 |
+
check_contig(np.array(a, copy=None, order='C'), True, False)
|
| 574 |
+
check_contig(np.array(a, ndmin=4, copy=None, order='F'), False, True)
|
| 575 |
+
|
| 576 |
+
# Check slicing update of flags and :
|
| 577 |
+
check_contig(a[0], True, True)
|
| 578 |
+
check_contig(a[None, ::4, ..., None], True, True)
|
| 579 |
+
check_contig(b[0, 0, ...], False, True)
|
| 580 |
+
check_contig(b[:, :, 0:0, :, :], True, True)
|
| 581 |
+
|
| 582 |
+
# Test ravel and squeeze.
|
| 583 |
+
check_contig(a.ravel(), True, True)
|
| 584 |
+
check_contig(np.ones((1, 3, 1)).squeeze(), True, True)
|
| 585 |
+
|
| 586 |
+
def test_broadcast_arrays():
|
| 587 |
+
# Test user defined dtypes
|
| 588 |
+
a = np.array([(1, 2, 3)], dtype='u4,u4,u4')
|
| 589 |
+
b = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4')
|
| 590 |
+
result = np.broadcast_arrays(a, b)
|
| 591 |
+
assert_equal(result[0], np.array([(1, 2, 3), (1, 2, 3), (1, 2, 3)], dtype='u4,u4,u4'))
|
| 592 |
+
assert_equal(result[1], np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4'))
|
| 593 |
+
|
| 594 |
+
@pytest.mark.parametrize(["shape", "fill_value", "expected_output"],
|
| 595 |
+
[((2, 2), [5.0, 6.0], np.array([[5.0, 6.0], [5.0, 6.0]])),
|
| 596 |
+
((3, 2), [1.0, 2.0], np.array([[1.0, 2.0], [1.0, 2.0], [1.0, 2.0]]))])
|
| 597 |
+
def test_full_from_list(shape, fill_value, expected_output):
|
| 598 |
+
output = np.full(shape, fill_value)
|
| 599 |
+
assert_equal(output, expected_output)
|
| 600 |
+
|
| 601 |
+
def test_astype_copyflag():
|
| 602 |
+
# test the various copyflag options
|
| 603 |
+
arr = np.arange(10, dtype=np.intp)
|
| 604 |
+
|
| 605 |
+
res_true = arr.astype(np.intp, copy=True)
|
| 606 |
+
assert not np.shares_memory(arr, res_true)
|
| 607 |
+
|
| 608 |
+
res_false = arr.astype(np.intp, copy=False)
|
| 609 |
+
assert np.shares_memory(arr, res_false)
|
| 610 |
+
|
| 611 |
+
res_false_float = arr.astype(np.float64, copy=False)
|
| 612 |
+
assert not np.shares_memory(arr, res_false_float)
|
| 613 |
+
|
| 614 |
+
# _CopyMode enum isn't allowed
|
| 615 |
+
assert_raises(ValueError, arr.astype, np.float64,
|
| 616 |
+
copy=np._CopyMode.NEVER)
|
janus/lib/python3.10/site-packages/numpy/_core/tests/test_array_api_info.py
ADDED
|
@@ -0,0 +1,112 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import pytest
|
| 3 |
+
|
| 4 |
+
info = np.__array_namespace_info__()
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def test_capabilities():
|
| 8 |
+
caps = info.capabilities()
|
| 9 |
+
assert caps["boolean indexing"] is True
|
| 10 |
+
assert caps["data-dependent shapes"] is True
|
| 11 |
+
|
| 12 |
+
# This will be added in the 2024.12 release of the array API standard.
|
| 13 |
+
|
| 14 |
+
# assert caps["max rank"] == 64
|
| 15 |
+
# np.zeros((1,)*64)
|
| 16 |
+
# with pytest.raises(ValueError):
|
| 17 |
+
# np.zeros((1,)*65)
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def test_default_device():
|
| 21 |
+
assert info.default_device() == "cpu" == np.asarray(0).device
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def test_default_dtypes():
|
| 25 |
+
dtypes = info.default_dtypes()
|
| 26 |
+
assert dtypes["real floating"] == np.float64 == np.asarray(0.0).dtype
|
| 27 |
+
assert dtypes["complex floating"] == np.complex128 == \
|
| 28 |
+
np.asarray(0.0j).dtype
|
| 29 |
+
assert dtypes["integral"] == np.intp == np.asarray(0).dtype
|
| 30 |
+
assert dtypes["indexing"] == np.intp == np.argmax(np.zeros(10)).dtype
|
| 31 |
+
|
| 32 |
+
with pytest.raises(ValueError, match="Device not understood"):
|
| 33 |
+
info.default_dtypes(device="gpu")
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
def test_dtypes_all():
|
| 37 |
+
dtypes = info.dtypes()
|
| 38 |
+
assert dtypes == {
|
| 39 |
+
"bool": np.bool_,
|
| 40 |
+
"int8": np.int8,
|
| 41 |
+
"int16": np.int16,
|
| 42 |
+
"int32": np.int32,
|
| 43 |
+
"int64": np.int64,
|
| 44 |
+
"uint8": np.uint8,
|
| 45 |
+
"uint16": np.uint16,
|
| 46 |
+
"uint32": np.uint32,
|
| 47 |
+
"uint64": np.uint64,
|
| 48 |
+
"float32": np.float32,
|
| 49 |
+
"float64": np.float64,
|
| 50 |
+
"complex64": np.complex64,
|
| 51 |
+
"complex128": np.complex128,
|
| 52 |
+
}
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
dtype_categories = {
|
| 56 |
+
"bool": {"bool": np.bool_},
|
| 57 |
+
"signed integer": {
|
| 58 |
+
"int8": np.int8,
|
| 59 |
+
"int16": np.int16,
|
| 60 |
+
"int32": np.int32,
|
| 61 |
+
"int64": np.int64,
|
| 62 |
+
},
|
| 63 |
+
"unsigned integer": {
|
| 64 |
+
"uint8": np.uint8,
|
| 65 |
+
"uint16": np.uint16,
|
| 66 |
+
"uint32": np.uint32,
|
| 67 |
+
"uint64": np.uint64,
|
| 68 |
+
},
|
| 69 |
+
"integral": ("signed integer", "unsigned integer"),
|
| 70 |
+
"real floating": {"float32": np.float32, "float64": np.float64},
|
| 71 |
+
"complex floating": {"complex64": np.complex64, "complex128":
|
| 72 |
+
np.complex128},
|
| 73 |
+
"numeric": ("integral", "real floating", "complex floating"),
|
| 74 |
+
}
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
@pytest.mark.parametrize("kind", dtype_categories)
|
| 78 |
+
def test_dtypes_kind(kind):
|
| 79 |
+
expected = dtype_categories[kind]
|
| 80 |
+
if isinstance(expected, tuple):
|
| 81 |
+
assert info.dtypes(kind=kind) == info.dtypes(kind=expected)
|
| 82 |
+
else:
|
| 83 |
+
assert info.dtypes(kind=kind) == expected
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
def test_dtypes_tuple():
|
| 87 |
+
dtypes = info.dtypes(kind=("bool", "integral"))
|
| 88 |
+
assert dtypes == {
|
| 89 |
+
"bool": np.bool_,
|
| 90 |
+
"int8": np.int8,
|
| 91 |
+
"int16": np.int16,
|
| 92 |
+
"int32": np.int32,
|
| 93 |
+
"int64": np.int64,
|
| 94 |
+
"uint8": np.uint8,
|
| 95 |
+
"uint16": np.uint16,
|
| 96 |
+
"uint32": np.uint32,
|
| 97 |
+
"uint64": np.uint64,
|
| 98 |
+
}
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
def test_dtypes_invalid_kind():
|
| 102 |
+
with pytest.raises(ValueError, match="unsupported kind"):
|
| 103 |
+
info.dtypes(kind="invalid")
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
def test_dtypes_invalid_device():
|
| 107 |
+
with pytest.raises(ValueError, match="Device not understood"):
|
| 108 |
+
info.dtypes(device="gpu")
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
def test_devices():
|
| 112 |
+
assert info.devices() == ["cpu"]
|
janus/lib/python3.10/site-packages/numpy/_core/tests/test_arrayprint.py
ADDED
|
@@ -0,0 +1,1281 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sys
|
| 2 |
+
import gc
|
| 3 |
+
from hypothesis import given
|
| 4 |
+
from hypothesis.extra import numpy as hynp
|
| 5 |
+
import pytest
|
| 6 |
+
|
| 7 |
+
import numpy as np
|
| 8 |
+
from numpy.testing import (
|
| 9 |
+
assert_, assert_equal, assert_raises, assert_warns, HAS_REFCOUNT,
|
| 10 |
+
assert_raises_regex, IS_WASM
|
| 11 |
+
)
|
| 12 |
+
from numpy.testing._private.utils import run_threaded
|
| 13 |
+
from numpy._core.arrayprint import _typelessdata
|
| 14 |
+
import textwrap
|
| 15 |
+
|
| 16 |
+
class TestArrayRepr:
|
| 17 |
+
def test_nan_inf(self):
|
| 18 |
+
x = np.array([np.nan, np.inf])
|
| 19 |
+
assert_equal(repr(x), 'array([nan, inf])')
|
| 20 |
+
|
| 21 |
+
def test_subclass(self):
|
| 22 |
+
class sub(np.ndarray):
|
| 23 |
+
pass
|
| 24 |
+
|
| 25 |
+
# one dimensional
|
| 26 |
+
x1d = np.array([1, 2]).view(sub)
|
| 27 |
+
assert_equal(repr(x1d), 'sub([1, 2])')
|
| 28 |
+
|
| 29 |
+
# two dimensional
|
| 30 |
+
x2d = np.array([[1, 2], [3, 4]]).view(sub)
|
| 31 |
+
assert_equal(repr(x2d),
|
| 32 |
+
'sub([[1, 2],\n'
|
| 33 |
+
' [3, 4]])')
|
| 34 |
+
|
| 35 |
+
# two dimensional with flexible dtype
|
| 36 |
+
xstruct = np.ones((2,2), dtype=[('a', '<i4')]).view(sub)
|
| 37 |
+
assert_equal(repr(xstruct),
|
| 38 |
+
"sub([[(1,), (1,)],\n"
|
| 39 |
+
" [(1,), (1,)]], dtype=[('a', '<i4')])"
|
| 40 |
+
)
|
| 41 |
+
|
| 42 |
+
@pytest.mark.xfail(reason="See gh-10544")
|
| 43 |
+
def test_object_subclass(self):
|
| 44 |
+
class sub(np.ndarray):
|
| 45 |
+
def __new__(cls, inp):
|
| 46 |
+
obj = np.asarray(inp).view(cls)
|
| 47 |
+
return obj
|
| 48 |
+
|
| 49 |
+
def __getitem__(self, ind):
|
| 50 |
+
ret = super().__getitem__(ind)
|
| 51 |
+
return sub(ret)
|
| 52 |
+
|
| 53 |
+
# test that object + subclass is OK:
|
| 54 |
+
x = sub([None, None])
|
| 55 |
+
assert_equal(repr(x), 'sub([None, None], dtype=object)')
|
| 56 |
+
assert_equal(str(x), '[None None]')
|
| 57 |
+
|
| 58 |
+
x = sub([None, sub([None, None])])
|
| 59 |
+
assert_equal(repr(x),
|
| 60 |
+
'sub([None, sub([None, None], dtype=object)], dtype=object)')
|
| 61 |
+
assert_equal(str(x), '[None sub([None, None], dtype=object)]')
|
| 62 |
+
|
| 63 |
+
def test_0d_object_subclass(self):
|
| 64 |
+
# make sure that subclasses which return 0ds instead
|
| 65 |
+
# of scalars don't cause infinite recursion in str
|
| 66 |
+
class sub(np.ndarray):
|
| 67 |
+
def __new__(cls, inp):
|
| 68 |
+
obj = np.asarray(inp).view(cls)
|
| 69 |
+
return obj
|
| 70 |
+
|
| 71 |
+
def __getitem__(self, ind):
|
| 72 |
+
ret = super().__getitem__(ind)
|
| 73 |
+
return sub(ret)
|
| 74 |
+
|
| 75 |
+
x = sub(1)
|
| 76 |
+
assert_equal(repr(x), 'sub(1)')
|
| 77 |
+
assert_equal(str(x), '1')
|
| 78 |
+
|
| 79 |
+
x = sub([1, 1])
|
| 80 |
+
assert_equal(repr(x), 'sub([1, 1])')
|
| 81 |
+
assert_equal(str(x), '[1 1]')
|
| 82 |
+
|
| 83 |
+
# check it works properly with object arrays too
|
| 84 |
+
x = sub(None)
|
| 85 |
+
assert_equal(repr(x), 'sub(None, dtype=object)')
|
| 86 |
+
assert_equal(str(x), 'None')
|
| 87 |
+
|
| 88 |
+
# plus recursive object arrays (even depth > 1)
|
| 89 |
+
y = sub(None)
|
| 90 |
+
x[()] = y
|
| 91 |
+
y[()] = x
|
| 92 |
+
assert_equal(repr(x),
|
| 93 |
+
'sub(sub(sub(..., dtype=object), dtype=object), dtype=object)')
|
| 94 |
+
assert_equal(str(x), '...')
|
| 95 |
+
x[()] = 0 # resolve circular references for garbage collector
|
| 96 |
+
|
| 97 |
+
# nested 0d-subclass-object
|
| 98 |
+
x = sub(None)
|
| 99 |
+
x[()] = sub(None)
|
| 100 |
+
assert_equal(repr(x), 'sub(sub(None, dtype=object), dtype=object)')
|
| 101 |
+
assert_equal(str(x), 'None')
|
| 102 |
+
|
| 103 |
+
# gh-10663
|
| 104 |
+
class DuckCounter(np.ndarray):
|
| 105 |
+
def __getitem__(self, item):
|
| 106 |
+
result = super().__getitem__(item)
|
| 107 |
+
if not isinstance(result, DuckCounter):
|
| 108 |
+
result = result[...].view(DuckCounter)
|
| 109 |
+
return result
|
| 110 |
+
|
| 111 |
+
def to_string(self):
|
| 112 |
+
return {0: 'zero', 1: 'one', 2: 'two'}.get(self.item(), 'many')
|
| 113 |
+
|
| 114 |
+
def __str__(self):
|
| 115 |
+
if self.shape == ():
|
| 116 |
+
return self.to_string()
|
| 117 |
+
else:
|
| 118 |
+
fmt = {'all': lambda x: x.to_string()}
|
| 119 |
+
return np.array2string(self, formatter=fmt)
|
| 120 |
+
|
| 121 |
+
dc = np.arange(5).view(DuckCounter)
|
| 122 |
+
assert_equal(str(dc), "[zero one two many many]")
|
| 123 |
+
assert_equal(str(dc[0]), "zero")
|
| 124 |
+
|
| 125 |
+
def test_self_containing(self):
|
| 126 |
+
arr0d = np.array(None)
|
| 127 |
+
arr0d[()] = arr0d
|
| 128 |
+
assert_equal(repr(arr0d),
|
| 129 |
+
'array(array(..., dtype=object), dtype=object)')
|
| 130 |
+
arr0d[()] = 0 # resolve recursion for garbage collector
|
| 131 |
+
|
| 132 |
+
arr1d = np.array([None, None])
|
| 133 |
+
arr1d[1] = arr1d
|
| 134 |
+
assert_equal(repr(arr1d),
|
| 135 |
+
'array([None, array(..., dtype=object)], dtype=object)')
|
| 136 |
+
arr1d[1] = 0 # resolve recursion for garbage collector
|
| 137 |
+
|
| 138 |
+
first = np.array(None)
|
| 139 |
+
second = np.array(None)
|
| 140 |
+
first[()] = second
|
| 141 |
+
second[()] = first
|
| 142 |
+
assert_equal(repr(first),
|
| 143 |
+
'array(array(array(..., dtype=object), dtype=object), dtype=object)')
|
| 144 |
+
first[()] = 0 # resolve circular references for garbage collector
|
| 145 |
+
|
| 146 |
+
def test_containing_list(self):
|
| 147 |
+
# printing square brackets directly would be ambiguous
|
| 148 |
+
arr1d = np.array([None, None])
|
| 149 |
+
arr1d[0] = [1, 2]
|
| 150 |
+
arr1d[1] = [3]
|
| 151 |
+
assert_equal(repr(arr1d),
|
| 152 |
+
'array([list([1, 2]), list([3])], dtype=object)')
|
| 153 |
+
|
| 154 |
+
def test_void_scalar_recursion(self):
|
| 155 |
+
# gh-9345
|
| 156 |
+
repr(np.void(b'test')) # RecursionError ?
|
| 157 |
+
|
| 158 |
+
def test_fieldless_structured(self):
|
| 159 |
+
# gh-10366
|
| 160 |
+
no_fields = np.dtype([])
|
| 161 |
+
arr_no_fields = np.empty(4, dtype=no_fields)
|
| 162 |
+
assert_equal(repr(arr_no_fields), 'array([(), (), (), ()], dtype=[])')
|
| 163 |
+
|
| 164 |
+
|
| 165 |
+
class TestComplexArray:
|
| 166 |
+
def test_str(self):
|
| 167 |
+
rvals = [0, 1, -1, np.inf, -np.inf, np.nan]
|
| 168 |
+
cvals = [complex(rp, ip) for rp in rvals for ip in rvals]
|
| 169 |
+
dtypes = [np.complex64, np.cdouble, np.clongdouble]
|
| 170 |
+
actual = [str(np.array([c], dt)) for c in cvals for dt in dtypes]
|
| 171 |
+
wanted = [
|
| 172 |
+
'[0.+0.j]', '[0.+0.j]', '[0.+0.j]',
|
| 173 |
+
'[0.+1.j]', '[0.+1.j]', '[0.+1.j]',
|
| 174 |
+
'[0.-1.j]', '[0.-1.j]', '[0.-1.j]',
|
| 175 |
+
'[0.+infj]', '[0.+infj]', '[0.+infj]',
|
| 176 |
+
'[0.-infj]', '[0.-infj]', '[0.-infj]',
|
| 177 |
+
'[0.+nanj]', '[0.+nanj]', '[0.+nanj]',
|
| 178 |
+
'[1.+0.j]', '[1.+0.j]', '[1.+0.j]',
|
| 179 |
+
'[1.+1.j]', '[1.+1.j]', '[1.+1.j]',
|
| 180 |
+
'[1.-1.j]', '[1.-1.j]', '[1.-1.j]',
|
| 181 |
+
'[1.+infj]', '[1.+infj]', '[1.+infj]',
|
| 182 |
+
'[1.-infj]', '[1.-infj]', '[1.-infj]',
|
| 183 |
+
'[1.+nanj]', '[1.+nanj]', '[1.+nanj]',
|
| 184 |
+
'[-1.+0.j]', '[-1.+0.j]', '[-1.+0.j]',
|
| 185 |
+
'[-1.+1.j]', '[-1.+1.j]', '[-1.+1.j]',
|
| 186 |
+
'[-1.-1.j]', '[-1.-1.j]', '[-1.-1.j]',
|
| 187 |
+
'[-1.+infj]', '[-1.+infj]', '[-1.+infj]',
|
| 188 |
+
'[-1.-infj]', '[-1.-infj]', '[-1.-infj]',
|
| 189 |
+
'[-1.+nanj]', '[-1.+nanj]', '[-1.+nanj]',
|
| 190 |
+
'[inf+0.j]', '[inf+0.j]', '[inf+0.j]',
|
| 191 |
+
'[inf+1.j]', '[inf+1.j]', '[inf+1.j]',
|
| 192 |
+
'[inf-1.j]', '[inf-1.j]', '[inf-1.j]',
|
| 193 |
+
'[inf+infj]', '[inf+infj]', '[inf+infj]',
|
| 194 |
+
'[inf-infj]', '[inf-infj]', '[inf-infj]',
|
| 195 |
+
'[inf+nanj]', '[inf+nanj]', '[inf+nanj]',
|
| 196 |
+
'[-inf+0.j]', '[-inf+0.j]', '[-inf+0.j]',
|
| 197 |
+
'[-inf+1.j]', '[-inf+1.j]', '[-inf+1.j]',
|
| 198 |
+
'[-inf-1.j]', '[-inf-1.j]', '[-inf-1.j]',
|
| 199 |
+
'[-inf+infj]', '[-inf+infj]', '[-inf+infj]',
|
| 200 |
+
'[-inf-infj]', '[-inf-infj]', '[-inf-infj]',
|
| 201 |
+
'[-inf+nanj]', '[-inf+nanj]', '[-inf+nanj]',
|
| 202 |
+
'[nan+0.j]', '[nan+0.j]', '[nan+0.j]',
|
| 203 |
+
'[nan+1.j]', '[nan+1.j]', '[nan+1.j]',
|
| 204 |
+
'[nan-1.j]', '[nan-1.j]', '[nan-1.j]',
|
| 205 |
+
'[nan+infj]', '[nan+infj]', '[nan+infj]',
|
| 206 |
+
'[nan-infj]', '[nan-infj]', '[nan-infj]',
|
| 207 |
+
'[nan+nanj]', '[nan+nanj]', '[nan+nanj]']
|
| 208 |
+
|
| 209 |
+
for res, val in zip(actual, wanted):
|
| 210 |
+
assert_equal(res, val)
|
| 211 |
+
|
| 212 |
+
class TestArray2String:
|
| 213 |
+
def test_basic(self):
|
| 214 |
+
"""Basic test of array2string."""
|
| 215 |
+
a = np.arange(3)
|
| 216 |
+
assert_(np.array2string(a) == '[0 1 2]')
|
| 217 |
+
assert_(np.array2string(a, max_line_width=4, legacy='1.13') == '[0 1\n 2]')
|
| 218 |
+
assert_(np.array2string(a, max_line_width=4) == '[0\n 1\n 2]')
|
| 219 |
+
|
| 220 |
+
def test_unexpected_kwarg(self):
|
| 221 |
+
# ensure than an appropriate TypeError
|
| 222 |
+
# is raised when array2string receives
|
| 223 |
+
# an unexpected kwarg
|
| 224 |
+
|
| 225 |
+
with assert_raises_regex(TypeError, 'nonsense'):
|
| 226 |
+
np.array2string(np.array([1, 2, 3]),
|
| 227 |
+
nonsense=None)
|
| 228 |
+
|
| 229 |
+
def test_format_function(self):
|
| 230 |
+
"""Test custom format function for each element in array."""
|
| 231 |
+
def _format_function(x):
|
| 232 |
+
if np.abs(x) < 1:
|
| 233 |
+
return '.'
|
| 234 |
+
elif np.abs(x) < 2:
|
| 235 |
+
return 'o'
|
| 236 |
+
else:
|
| 237 |
+
return 'O'
|
| 238 |
+
|
| 239 |
+
x = np.arange(3)
|
| 240 |
+
x_hex = "[0x0 0x1 0x2]"
|
| 241 |
+
x_oct = "[0o0 0o1 0o2]"
|
| 242 |
+
assert_(np.array2string(x, formatter={'all':_format_function}) ==
|
| 243 |
+
"[. o O]")
|
| 244 |
+
assert_(np.array2string(x, formatter={'int_kind':_format_function}) ==
|
| 245 |
+
"[. o O]")
|
| 246 |
+
assert_(np.array2string(x, formatter={'all':lambda x: "%.4f" % x}) ==
|
| 247 |
+
"[0.0000 1.0000 2.0000]")
|
| 248 |
+
assert_equal(np.array2string(x, formatter={'int':lambda x: hex(x)}),
|
| 249 |
+
x_hex)
|
| 250 |
+
assert_equal(np.array2string(x, formatter={'int':lambda x: oct(x)}),
|
| 251 |
+
x_oct)
|
| 252 |
+
|
| 253 |
+
x = np.arange(3.)
|
| 254 |
+
assert_(np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x}) ==
|
| 255 |
+
"[0.00 1.00 2.00]")
|
| 256 |
+
assert_(np.array2string(x, formatter={'float':lambda x: "%.2f" % x}) ==
|
| 257 |
+
"[0.00 1.00 2.00]")
|
| 258 |
+
|
| 259 |
+
s = np.array(['abc', 'def'])
|
| 260 |
+
assert_(np.array2string(s, formatter={'numpystr':lambda s: s*2}) ==
|
| 261 |
+
'[abcabc defdef]')
|
| 262 |
+
|
| 263 |
+
def test_structure_format_mixed(self):
|
| 264 |
+
dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
|
| 265 |
+
x = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt)
|
| 266 |
+
assert_equal(np.array2string(x),
|
| 267 |
+
"[('Sarah', [8., 7.]) ('John', [6., 7.])]")
|
| 268 |
+
|
| 269 |
+
np.set_printoptions(legacy='1.13')
|
| 270 |
+
try:
|
| 271 |
+
# for issue #5692
|
| 272 |
+
A = np.zeros(shape=10, dtype=[("A", "M8[s]")])
|
| 273 |
+
A[5:].fill(np.datetime64('NaT'))
|
| 274 |
+
assert_equal(
|
| 275 |
+
np.array2string(A),
|
| 276 |
+
textwrap.dedent("""\
|
| 277 |
+
[('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',)
|
| 278 |
+
('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('NaT',) ('NaT',)
|
| 279 |
+
('NaT',) ('NaT',) ('NaT',)]""")
|
| 280 |
+
)
|
| 281 |
+
finally:
|
| 282 |
+
np.set_printoptions(legacy=False)
|
| 283 |
+
|
| 284 |
+
# same again, but with non-legacy behavior
|
| 285 |
+
assert_equal(
|
| 286 |
+
np.array2string(A),
|
| 287 |
+
textwrap.dedent("""\
|
| 288 |
+
[('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',)
|
| 289 |
+
('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',)
|
| 290 |
+
('1970-01-01T00:00:00',) ( 'NaT',)
|
| 291 |
+
( 'NaT',) ( 'NaT',)
|
| 292 |
+
( 'NaT',) ( 'NaT',)]""")
|
| 293 |
+
)
|
| 294 |
+
|
| 295 |
+
# and again, with timedeltas
|
| 296 |
+
A = np.full(10, 123456, dtype=[("A", "m8[s]")])
|
| 297 |
+
A[5:].fill(np.datetime64('NaT'))
|
| 298 |
+
assert_equal(
|
| 299 |
+
np.array2string(A),
|
| 300 |
+
textwrap.dedent("""\
|
| 301 |
+
[(123456,) (123456,) (123456,) (123456,) (123456,) ( 'NaT',) ( 'NaT',)
|
| 302 |
+
( 'NaT',) ( 'NaT',) ( 'NaT',)]""")
|
| 303 |
+
)
|
| 304 |
+
|
| 305 |
+
def test_structure_format_int(self):
|
| 306 |
+
# See #8160
|
| 307 |
+
struct_int = np.array([([1, -1],), ([123, 1],)],
|
| 308 |
+
dtype=[('B', 'i4', 2)])
|
| 309 |
+
assert_equal(np.array2string(struct_int),
|
| 310 |
+
"[([ 1, -1],) ([123, 1],)]")
|
| 311 |
+
struct_2dint = np.array([([[0, 1], [2, 3]],), ([[12, 0], [0, 0]],)],
|
| 312 |
+
dtype=[('B', 'i4', (2, 2))])
|
| 313 |
+
assert_equal(np.array2string(struct_2dint),
|
| 314 |
+
"[([[ 0, 1], [ 2, 3]],) ([[12, 0], [ 0, 0]],)]")
|
| 315 |
+
|
| 316 |
+
def test_structure_format_float(self):
|
| 317 |
+
# See #8172
|
| 318 |
+
array_scalar = np.array(
|
| 319 |
+
(1., 2.1234567890123456789, 3.), dtype=('f8,f8,f8'))
|
| 320 |
+
assert_equal(np.array2string(array_scalar), "(1., 2.12345679, 3.)")
|
| 321 |
+
|
| 322 |
+
def test_unstructured_void_repr(self):
|
| 323 |
+
a = np.array([27, 91, 50, 75, 7, 65, 10, 8,
|
| 324 |
+
27, 91, 51, 49,109, 82,101,100], dtype='u1').view('V8')
|
| 325 |
+
assert_equal(repr(a[0]),
|
| 326 |
+
r"np.void(b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08')")
|
| 327 |
+
assert_equal(str(a[0]), r"b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'")
|
| 328 |
+
assert_equal(repr(a),
|
| 329 |
+
r"array([b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'," "\n"
|
| 330 |
+
r" b'\x1B\x5B\x33\x31\x6D\x52\x65\x64'], dtype='|V8')")
|
| 331 |
+
|
| 332 |
+
assert_equal(eval(repr(a), vars(np)), a)
|
| 333 |
+
assert_equal(eval(repr(a[0]), dict(np=np)), a[0])
|
| 334 |
+
|
| 335 |
+
def test_edgeitems_kwarg(self):
|
| 336 |
+
# previously the global print options would be taken over the kwarg
|
| 337 |
+
arr = np.zeros(3, int)
|
| 338 |
+
assert_equal(
|
| 339 |
+
np.array2string(arr, edgeitems=1, threshold=0),
|
| 340 |
+
"[0 ... 0]"
|
| 341 |
+
)
|
| 342 |
+
|
| 343 |
+
def test_summarize_1d(self):
|
| 344 |
+
A = np.arange(1001)
|
| 345 |
+
strA = '[ 0 1 2 ... 998 999 1000]'
|
| 346 |
+
assert_equal(str(A), strA)
|
| 347 |
+
|
| 348 |
+
reprA = 'array([ 0, 1, 2, ..., 998, 999, 1000])'
|
| 349 |
+
try:
|
| 350 |
+
np.set_printoptions(legacy='2.1')
|
| 351 |
+
assert_equal(repr(A), reprA)
|
| 352 |
+
finally:
|
| 353 |
+
np.set_printoptions(legacy=False)
|
| 354 |
+
|
| 355 |
+
assert_equal(repr(A), reprA.replace(')', ', shape=(1001,))'))
|
| 356 |
+
|
| 357 |
+
def test_summarize_2d(self):
|
| 358 |
+
A = np.arange(1002).reshape(2, 501)
|
| 359 |
+
strA = '[[ 0 1 2 ... 498 499 500]\n' \
|
| 360 |
+
' [ 501 502 503 ... 999 1000 1001]]'
|
| 361 |
+
assert_equal(str(A), strA)
|
| 362 |
+
|
| 363 |
+
reprA = 'array([[ 0, 1, 2, ..., 498, 499, 500],\n' \
|
| 364 |
+
' [ 501, 502, 503, ..., 999, 1000, 1001]])'
|
| 365 |
+
try:
|
| 366 |
+
np.set_printoptions(legacy='2.1')
|
| 367 |
+
assert_equal(repr(A), reprA)
|
| 368 |
+
finally:
|
| 369 |
+
np.set_printoptions(legacy=False)
|
| 370 |
+
|
| 371 |
+
assert_equal(repr(A), reprA.replace(')', ', shape=(2, 501))'))
|
| 372 |
+
|
| 373 |
+
def test_summarize_2d_dtype(self):
|
| 374 |
+
A = np.arange(1002, dtype='i2').reshape(2, 501)
|
| 375 |
+
strA = '[[ 0 1 2 ... 498 499 500]\n' \
|
| 376 |
+
' [ 501 502 503 ... 999 1000 1001]]'
|
| 377 |
+
assert_equal(str(A), strA)
|
| 378 |
+
|
| 379 |
+
reprA = ('array([[ 0, 1, 2, ..., 498, 499, 500],\n'
|
| 380 |
+
' [ 501, 502, 503, ..., 999, 1000, 1001]],\n'
|
| 381 |
+
' shape=(2, 501), dtype=int16)')
|
| 382 |
+
assert_equal(repr(A), reprA)
|
| 383 |
+
|
| 384 |
+
def test_summarize_structure(self):
|
| 385 |
+
A = (np.arange(2002, dtype="<i8").reshape(2, 1001)
|
| 386 |
+
.view([('i', "<i8", (1001,))]))
|
| 387 |
+
strA = ("[[([ 0, 1, 2, ..., 998, 999, 1000],)]\n"
|
| 388 |
+
" [([1001, 1002, 1003, ..., 1999, 2000, 2001],)]]")
|
| 389 |
+
assert_equal(str(A), strA)
|
| 390 |
+
|
| 391 |
+
reprA = ("array([[([ 0, 1, 2, ..., 998, 999, 1000],)],\n"
|
| 392 |
+
" [([1001, 1002, 1003, ..., 1999, 2000, 2001],)]],\n"
|
| 393 |
+
" dtype=[('i', '<i8', (1001,))])")
|
| 394 |
+
assert_equal(repr(A), reprA)
|
| 395 |
+
|
| 396 |
+
B = np.ones(2002, dtype=">i8").view([('i', ">i8", (2, 1001))])
|
| 397 |
+
strB = "[([[1, 1, 1, ..., 1, 1, 1], [1, 1, 1, ..., 1, 1, 1]],)]"
|
| 398 |
+
assert_equal(str(B), strB)
|
| 399 |
+
|
| 400 |
+
reprB = (
|
| 401 |
+
"array([([[1, 1, 1, ..., 1, 1, 1], [1, 1, 1, ..., 1, 1, 1]],)],\n"
|
| 402 |
+
" dtype=[('i', '>i8', (2, 1001))])"
|
| 403 |
+
)
|
| 404 |
+
assert_equal(repr(B), reprB)
|
| 405 |
+
|
| 406 |
+
C = (np.arange(22, dtype="<i8").reshape(2, 11)
|
| 407 |
+
.view([('i1', "<i8"), ('i10', "<i8", (10,))]))
|
| 408 |
+
strC = "[[( 0, [ 1, ..., 10])]\n [(11, [12, ..., 21])]]"
|
| 409 |
+
assert_equal(np.array2string(C, threshold=1, edgeitems=1), strC)
|
| 410 |
+
|
| 411 |
+
def test_linewidth(self):
|
| 412 |
+
a = np.full(6, 1)
|
| 413 |
+
|
| 414 |
+
def make_str(a, width, **kw):
|
| 415 |
+
return np.array2string(a, separator="", max_line_width=width, **kw)
|
| 416 |
+
|
| 417 |
+
assert_equal(make_str(a, 8, legacy='1.13'), '[111111]')
|
| 418 |
+
assert_equal(make_str(a, 7, legacy='1.13'), '[111111]')
|
| 419 |
+
assert_equal(make_str(a, 5, legacy='1.13'), '[1111\n'
|
| 420 |
+
' 11]')
|
| 421 |
+
|
| 422 |
+
assert_equal(make_str(a, 8), '[111111]')
|
| 423 |
+
assert_equal(make_str(a, 7), '[11111\n'
|
| 424 |
+
' 1]')
|
| 425 |
+
assert_equal(make_str(a, 5), '[111\n'
|
| 426 |
+
' 111]')
|
| 427 |
+
|
| 428 |
+
b = a[None,None,:]
|
| 429 |
+
|
| 430 |
+
assert_equal(make_str(b, 12, legacy='1.13'), '[[[111111]]]')
|
| 431 |
+
assert_equal(make_str(b, 9, legacy='1.13'), '[[[111111]]]')
|
| 432 |
+
assert_equal(make_str(b, 8, legacy='1.13'), '[[[11111\n'
|
| 433 |
+
' 1]]]')
|
| 434 |
+
|
| 435 |
+
assert_equal(make_str(b, 12), '[[[111111]]]')
|
| 436 |
+
assert_equal(make_str(b, 9), '[[[111\n'
|
| 437 |
+
' 111]]]')
|
| 438 |
+
assert_equal(make_str(b, 8), '[[[11\n'
|
| 439 |
+
' 11\n'
|
| 440 |
+
' 11]]]')
|
| 441 |
+
|
| 442 |
+
def test_wide_element(self):
|
| 443 |
+
a = np.array(['xxxxx'])
|
| 444 |
+
assert_equal(
|
| 445 |
+
np.array2string(a, max_line_width=5),
|
| 446 |
+
"['xxxxx']"
|
| 447 |
+
)
|
| 448 |
+
assert_equal(
|
| 449 |
+
np.array2string(a, max_line_width=5, legacy='1.13'),
|
| 450 |
+
"[ 'xxxxx']"
|
| 451 |
+
)
|
| 452 |
+
|
| 453 |
+
def test_multiline_repr(self):
|
| 454 |
+
class MultiLine:
|
| 455 |
+
def __repr__(self):
|
| 456 |
+
return "Line 1\nLine 2"
|
| 457 |
+
|
| 458 |
+
a = np.array([[None, MultiLine()], [MultiLine(), None]])
|
| 459 |
+
|
| 460 |
+
assert_equal(
|
| 461 |
+
np.array2string(a),
|
| 462 |
+
'[[None Line 1\n'
|
| 463 |
+
' Line 2]\n'
|
| 464 |
+
' [Line 1\n'
|
| 465 |
+
' Line 2 None]]'
|
| 466 |
+
)
|
| 467 |
+
assert_equal(
|
| 468 |
+
np.array2string(a, max_line_width=5),
|
| 469 |
+
'[[None\n'
|
| 470 |
+
' Line 1\n'
|
| 471 |
+
' Line 2]\n'
|
| 472 |
+
' [Line 1\n'
|
| 473 |
+
' Line 2\n'
|
| 474 |
+
' None]]'
|
| 475 |
+
)
|
| 476 |
+
assert_equal(
|
| 477 |
+
repr(a),
|
| 478 |
+
'array([[None, Line 1\n'
|
| 479 |
+
' Line 2],\n'
|
| 480 |
+
' [Line 1\n'
|
| 481 |
+
' Line 2, None]], dtype=object)'
|
| 482 |
+
)
|
| 483 |
+
|
| 484 |
+
class MultiLineLong:
|
| 485 |
+
def __repr__(self):
|
| 486 |
+
return "Line 1\nLooooooooooongestLine2\nLongerLine 3"
|
| 487 |
+
|
| 488 |
+
a = np.array([[None, MultiLineLong()], [MultiLineLong(), None]])
|
| 489 |
+
assert_equal(
|
| 490 |
+
repr(a),
|
| 491 |
+
'array([[None, Line 1\n'
|
| 492 |
+
' LooooooooooongestLine2\n'
|
| 493 |
+
' LongerLine 3 ],\n'
|
| 494 |
+
' [Line 1\n'
|
| 495 |
+
' LooooooooooongestLine2\n'
|
| 496 |
+
' LongerLine 3 , None]], dtype=object)'
|
| 497 |
+
)
|
| 498 |
+
assert_equal(
|
| 499 |
+
np.array_repr(a, 20),
|
| 500 |
+
'array([[None,\n'
|
| 501 |
+
' Line 1\n'
|
| 502 |
+
' LooooooooooongestLine2\n'
|
| 503 |
+
' LongerLine 3 ],\n'
|
| 504 |
+
' [Line 1\n'
|
| 505 |
+
' LooooooooooongestLine2\n'
|
| 506 |
+
' LongerLine 3 ,\n'
|
| 507 |
+
' None]],\n'
|
| 508 |
+
' dtype=object)'
|
| 509 |
+
)
|
| 510 |
+
|
| 511 |
+
def test_nested_array_repr(self):
|
| 512 |
+
a = np.empty((2, 2), dtype=object)
|
| 513 |
+
a[0, 0] = np.eye(2)
|
| 514 |
+
a[0, 1] = np.eye(3)
|
| 515 |
+
a[1, 0] = None
|
| 516 |
+
a[1, 1] = np.ones((3, 1))
|
| 517 |
+
assert_equal(
|
| 518 |
+
repr(a),
|
| 519 |
+
'array([[array([[1., 0.],\n'
|
| 520 |
+
' [0., 1.]]), array([[1., 0., 0.],\n'
|
| 521 |
+
' [0., 1., 0.],\n'
|
| 522 |
+
' [0., 0., 1.]])],\n'
|
| 523 |
+
' [None, array([[1.],\n'
|
| 524 |
+
' [1.],\n'
|
| 525 |
+
' [1.]])]], dtype=object)'
|
| 526 |
+
)
|
| 527 |
+
|
| 528 |
+
@given(hynp.from_dtype(np.dtype("U")))
|
| 529 |
+
def test_any_text(self, text):
|
| 530 |
+
# This test checks that, given any value that can be represented in an
|
| 531 |
+
# array of dtype("U") (i.e. unicode string), ...
|
| 532 |
+
a = np.array([text, text, text])
|
| 533 |
+
# casting a list of them to an array does not e.g. truncate the value
|
| 534 |
+
assert_equal(a[0], text)
|
| 535 |
+
text = text.item() # use raw python strings for repr below
|
| 536 |
+
# and that np.array2string puts a newline in the expected location
|
| 537 |
+
expected_repr = "[{0!r} {0!r}\n {0!r}]".format(text)
|
| 538 |
+
result = np.array2string(a, max_line_width=len(repr(text)) * 2 + 3)
|
| 539 |
+
assert_equal(result, expected_repr)
|
| 540 |
+
|
| 541 |
+
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
|
| 542 |
+
def test_refcount(self):
|
| 543 |
+
# make sure we do not hold references to the array due to a recursive
|
| 544 |
+
# closure (gh-10620)
|
| 545 |
+
gc.disable()
|
| 546 |
+
a = np.arange(2)
|
| 547 |
+
r1 = sys.getrefcount(a)
|
| 548 |
+
np.array2string(a)
|
| 549 |
+
np.array2string(a)
|
| 550 |
+
r2 = sys.getrefcount(a)
|
| 551 |
+
gc.collect()
|
| 552 |
+
gc.enable()
|
| 553 |
+
assert_(r1 == r2)
|
| 554 |
+
|
| 555 |
+
def test_with_sign(self):
|
| 556 |
+
# mixed negative and positive value array
|
| 557 |
+
a = np.array([-2, 0, 3])
|
| 558 |
+
assert_equal(
|
| 559 |
+
np.array2string(a, sign='+'),
|
| 560 |
+
'[-2 +0 +3]'
|
| 561 |
+
)
|
| 562 |
+
assert_equal(
|
| 563 |
+
np.array2string(a, sign='-'),
|
| 564 |
+
'[-2 0 3]'
|
| 565 |
+
)
|
| 566 |
+
assert_equal(
|
| 567 |
+
np.array2string(a, sign=' '),
|
| 568 |
+
'[-2 0 3]'
|
| 569 |
+
)
|
| 570 |
+
# all non-negative array
|
| 571 |
+
a = np.array([2, 0, 3])
|
| 572 |
+
assert_equal(
|
| 573 |
+
np.array2string(a, sign='+'),
|
| 574 |
+
'[+2 +0 +3]'
|
| 575 |
+
)
|
| 576 |
+
assert_equal(
|
| 577 |
+
np.array2string(a, sign='-'),
|
| 578 |
+
'[2 0 3]'
|
| 579 |
+
)
|
| 580 |
+
assert_equal(
|
| 581 |
+
np.array2string(a, sign=' '),
|
| 582 |
+
'[ 2 0 3]'
|
| 583 |
+
)
|
| 584 |
+
# all negative array
|
| 585 |
+
a = np.array([-2, -1, -3])
|
| 586 |
+
assert_equal(
|
| 587 |
+
np.array2string(a, sign='+'),
|
| 588 |
+
'[-2 -1 -3]'
|
| 589 |
+
)
|
| 590 |
+
assert_equal(
|
| 591 |
+
np.array2string(a, sign='-'),
|
| 592 |
+
'[-2 -1 -3]'
|
| 593 |
+
)
|
| 594 |
+
assert_equal(
|
| 595 |
+
np.array2string(a, sign=' '),
|
| 596 |
+
'[-2 -1 -3]'
|
| 597 |
+
)
|
| 598 |
+
# 2d array mixed negative and positive
|
| 599 |
+
a = np.array([[10, -1, 1, 1], [10, 10, 10, 10]])
|
| 600 |
+
assert_equal(
|
| 601 |
+
np.array2string(a, sign='+'),
|
| 602 |
+
'[[+10 -1 +1 +1]\n [+10 +10 +10 +10]]'
|
| 603 |
+
)
|
| 604 |
+
assert_equal(
|
| 605 |
+
np.array2string(a, sign='-'),
|
| 606 |
+
'[[10 -1 1 1]\n [10 10 10 10]]'
|
| 607 |
+
)
|
| 608 |
+
assert_equal(
|
| 609 |
+
np.array2string(a, sign=' '),
|
| 610 |
+
'[[10 -1 1 1]\n [10 10 10 10]]'
|
| 611 |
+
)
|
| 612 |
+
# 2d array all positive
|
| 613 |
+
a = np.array([[10, 0, 1, 1], [10, 10, 10, 10]])
|
| 614 |
+
assert_equal(
|
| 615 |
+
np.array2string(a, sign='+'),
|
| 616 |
+
'[[+10 +0 +1 +1]\n [+10 +10 +10 +10]]'
|
| 617 |
+
)
|
| 618 |
+
assert_equal(
|
| 619 |
+
np.array2string(a, sign='-'),
|
| 620 |
+
'[[10 0 1 1]\n [10 10 10 10]]'
|
| 621 |
+
)
|
| 622 |
+
assert_equal(
|
| 623 |
+
np.array2string(a, sign=' '),
|
| 624 |
+
'[[ 10 0 1 1]\n [ 10 10 10 10]]'
|
| 625 |
+
)
|
| 626 |
+
# 2d array all negative
|
| 627 |
+
a = np.array([[-10, -1, -1, -1], [-10, -10, -10, -10]])
|
| 628 |
+
assert_equal(
|
| 629 |
+
np.array2string(a, sign='+'),
|
| 630 |
+
'[[-10 -1 -1 -1]\n [-10 -10 -10 -10]]'
|
| 631 |
+
)
|
| 632 |
+
assert_equal(
|
| 633 |
+
np.array2string(a, sign='-'),
|
| 634 |
+
'[[-10 -1 -1 -1]\n [-10 -10 -10 -10]]'
|
| 635 |
+
)
|
| 636 |
+
assert_equal(
|
| 637 |
+
np.array2string(a, sign=' '),
|
| 638 |
+
'[[-10 -1 -1 -1]\n [-10 -10 -10 -10]]'
|
| 639 |
+
)
|
| 640 |
+
|
| 641 |
+
|
| 642 |
+
class TestPrintOptions:
|
| 643 |
+
"""Test getting and setting global print options."""
|
| 644 |
+
|
| 645 |
+
def setup_method(self):
|
| 646 |
+
self.oldopts = np.get_printoptions()
|
| 647 |
+
|
| 648 |
+
def teardown_method(self):
|
| 649 |
+
np.set_printoptions(**self.oldopts)
|
| 650 |
+
|
| 651 |
+
def test_basic(self):
|
| 652 |
+
x = np.array([1.5, 0, 1.234567890])
|
| 653 |
+
assert_equal(repr(x), "array([1.5 , 0. , 1.23456789])")
|
| 654 |
+
ret = np.set_printoptions(precision=4)
|
| 655 |
+
assert_equal(repr(x), "array([1.5 , 0. , 1.2346])")
|
| 656 |
+
assert ret is None
|
| 657 |
+
|
| 658 |
+
def test_precision_zero(self):
|
| 659 |
+
np.set_printoptions(precision=0)
|
| 660 |
+
for values, string in (
|
| 661 |
+
([0.], "0."), ([.3], "0."), ([-.3], "-0."), ([.7], "1."),
|
| 662 |
+
([1.5], "2."), ([-1.5], "-2."), ([-15.34], "-15."),
|
| 663 |
+
([100.], "100."), ([.2, -1, 122.51], " 0., -1., 123."),
|
| 664 |
+
([0], "0"), ([-12], "-12"), ([complex(.3, -.7)], "0.-1.j")):
|
| 665 |
+
x = np.array(values)
|
| 666 |
+
assert_equal(repr(x), "array([%s])" % string)
|
| 667 |
+
|
| 668 |
+
def test_formatter(self):
|
| 669 |
+
x = np.arange(3)
|
| 670 |
+
np.set_printoptions(formatter={'all':lambda x: str(x-1)})
|
| 671 |
+
assert_equal(repr(x), "array([-1, 0, 1])")
|
| 672 |
+
|
| 673 |
+
def test_formatter_reset(self):
|
| 674 |
+
x = np.arange(3)
|
| 675 |
+
np.set_printoptions(formatter={'all':lambda x: str(x-1)})
|
| 676 |
+
assert_equal(repr(x), "array([-1, 0, 1])")
|
| 677 |
+
np.set_printoptions(formatter={'int':None})
|
| 678 |
+
assert_equal(repr(x), "array([0, 1, 2])")
|
| 679 |
+
|
| 680 |
+
np.set_printoptions(formatter={'all':lambda x: str(x-1)})
|
| 681 |
+
assert_equal(repr(x), "array([-1, 0, 1])")
|
| 682 |
+
np.set_printoptions(formatter={'all':None})
|
| 683 |
+
assert_equal(repr(x), "array([0, 1, 2])")
|
| 684 |
+
|
| 685 |
+
np.set_printoptions(formatter={'int':lambda x: str(x-1)})
|
| 686 |
+
assert_equal(repr(x), "array([-1, 0, 1])")
|
| 687 |
+
np.set_printoptions(formatter={'int_kind':None})
|
| 688 |
+
assert_equal(repr(x), "array([0, 1, 2])")
|
| 689 |
+
|
| 690 |
+
x = np.arange(3.)
|
| 691 |
+
np.set_printoptions(formatter={'float':lambda x: str(x-1)})
|
| 692 |
+
assert_equal(repr(x), "array([-1.0, 0.0, 1.0])")
|
| 693 |
+
np.set_printoptions(formatter={'float_kind':None})
|
| 694 |
+
assert_equal(repr(x), "array([0., 1., 2.])")
|
| 695 |
+
|
| 696 |
+
def test_override_repr(self):
|
| 697 |
+
x = np.arange(3)
|
| 698 |
+
np.set_printoptions(override_repr=lambda x: "FOO")
|
| 699 |
+
assert_equal(repr(x), "FOO")
|
| 700 |
+
np.set_printoptions(override_repr=None)
|
| 701 |
+
assert_equal(repr(x), "array([0, 1, 2])")
|
| 702 |
+
|
| 703 |
+
with np.printoptions(override_repr=lambda x: "BAR"):
|
| 704 |
+
assert_equal(repr(x), "BAR")
|
| 705 |
+
assert_equal(repr(x), "array([0, 1, 2])")
|
| 706 |
+
|
| 707 |
+
def test_0d_arrays(self):
|
| 708 |
+
assert_equal(str(np.array('café', '<U4')), 'café')
|
| 709 |
+
|
| 710 |
+
assert_equal(repr(np.array('café', '<U4')),
|
| 711 |
+
"array('café', dtype='<U4')")
|
| 712 |
+
assert_equal(str(np.array('test', np.str_)), 'test')
|
| 713 |
+
|
| 714 |
+
a = np.zeros(1, dtype=[('a', '<i4', (3,))])
|
| 715 |
+
assert_equal(str(a[0]), '([0, 0, 0],)')
|
| 716 |
+
|
| 717 |
+
assert_equal(repr(np.datetime64('2005-02-25')[...]),
|
| 718 |
+
"array('2005-02-25', dtype='datetime64[D]')")
|
| 719 |
+
|
| 720 |
+
assert_equal(repr(np.timedelta64('10', 'Y')[...]),
|
| 721 |
+
"array(10, dtype='timedelta64[Y]')")
|
| 722 |
+
|
| 723 |
+
# repr of 0d arrays is affected by printoptions
|
| 724 |
+
x = np.array(1)
|
| 725 |
+
np.set_printoptions(formatter={'all':lambda x: "test"})
|
| 726 |
+
assert_equal(repr(x), "array(test)")
|
| 727 |
+
# str is unaffected
|
| 728 |
+
assert_equal(str(x), "1")
|
| 729 |
+
|
| 730 |
+
# check `style` arg raises
|
| 731 |
+
assert_warns(DeprecationWarning, np.array2string,
|
| 732 |
+
np.array(1.), style=repr)
|
| 733 |
+
# but not in legacy mode
|
| 734 |
+
np.array2string(np.array(1.), style=repr, legacy='1.13')
|
| 735 |
+
# gh-10934 style was broken in legacy mode, check it works
|
| 736 |
+
np.array2string(np.array(1.), legacy='1.13')
|
| 737 |
+
|
| 738 |
+
def test_float_spacing(self):
|
| 739 |
+
x = np.array([1., 2., 3.])
|
| 740 |
+
y = np.array([1., 2., -10.])
|
| 741 |
+
z = np.array([100., 2., -1.])
|
| 742 |
+
w = np.array([-100., 2., 1.])
|
| 743 |
+
|
| 744 |
+
assert_equal(repr(x), 'array([1., 2., 3.])')
|
| 745 |
+
assert_equal(repr(y), 'array([ 1., 2., -10.])')
|
| 746 |
+
assert_equal(repr(np.array(y[0])), 'array(1.)')
|
| 747 |
+
assert_equal(repr(np.array(y[-1])), 'array(-10.)')
|
| 748 |
+
assert_equal(repr(z), 'array([100., 2., -1.])')
|
| 749 |
+
assert_equal(repr(w), 'array([-100., 2., 1.])')
|
| 750 |
+
|
| 751 |
+
assert_equal(repr(np.array([np.nan, np.inf])), 'array([nan, inf])')
|
| 752 |
+
assert_equal(repr(np.array([np.nan, -np.inf])), 'array([ nan, -inf])')
|
| 753 |
+
|
| 754 |
+
x = np.array([np.inf, 100000, 1.1234])
|
| 755 |
+
y = np.array([np.inf, 100000, -1.1234])
|
| 756 |
+
z = np.array([np.inf, 1.1234, -1e120])
|
| 757 |
+
np.set_printoptions(precision=2)
|
| 758 |
+
assert_equal(repr(x), 'array([ inf, 1.00e+05, 1.12e+00])')
|
| 759 |
+
assert_equal(repr(y), 'array([ inf, 1.00e+05, -1.12e+00])')
|
| 760 |
+
assert_equal(repr(z), 'array([ inf, 1.12e+000, -1.00e+120])')
|
| 761 |
+
|
| 762 |
+
def test_bool_spacing(self):
|
| 763 |
+
assert_equal(repr(np.array([True, True])),
|
| 764 |
+
'array([ True, True])')
|
| 765 |
+
assert_equal(repr(np.array([True, False])),
|
| 766 |
+
'array([ True, False])')
|
| 767 |
+
assert_equal(repr(np.array([True])),
|
| 768 |
+
'array([ True])')
|
| 769 |
+
assert_equal(repr(np.array(True)),
|
| 770 |
+
'array(True)')
|
| 771 |
+
assert_equal(repr(np.array(False)),
|
| 772 |
+
'array(False)')
|
| 773 |
+
|
| 774 |
+
def test_sign_spacing(self):
|
| 775 |
+
a = np.arange(4.)
|
| 776 |
+
b = np.array([1.234e9])
|
| 777 |
+
c = np.array([1.0 + 1.0j, 1.123456789 + 1.123456789j], dtype='c16')
|
| 778 |
+
|
| 779 |
+
assert_equal(repr(a), 'array([0., 1., 2., 3.])')
|
| 780 |
+
assert_equal(repr(np.array(1.)), 'array(1.)')
|
| 781 |
+
assert_equal(repr(b), 'array([1.234e+09])')
|
| 782 |
+
assert_equal(repr(np.array([0.])), 'array([0.])')
|
| 783 |
+
assert_equal(repr(c),
|
| 784 |
+
"array([1. +1.j , 1.12345679+1.12345679j])")
|
| 785 |
+
assert_equal(repr(np.array([0., -0.])), 'array([ 0., -0.])')
|
| 786 |
+
|
| 787 |
+
np.set_printoptions(sign=' ')
|
| 788 |
+
assert_equal(repr(a), 'array([ 0., 1., 2., 3.])')
|
| 789 |
+
assert_equal(repr(np.array(1.)), 'array( 1.)')
|
| 790 |
+
assert_equal(repr(b), 'array([ 1.234e+09])')
|
| 791 |
+
assert_equal(repr(c),
|
| 792 |
+
"array([ 1. +1.j , 1.12345679+1.12345679j])")
|
| 793 |
+
assert_equal(repr(np.array([0., -0.])), 'array([ 0., -0.])')
|
| 794 |
+
|
| 795 |
+
np.set_printoptions(sign='+')
|
| 796 |
+
assert_equal(repr(a), 'array([+0., +1., +2., +3.])')
|
| 797 |
+
assert_equal(repr(np.array(1.)), 'array(+1.)')
|
| 798 |
+
assert_equal(repr(b), 'array([+1.234e+09])')
|
| 799 |
+
assert_equal(repr(c),
|
| 800 |
+
"array([+1. +1.j , +1.12345679+1.12345679j])")
|
| 801 |
+
|
| 802 |
+
np.set_printoptions(legacy='1.13')
|
| 803 |
+
assert_equal(repr(a), 'array([ 0., 1., 2., 3.])')
|
| 804 |
+
assert_equal(repr(b), 'array([ 1.23400000e+09])')
|
| 805 |
+
assert_equal(repr(-b), 'array([ -1.23400000e+09])')
|
| 806 |
+
assert_equal(repr(np.array(1.)), 'array(1.0)')
|
| 807 |
+
assert_equal(repr(np.array([0.])), 'array([ 0.])')
|
| 808 |
+
assert_equal(repr(c),
|
| 809 |
+
"array([ 1.00000000+1.j , 1.12345679+1.12345679j])")
|
| 810 |
+
# gh-10383
|
| 811 |
+
assert_equal(str(np.array([-1., 10])), "[ -1. 10.]")
|
| 812 |
+
|
| 813 |
+
assert_raises(TypeError, np.set_printoptions, wrongarg=True)
|
| 814 |
+
|
| 815 |
+
def test_float_overflow_nowarn(self):
|
| 816 |
+
# make sure internal computations in FloatingFormat don't
|
| 817 |
+
# warn about overflow
|
| 818 |
+
repr(np.array([1e4, 0.1], dtype='f2'))
|
| 819 |
+
|
| 820 |
+
def test_sign_spacing_structured(self):
|
| 821 |
+
a = np.ones(2, dtype='<f,<f')
|
| 822 |
+
assert_equal(repr(a),
|
| 823 |
+
"array([(1., 1.), (1., 1.)], dtype=[('f0', '<f4'), ('f1', '<f4')])")
|
| 824 |
+
assert_equal(repr(a[0]),
|
| 825 |
+
"np.void((1.0, 1.0), dtype=[('f0', '<f4'), ('f1', '<f4')])")
|
| 826 |
+
|
| 827 |
+
def test_floatmode(self):
|
| 828 |
+
x = np.array([0.6104, 0.922, 0.457, 0.0906, 0.3733, 0.007244,
|
| 829 |
+
0.5933, 0.947, 0.2383, 0.4226], dtype=np.float16)
|
| 830 |
+
y = np.array([0.2918820979355541, 0.5064172631089138,
|
| 831 |
+
0.2848750619642916, 0.4342965294660567,
|
| 832 |
+
0.7326538397312751, 0.3459503329096204,
|
| 833 |
+
0.0862072768214508, 0.39112753029631175],
|
| 834 |
+
dtype=np.float64)
|
| 835 |
+
z = np.arange(6, dtype=np.float16)/10
|
| 836 |
+
c = np.array([1.0 + 1.0j, 1.123456789 + 1.123456789j], dtype='c16')
|
| 837 |
+
|
| 838 |
+
# also make sure 1e23 is right (is between two fp numbers)
|
| 839 |
+
w = np.array(['1e{}'.format(i) for i in range(25)], dtype=np.float64)
|
| 840 |
+
# note: we construct w from the strings `1eXX` instead of doing
|
| 841 |
+
# `10.**arange(24)` because it turns out the two are not equivalent in
|
| 842 |
+
# python. On some architectures `1e23 != 10.**23`.
|
| 843 |
+
wp = np.array([1.234e1, 1e2, 1e123])
|
| 844 |
+
|
| 845 |
+
# unique mode
|
| 846 |
+
np.set_printoptions(floatmode='unique')
|
| 847 |
+
assert_equal(repr(x),
|
| 848 |
+
"array([0.6104 , 0.922 , 0.457 , 0.0906 , 0.3733 , 0.007244,\n"
|
| 849 |
+
" 0.5933 , 0.947 , 0.2383 , 0.4226 ], dtype=float16)")
|
| 850 |
+
assert_equal(repr(y),
|
| 851 |
+
"array([0.2918820979355541 , 0.5064172631089138 , 0.2848750619642916 ,\n"
|
| 852 |
+
" 0.4342965294660567 , 0.7326538397312751 , 0.3459503329096204 ,\n"
|
| 853 |
+
" 0.0862072768214508 , 0.39112753029631175])")
|
| 854 |
+
assert_equal(repr(z),
|
| 855 |
+
"array([0. , 0.1, 0.2, 0.3, 0.4, 0.5], dtype=float16)")
|
| 856 |
+
assert_equal(repr(w),
|
| 857 |
+
"array([1.e+00, 1.e+01, 1.e+02, 1.e+03, 1.e+04, 1.e+05, 1.e+06, 1.e+07,\n"
|
| 858 |
+
" 1.e+08, 1.e+09, 1.e+10, 1.e+11, 1.e+12, 1.e+13, 1.e+14, 1.e+15,\n"
|
| 859 |
+
" 1.e+16, 1.e+17, 1.e+18, 1.e+19, 1.e+20, 1.e+21, 1.e+22, 1.e+23,\n"
|
| 860 |
+
" 1.e+24])")
|
| 861 |
+
assert_equal(repr(wp), "array([1.234e+001, 1.000e+002, 1.000e+123])")
|
| 862 |
+
assert_equal(repr(c),
|
| 863 |
+
"array([1. +1.j , 1.123456789+1.123456789j])")
|
| 864 |
+
|
| 865 |
+
# maxprec mode, precision=8
|
| 866 |
+
np.set_printoptions(floatmode='maxprec', precision=8)
|
| 867 |
+
assert_equal(repr(x),
|
| 868 |
+
"array([0.6104 , 0.922 , 0.457 , 0.0906 , 0.3733 , 0.007244,\n"
|
| 869 |
+
" 0.5933 , 0.947 , 0.2383 , 0.4226 ], dtype=float16)")
|
| 870 |
+
assert_equal(repr(y),
|
| 871 |
+
"array([0.2918821 , 0.50641726, 0.28487506, 0.43429653, 0.73265384,\n"
|
| 872 |
+
" 0.34595033, 0.08620728, 0.39112753])")
|
| 873 |
+
assert_equal(repr(z),
|
| 874 |
+
"array([0. , 0.1, 0.2, 0.3, 0.4, 0.5], dtype=float16)")
|
| 875 |
+
assert_equal(repr(w[::5]),
|
| 876 |
+
"array([1.e+00, 1.e+05, 1.e+10, 1.e+15, 1.e+20])")
|
| 877 |
+
assert_equal(repr(wp), "array([1.234e+001, 1.000e+002, 1.000e+123])")
|
| 878 |
+
assert_equal(repr(c),
|
| 879 |
+
"array([1. +1.j , 1.12345679+1.12345679j])")
|
| 880 |
+
|
| 881 |
+
# fixed mode, precision=4
|
| 882 |
+
np.set_printoptions(floatmode='fixed', precision=4)
|
| 883 |
+
assert_equal(repr(x),
|
| 884 |
+
"array([0.6104, 0.9219, 0.4570, 0.0906, 0.3733, 0.0072, 0.5933, 0.9468,\n"
|
| 885 |
+
" 0.2383, 0.4226], dtype=float16)")
|
| 886 |
+
assert_equal(repr(y),
|
| 887 |
+
"array([0.2919, 0.5064, 0.2849, 0.4343, 0.7327, 0.3460, 0.0862, 0.3911])")
|
| 888 |
+
assert_equal(repr(z),
|
| 889 |
+
"array([0.0000, 0.1000, 0.2000, 0.3000, 0.3999, 0.5000], dtype=float16)")
|
| 890 |
+
assert_equal(repr(w[::5]),
|
| 891 |
+
"array([1.0000e+00, 1.0000e+05, 1.0000e+10, 1.0000e+15, 1.0000e+20])")
|
| 892 |
+
assert_equal(repr(wp), "array([1.2340e+001, 1.0000e+002, 1.0000e+123])")
|
| 893 |
+
assert_equal(repr(np.zeros(3)), "array([0.0000, 0.0000, 0.0000])")
|
| 894 |
+
assert_equal(repr(c),
|
| 895 |
+
"array([1.0000+1.0000j, 1.1235+1.1235j])")
|
| 896 |
+
# for larger precision, representation error becomes more apparent:
|
| 897 |
+
np.set_printoptions(floatmode='fixed', precision=8)
|
| 898 |
+
assert_equal(repr(z),
|
| 899 |
+
"array([0.00000000, 0.09997559, 0.19995117, 0.30004883, 0.39990234,\n"
|
| 900 |
+
" 0.50000000], dtype=float16)")
|
| 901 |
+
|
| 902 |
+
# maxprec_equal mode, precision=8
|
| 903 |
+
np.set_printoptions(floatmode='maxprec_equal', precision=8)
|
| 904 |
+
assert_equal(repr(x),
|
| 905 |
+
"array([0.610352, 0.921875, 0.457031, 0.090576, 0.373291, 0.007244,\n"
|
| 906 |
+
" 0.593262, 0.946777, 0.238281, 0.422607], dtype=float16)")
|
| 907 |
+
assert_equal(repr(y),
|
| 908 |
+
"array([0.29188210, 0.50641726, 0.28487506, 0.43429653, 0.73265384,\n"
|
| 909 |
+
" 0.34595033, 0.08620728, 0.39112753])")
|
| 910 |
+
assert_equal(repr(z),
|
| 911 |
+
"array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5], dtype=float16)")
|
| 912 |
+
assert_equal(repr(w[::5]),
|
| 913 |
+
"array([1.e+00, 1.e+05, 1.e+10, 1.e+15, 1.e+20])")
|
| 914 |
+
assert_equal(repr(wp), "array([1.234e+001, 1.000e+002, 1.000e+123])")
|
| 915 |
+
assert_equal(repr(c),
|
| 916 |
+
"array([1.00000000+1.00000000j, 1.12345679+1.12345679j])")
|
| 917 |
+
|
| 918 |
+
# test unique special case (gh-18609)
|
| 919 |
+
a = np.float64.fromhex('-1p-97')
|
| 920 |
+
assert_equal(np.float64(np.array2string(a, floatmode='unique')), a)
|
| 921 |
+
|
| 922 |
+
def test_legacy_mode_scalars(self):
|
| 923 |
+
# in legacy mode, str of floats get truncated, and complex scalars
|
| 924 |
+
# use * for non-finite imaginary part
|
| 925 |
+
np.set_printoptions(legacy='1.13')
|
| 926 |
+
assert_equal(str(np.float64(1.123456789123456789)), '1.12345678912')
|
| 927 |
+
assert_equal(str(np.complex128(complex(1, np.nan))), '(1+nan*j)')
|
| 928 |
+
|
| 929 |
+
np.set_printoptions(legacy=False)
|
| 930 |
+
assert_equal(str(np.float64(1.123456789123456789)),
|
| 931 |
+
'1.1234567891234568')
|
| 932 |
+
assert_equal(str(np.complex128(complex(1, np.nan))), '(1+nanj)')
|
| 933 |
+
|
| 934 |
+
def test_legacy_stray_comma(self):
|
| 935 |
+
np.set_printoptions(legacy='1.13')
|
| 936 |
+
assert_equal(str(np.arange(10000)), '[ 0 1 2 ..., 9997 9998 9999]')
|
| 937 |
+
|
| 938 |
+
np.set_printoptions(legacy=False)
|
| 939 |
+
assert_equal(str(np.arange(10000)), '[ 0 1 2 ... 9997 9998 9999]')
|
| 940 |
+
|
| 941 |
+
def test_dtype_linewidth_wrapping(self):
|
| 942 |
+
np.set_printoptions(linewidth=75)
|
| 943 |
+
assert_equal(repr(np.arange(10,20., dtype='f4')),
|
| 944 |
+
"array([10., 11., 12., 13., 14., 15., 16., 17., 18., 19.], dtype=float32)")
|
| 945 |
+
assert_equal(repr(np.arange(10,23., dtype='f4')), textwrap.dedent("""\
|
| 946 |
+
array([10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22.],
|
| 947 |
+
dtype=float32)"""))
|
| 948 |
+
|
| 949 |
+
styp = '<U4'
|
| 950 |
+
assert_equal(repr(np.ones(3, dtype=styp)),
|
| 951 |
+
"array(['1', '1', '1'], dtype='{}')".format(styp))
|
| 952 |
+
assert_equal(repr(np.ones(12, dtype=styp)), textwrap.dedent("""\
|
| 953 |
+
array(['1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1'],
|
| 954 |
+
dtype='{}')""".format(styp)))
|
| 955 |
+
|
| 956 |
+
@pytest.mark.parametrize(
|
| 957 |
+
['native'],
|
| 958 |
+
[
|
| 959 |
+
('bool',),
|
| 960 |
+
('uint8',),
|
| 961 |
+
('uint16',),
|
| 962 |
+
('uint32',),
|
| 963 |
+
('uint64',),
|
| 964 |
+
('int8',),
|
| 965 |
+
('int16',),
|
| 966 |
+
('int32',),
|
| 967 |
+
('int64',),
|
| 968 |
+
('float16',),
|
| 969 |
+
('float32',),
|
| 970 |
+
('float64',),
|
| 971 |
+
('U1',), # 4-byte width string
|
| 972 |
+
],
|
| 973 |
+
)
|
| 974 |
+
def test_dtype_endianness_repr(self, native):
|
| 975 |
+
'''
|
| 976 |
+
there was an issue where
|
| 977 |
+
repr(array([0], dtype='<u2')) and repr(array([0], dtype='>u2'))
|
| 978 |
+
both returned the same thing:
|
| 979 |
+
array([0], dtype=uint16)
|
| 980 |
+
even though their dtypes have different endianness.
|
| 981 |
+
'''
|
| 982 |
+
native_dtype = np.dtype(native)
|
| 983 |
+
non_native_dtype = native_dtype.newbyteorder()
|
| 984 |
+
non_native_repr = repr(np.array([1], non_native_dtype))
|
| 985 |
+
native_repr = repr(np.array([1], native_dtype))
|
| 986 |
+
# preserve the sensible default of only showing dtype if nonstandard
|
| 987 |
+
assert ('dtype' in native_repr) ^ (native_dtype in _typelessdata),\
|
| 988 |
+
("an array's repr should show dtype if and only if the type "
|
| 989 |
+
'of the array is NOT one of the standard types '
|
| 990 |
+
'(e.g., int32, bool, float64).')
|
| 991 |
+
if non_native_dtype.itemsize > 1:
|
| 992 |
+
# if the type is >1 byte, the non-native endian version
|
| 993 |
+
# must show endianness.
|
| 994 |
+
assert non_native_repr != native_repr
|
| 995 |
+
assert f"dtype='{non_native_dtype.byteorder}" in non_native_repr
|
| 996 |
+
|
| 997 |
+
def test_linewidth_repr(self):
|
| 998 |
+
a = np.full(7, fill_value=2)
|
| 999 |
+
np.set_printoptions(linewidth=17)
|
| 1000 |
+
assert_equal(
|
| 1001 |
+
repr(a),
|
| 1002 |
+
textwrap.dedent("""\
|
| 1003 |
+
array([2, 2, 2,
|
| 1004 |
+
2, 2, 2,
|
| 1005 |
+
2])""")
|
| 1006 |
+
)
|
| 1007 |
+
np.set_printoptions(linewidth=17, legacy='1.13')
|
| 1008 |
+
assert_equal(
|
| 1009 |
+
repr(a),
|
| 1010 |
+
textwrap.dedent("""\
|
| 1011 |
+
array([2, 2, 2,
|
| 1012 |
+
2, 2, 2, 2])""")
|
| 1013 |
+
)
|
| 1014 |
+
|
| 1015 |
+
a = np.full(8, fill_value=2)
|
| 1016 |
+
|
| 1017 |
+
np.set_printoptions(linewidth=18, legacy=False)
|
| 1018 |
+
assert_equal(
|
| 1019 |
+
repr(a),
|
| 1020 |
+
textwrap.dedent("""\
|
| 1021 |
+
array([2, 2, 2,
|
| 1022 |
+
2, 2, 2,
|
| 1023 |
+
2, 2])""")
|
| 1024 |
+
)
|
| 1025 |
+
|
| 1026 |
+
np.set_printoptions(linewidth=18, legacy='1.13')
|
| 1027 |
+
assert_equal(
|
| 1028 |
+
repr(a),
|
| 1029 |
+
textwrap.dedent("""\
|
| 1030 |
+
array([2, 2, 2, 2,
|
| 1031 |
+
2, 2, 2, 2])""")
|
| 1032 |
+
)
|
| 1033 |
+
|
| 1034 |
+
def test_linewidth_str(self):
|
| 1035 |
+
a = np.full(18, fill_value=2)
|
| 1036 |
+
np.set_printoptions(linewidth=18)
|
| 1037 |
+
assert_equal(
|
| 1038 |
+
str(a),
|
| 1039 |
+
textwrap.dedent("""\
|
| 1040 |
+
[2 2 2 2 2 2 2 2
|
| 1041 |
+
2 2 2 2 2 2 2 2
|
| 1042 |
+
2 2]""")
|
| 1043 |
+
)
|
| 1044 |
+
np.set_printoptions(linewidth=18, legacy='1.13')
|
| 1045 |
+
assert_equal(
|
| 1046 |
+
str(a),
|
| 1047 |
+
textwrap.dedent("""\
|
| 1048 |
+
[2 2 2 2 2 2 2 2 2
|
| 1049 |
+
2 2 2 2 2 2 2 2 2]""")
|
| 1050 |
+
)
|
| 1051 |
+
|
| 1052 |
+
def test_edgeitems(self):
|
| 1053 |
+
np.set_printoptions(edgeitems=1, threshold=1)
|
| 1054 |
+
a = np.arange(27).reshape((3, 3, 3))
|
| 1055 |
+
assert_equal(
|
| 1056 |
+
repr(a),
|
| 1057 |
+
textwrap.dedent("""\
|
| 1058 |
+
array([[[ 0, ..., 2],
|
| 1059 |
+
...,
|
| 1060 |
+
[ 6, ..., 8]],
|
| 1061 |
+
|
| 1062 |
+
...,
|
| 1063 |
+
|
| 1064 |
+
[[18, ..., 20],
|
| 1065 |
+
...,
|
| 1066 |
+
[24, ..., 26]]], shape=(3, 3, 3))""")
|
| 1067 |
+
)
|
| 1068 |
+
|
| 1069 |
+
b = np.zeros((3, 3, 1, 1))
|
| 1070 |
+
assert_equal(
|
| 1071 |
+
repr(b),
|
| 1072 |
+
textwrap.dedent("""\
|
| 1073 |
+
array([[[[0.]],
|
| 1074 |
+
|
| 1075 |
+
...,
|
| 1076 |
+
|
| 1077 |
+
[[0.]]],
|
| 1078 |
+
|
| 1079 |
+
|
| 1080 |
+
...,
|
| 1081 |
+
|
| 1082 |
+
|
| 1083 |
+
[[[0.]],
|
| 1084 |
+
|
| 1085 |
+
...,
|
| 1086 |
+
|
| 1087 |
+
[[0.]]]], shape=(3, 3, 1, 1))""")
|
| 1088 |
+
)
|
| 1089 |
+
|
| 1090 |
+
# 1.13 had extra trailing spaces, and was missing newlines
|
| 1091 |
+
try:
|
| 1092 |
+
np.set_printoptions(legacy='1.13')
|
| 1093 |
+
assert_equal(repr(a), (
|
| 1094 |
+
"array([[[ 0, ..., 2],\n"
|
| 1095 |
+
" ..., \n"
|
| 1096 |
+
" [ 6, ..., 8]],\n"
|
| 1097 |
+
"\n"
|
| 1098 |
+
" ..., \n"
|
| 1099 |
+
" [[18, ..., 20],\n"
|
| 1100 |
+
" ..., \n"
|
| 1101 |
+
" [24, ..., 26]]])")
|
| 1102 |
+
)
|
| 1103 |
+
assert_equal(repr(b), (
|
| 1104 |
+
"array([[[[ 0.]],\n"
|
| 1105 |
+
"\n"
|
| 1106 |
+
" ..., \n"
|
| 1107 |
+
" [[ 0.]]],\n"
|
| 1108 |
+
"\n"
|
| 1109 |
+
"\n"
|
| 1110 |
+
" ..., \n"
|
| 1111 |
+
" [[[ 0.]],\n"
|
| 1112 |
+
"\n"
|
| 1113 |
+
" ..., \n"
|
| 1114 |
+
" [[ 0.]]]])")
|
| 1115 |
+
)
|
| 1116 |
+
finally:
|
| 1117 |
+
np.set_printoptions(legacy=False)
|
| 1118 |
+
|
| 1119 |
+
def test_edgeitems_structured(self):
|
| 1120 |
+
np.set_printoptions(edgeitems=1, threshold=1)
|
| 1121 |
+
A = np.arange(5*2*3, dtype="<i8").view([('i', "<i8", (5, 2, 3))])
|
| 1122 |
+
reprA = (
|
| 1123 |
+
"array([([[[ 0, ..., 2], [ 3, ..., 5]], ..., "
|
| 1124 |
+
"[[24, ..., 26], [27, ..., 29]]],)],\n"
|
| 1125 |
+
" dtype=[('i', '<i8', (5, 2, 3))])"
|
| 1126 |
+
)
|
| 1127 |
+
assert_equal(repr(A), reprA)
|
| 1128 |
+
|
| 1129 |
+
def test_bad_args(self):
|
| 1130 |
+
assert_raises(ValueError, np.set_printoptions, threshold=float('nan'))
|
| 1131 |
+
assert_raises(TypeError, np.set_printoptions, threshold='1')
|
| 1132 |
+
assert_raises(TypeError, np.set_printoptions, threshold=b'1')
|
| 1133 |
+
|
| 1134 |
+
assert_raises(TypeError, np.set_printoptions, precision='1')
|
| 1135 |
+
assert_raises(TypeError, np.set_printoptions, precision=1.5)
|
| 1136 |
+
|
| 1137 |
+
def test_unicode_object_array():
|
| 1138 |
+
expected = "array(['é'], dtype=object)"
|
| 1139 |
+
x = np.array(['\xe9'], dtype=object)
|
| 1140 |
+
assert_equal(repr(x), expected)
|
| 1141 |
+
|
| 1142 |
+
|
| 1143 |
+
class TestContextManager:
|
| 1144 |
+
def test_ctx_mgr(self):
|
| 1145 |
+
# test that context manager actually works
|
| 1146 |
+
with np.printoptions(precision=2):
|
| 1147 |
+
s = str(np.array([2.0]) / 3)
|
| 1148 |
+
assert_equal(s, '[0.67]')
|
| 1149 |
+
|
| 1150 |
+
def test_ctx_mgr_restores(self):
|
| 1151 |
+
# test that print options are actually restored
|
| 1152 |
+
opts = np.get_printoptions()
|
| 1153 |
+
with np.printoptions(precision=opts['precision'] - 1,
|
| 1154 |
+
linewidth=opts['linewidth'] - 4):
|
| 1155 |
+
pass
|
| 1156 |
+
assert_equal(np.get_printoptions(), opts)
|
| 1157 |
+
|
| 1158 |
+
def test_ctx_mgr_exceptions(self):
|
| 1159 |
+
# test that print options are restored even if an exception is raised
|
| 1160 |
+
opts = np.get_printoptions()
|
| 1161 |
+
try:
|
| 1162 |
+
with np.printoptions(precision=2, linewidth=11):
|
| 1163 |
+
raise ValueError
|
| 1164 |
+
except ValueError:
|
| 1165 |
+
pass
|
| 1166 |
+
assert_equal(np.get_printoptions(), opts)
|
| 1167 |
+
|
| 1168 |
+
def test_ctx_mgr_as_smth(self):
|
| 1169 |
+
opts = {"precision": 2}
|
| 1170 |
+
with np.printoptions(**opts) as ctx:
|
| 1171 |
+
saved_opts = ctx.copy()
|
| 1172 |
+
assert_equal({k: saved_opts[k] for k in opts}, opts)
|
| 1173 |
+
|
| 1174 |
+
|
| 1175 |
+
@pytest.mark.parametrize("dtype", "bhilqpBHILQPefdgFDG")
|
| 1176 |
+
@pytest.mark.parametrize("value", [0, 1])
|
| 1177 |
+
def test_scalar_repr_numbers(dtype, value):
|
| 1178 |
+
# Test NEP 51 scalar repr (and legacy option) for numeric types
|
| 1179 |
+
dtype = np.dtype(dtype)
|
| 1180 |
+
scalar = np.array(value, dtype=dtype)[()]
|
| 1181 |
+
assert isinstance(scalar, np.generic)
|
| 1182 |
+
|
| 1183 |
+
string = str(scalar)
|
| 1184 |
+
repr_string = string.strip("()") # complex may have extra brackets
|
| 1185 |
+
representation = repr(scalar)
|
| 1186 |
+
if dtype.char == "g":
|
| 1187 |
+
assert representation == f"np.longdouble('{repr_string}')"
|
| 1188 |
+
elif dtype.char == 'G':
|
| 1189 |
+
assert representation == f"np.clongdouble('{repr_string}')"
|
| 1190 |
+
else:
|
| 1191 |
+
normalized_name = np.dtype(f"{dtype.kind}{dtype.itemsize}").type.__name__
|
| 1192 |
+
assert representation == f"np.{normalized_name}({repr_string})"
|
| 1193 |
+
|
| 1194 |
+
with np.printoptions(legacy="1.25"):
|
| 1195 |
+
assert repr(scalar) == string
|
| 1196 |
+
|
| 1197 |
+
|
| 1198 |
+
@pytest.mark.parametrize("scalar, legacy_repr, representation", [
|
| 1199 |
+
(np.True_, "True", "np.True_"),
|
| 1200 |
+
(np.bytes_(b'a'), "b'a'", "np.bytes_(b'a')"),
|
| 1201 |
+
(np.str_('a'), "'a'", "np.str_('a')"),
|
| 1202 |
+
(np.datetime64("2012"),
|
| 1203 |
+
"numpy.datetime64('2012')", "np.datetime64('2012')"),
|
| 1204 |
+
(np.timedelta64(1), "numpy.timedelta64(1)", "np.timedelta64(1)"),
|
| 1205 |
+
(np.void((True, 2), dtype="?,<i8"),
|
| 1206 |
+
"(True, 2)",
|
| 1207 |
+
"np.void((True, 2), dtype=[('f0', '?'), ('f1', '<i8')])"),
|
| 1208 |
+
(np.void((1, 2), dtype="<f8,>f4"),
|
| 1209 |
+
"(1., 2.)",
|
| 1210 |
+
"np.void((1.0, 2.0), dtype=[('f0', '<f8'), ('f1', '>f4')])"),
|
| 1211 |
+
(np.void(b'a'), r"void(b'\x61')", r"np.void(b'\x61')"),
|
| 1212 |
+
])
|
| 1213 |
+
def test_scalar_repr_special(scalar, legacy_repr, representation):
|
| 1214 |
+
# Test NEP 51 scalar repr (and legacy option) for numeric types
|
| 1215 |
+
assert repr(scalar) == representation
|
| 1216 |
+
|
| 1217 |
+
with np.printoptions(legacy="1.25"):
|
| 1218 |
+
assert repr(scalar) == legacy_repr
|
| 1219 |
+
|
| 1220 |
+
def test_scalar_void_float_str():
|
| 1221 |
+
# Note that based on this currently we do not print the same as a tuple
|
| 1222 |
+
# would, since the tuple would include the repr() inside for floats, but
|
| 1223 |
+
# we do not do that.
|
| 1224 |
+
scalar = np.void((1.0, 2.0), dtype=[('f0', '<f8'), ('f1', '>f4')])
|
| 1225 |
+
assert str(scalar) == "(1.0, 2.0)"
|
| 1226 |
+
|
| 1227 |
+
@pytest.mark.skipif(IS_WASM, reason="wasm doesn't support asyncio")
|
| 1228 |
+
@pytest.mark.skipif(sys.version_info < (3, 11),
|
| 1229 |
+
reason="asyncio.barrier was added in Python 3.11")
|
| 1230 |
+
def test_printoptions_asyncio_safe():
|
| 1231 |
+
asyncio = pytest.importorskip("asyncio")
|
| 1232 |
+
|
| 1233 |
+
b = asyncio.Barrier(2)
|
| 1234 |
+
|
| 1235 |
+
async def legacy_113():
|
| 1236 |
+
np.set_printoptions(legacy='1.13', precision=12)
|
| 1237 |
+
await b.wait()
|
| 1238 |
+
po = np.get_printoptions()
|
| 1239 |
+
assert po['legacy'] == '1.13'
|
| 1240 |
+
assert po['precision'] == 12
|
| 1241 |
+
orig_linewidth = po['linewidth']
|
| 1242 |
+
with np.printoptions(linewidth=34, legacy='1.21'):
|
| 1243 |
+
po = np.get_printoptions()
|
| 1244 |
+
assert po['legacy'] == '1.21'
|
| 1245 |
+
assert po['precision'] == 12
|
| 1246 |
+
assert po['linewidth'] == 34
|
| 1247 |
+
po = np.get_printoptions()
|
| 1248 |
+
assert po['linewidth'] == orig_linewidth
|
| 1249 |
+
assert po['legacy'] == '1.13'
|
| 1250 |
+
assert po['precision'] == 12
|
| 1251 |
+
|
| 1252 |
+
async def legacy_125():
|
| 1253 |
+
np.set_printoptions(legacy='1.25', precision=7)
|
| 1254 |
+
await b.wait()
|
| 1255 |
+
po = np.get_printoptions()
|
| 1256 |
+
assert po['legacy'] == '1.25'
|
| 1257 |
+
assert po['precision'] == 7
|
| 1258 |
+
orig_linewidth = po['linewidth']
|
| 1259 |
+
with np.printoptions(linewidth=6, legacy='1.13'):
|
| 1260 |
+
po = np.get_printoptions()
|
| 1261 |
+
assert po['legacy'] == '1.13'
|
| 1262 |
+
assert po['precision'] == 7
|
| 1263 |
+
assert po['linewidth'] == 6
|
| 1264 |
+
po = np.get_printoptions()
|
| 1265 |
+
assert po['linewidth'] == orig_linewidth
|
| 1266 |
+
assert po['legacy'] == '1.25'
|
| 1267 |
+
assert po['precision'] == 7
|
| 1268 |
+
|
| 1269 |
+
async def main():
|
| 1270 |
+
await asyncio.gather(legacy_125(), legacy_125())
|
| 1271 |
+
|
| 1272 |
+
loop = asyncio.new_event_loop()
|
| 1273 |
+
asyncio.run(main())
|
| 1274 |
+
loop.close()
|
| 1275 |
+
|
| 1276 |
+
@pytest.mark.skipif(IS_WASM, reason="wasm doesn't support threads")
def test_multithreaded_array_printing():
    """The dragon4 implementation uses a static scratch space for
    performance reasons; run the float-mode printing test from many
    threads at once to make sure that scratch space is set up in a
    thread-safe manner."""
    run_threaded(TestPrintOptions().test_floatmode, 500)
|
janus/lib/python3.10/site-packages/numpy/_core/tests/test_casting_floatingpoint_errors.py
ADDED
|
@@ -0,0 +1,154 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pytest
|
| 2 |
+
from pytest import param
|
| 3 |
+
from numpy.testing import IS_WASM
|
| 4 |
+
import numpy as np
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def values_and_dtypes():
    """
    Generate value+dtype pairs that generate floating point errors during
    casts. The invalid casts to integers will generate "invalid" value
    warnings, the float casts all generate "overflow".

    (The Python int/float paths don't need to get tested in all the same
    situations, but it does not hurt.)
    """
    # Casting to float16:
    yield param(70000, "float16", id="int-to-f2")
    yield param("70000", "float16", id="str-to-f2")
    yield param(70000.0, "float16", id="float-to-f2")
    yield param(np.longdouble(70000.), "float16", id="longdouble-to-f2")
    yield param(np.float64(70000.), "float16", id="double-to-f2")
    # FIX: this id previously duplicated "float-to-f2" above:
    yield param(np.float32(70000.), "float16", id="single-to-f2")
    # Casting to float32 (FIX: three of these ids previously said "-f2"):
    yield param(10**100, "float32", id="int-to-f4")
    yield param(1e100, "float32", id="float-to-f4")
    yield param(np.longdouble(1e300), "float32", id="longdouble-to-f4")
    yield param(np.float64(1e300), "float32", id="double-to-f4")
    # Casting to float64:
    # If longdouble is double-double, its max can be rounded down to the double
    # max. So we correct the double spacing (a bit weird, admittedly):
    max_ld = np.finfo(np.longdouble).max
    spacing = np.spacing(np.nextafter(np.finfo("f8").max, 0))
    if max_ld - spacing > np.finfo("f8").max:
        yield param(np.finfo(np.longdouble).max, "float64",
                    id="longdouble-to-f8")

    # Cast to complex64 (FIX: comment previously said "complex32" and two
    # params shared the id "complex-to-c8"):
    yield param(2e300, "complex64", id="float-to-c8")
    yield param(2e300+0j, "complex64", id="complex-to-c8")
    yield param(2e300j, "complex64", id="imag-complex-to-c8")
    yield param(np.longdouble(2e300), "complex64", id="longdouble-to-c8")

    # Invalid float to integer casts:
    with np.errstate(over="ignore"):
        for to_dt in np.typecodes["AllInteger"]:
            for value in [np.inf, np.nan]:
                for from_dt in np.typecodes["AllFloat"]:
                    from_dt = np.dtype(from_dt)
                    from_val = from_dt.type(value)

                    yield param(from_val, to_dt, id=f"{from_val}-to-{to_dt}")
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def check_operations(dtype, value):
    """
    There are many dedicated paths in NumPy which cast and should check for
    floating point errors which occurred during those casts.

    Yields zero-argument callables; each one exercises a different NumPy
    entry point that casts ``value`` into storage of ``dtype``.
    """
    if dtype.kind != 'i':
        # These assignments use the stricter setitem logic:
        # NOTE(review): only *signed* integers are excluded here (unsigned
        # 'u' kinds still run) — presumably setitem raises rather than
        # warns for them; confirm against the setitem implementation.
        def assignment():
            arr = np.empty(3, dtype=dtype)
            arr[0] = value

        yield assignment

        def fill():
            arr = np.empty(3, dtype=dtype)
            arr.fill(value)

        yield fill

    # copyto with a scalar source:
    def copyto_scalar():
        arr = np.empty(3, dtype=dtype)
        np.copyto(arr, value, casting="unsafe")

    yield copyto_scalar

    # copyto with an array source:
    def copyto():
        arr = np.empty(3, dtype=dtype)
        np.copyto(arr, np.array([value, value, value]), casting="unsafe")

    yield copyto

    # masked variants take a different C path:
    def copyto_scalar_masked():
        arr = np.empty(3, dtype=dtype)
        np.copyto(arr, value, casting="unsafe",
                  where=[True, False, True])

    yield copyto_scalar_masked

    def copyto_masked():
        arr = np.empty(3, dtype=dtype)
        np.copyto(arr, np.array([value, value, value]), casting="unsafe",
                  where=[True, False, True])

    yield copyto_masked

    # plain astype over contiguous data:
    def direct_cast():
        np.array([value, value, value]).astype(dtype)

    yield direct_cast

    # astype over a strided, multi-dimensional view:
    def direct_cast_nd_strided():
        arr = np.full((5, 5, 5), fill_value=value)[:, ::2, :]
        arr.astype(dtype)

    yield direct_cast_nd_strided

    # fancy-indexing assignment paths:
    def boolean_array_assignment():
        arr = np.empty(3, dtype=dtype)
        arr[[True, False, True]] = np.array([value, value])

    yield boolean_array_assignment

    def integer_array_assignment():
        arr = np.empty(3, dtype=dtype)
        values = np.array([value, value])

        arr[[0, 1]] = values

    yield integer_array_assignment

    def integer_array_assignment_with_subspace():
        arr = np.empty((5, 3), dtype=dtype)
        values = np.array([value, value, value])

        arr[[0, 2]] = values

    yield integer_array_assignment_with_subspace

    # assignment through the flat iterator:
    def flat_assignment():
        arr = np.empty((3,), dtype=dtype)
        values = np.array([value, value, value])
        arr.flat[:] = values

    yield flat_assignment
|
| 138 |
+
|
| 139 |
+
@pytest.mark.skipif(IS_WASM, reason="no wasm fp exception support")
@pytest.mark.parametrize(["value", "dtype"], values_and_dtypes())
@pytest.mark.filterwarnings("ignore::numpy.exceptions.ComplexWarning")
def test_floatingpoint_errors_casting(dtype, value):
    """Every cast path must warn under the default errstate and raise
    ``FloatingPointError`` under ``errstate(all="raise")``.

    Integer targets report "invalid" (inf/nan has no integer value);
    floating targets report "overflow".
    """
    dtype = np.dtype(dtype)
    # loop-invariant; the original recomputed np.dtype() and could have
    # recomputed this on every iteration for no reason:
    match = "invalid" if dtype.kind in 'iu' else "overflow"

    for operation in check_operations(dtype, value):
        with pytest.warns(RuntimeWarning, match=match):
            operation()

        with np.errstate(all="raise"):
            with pytest.raises(FloatingPointError, match=match):
                operation()
|
| 154 |
+
|
janus/lib/python3.10/site-packages/numpy/_core/tests/test_conversion_utils.py
ADDED
|
@@ -0,0 +1,209 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Tests for numpy/_core/src/multiarray/conversion_utils.c
|
| 3 |
+
"""
|
| 4 |
+
import re
|
| 5 |
+
import sys
|
| 6 |
+
|
| 7 |
+
import pytest
|
| 8 |
+
|
| 9 |
+
import numpy as np
|
| 10 |
+
import numpy._core._multiarray_tests as mt
|
| 11 |
+
from numpy._core.multiarray import CLIP, WRAP, RAISE
|
| 12 |
+
from numpy.testing import assert_warns, IS_PYPY
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class StringConverterTestCase:
    """Shared checks for the PyArray_*Converter string-argument wrappers.

    Subclasses set ``conv`` to the C wrapper under test and adjust the class
    flags below to describe that converter's matching behavior.
    """
    allow_bytes = True       # converter also accepts ASCII-encoded bytes
    case_insensitive = True  # converter matches regardless of case
    exact_match = False      # converter requires the full spelling
    warn = True              # abbreviations / case variants warn

    def _check_value_error(self, val):
        """Assert ``conv(val)`` raises ValueError echoing the bad value."""
        pattern = r'\(got {}\)'.format(re.escape(repr(val)))
        # (the original bound the raises context as ``exc`` but never used it)
        with pytest.raises(ValueError, match=pattern):
            self.conv(val)

    def _check_conv_assert_warn(self, val, expected):
        """Assert conversion succeeds, with a DeprecationWarning if ``warn``."""
        if self.warn:
            with assert_warns(DeprecationWarning):
                assert self.conv(val) == expected
        else:
            assert self.conv(val) == expected

    def _check(self, val, expected):
        """Takes valid non-deprecated inputs for converters,
        runs converters on inputs, checks correctness of outputs,
        warnings and errors"""
        assert self.conv(val) == expected

        if self.allow_bytes:
            assert self.conv(val.encode('ascii')) == expected
        else:
            with pytest.raises(TypeError):
                self.conv(val.encode('ascii'))

        if len(val) != 1:
            if self.exact_match:
                self._check_value_error(val[:1])
                self._check_value_error(val + '\0')
            else:
                self._check_conv_assert_warn(val[:1], expected)

        if self.case_insensitive:
            if val != val.lower():
                self._check_conv_assert_warn(val.lower(), expected)
            if val != val.upper():
                self._check_conv_assert_warn(val.upper(), expected)
        else:
            if val != val.lower():
                self._check_value_error(val.lower())
            if val != val.upper():
                self._check_value_error(val.upper())

    def test_wrong_type(self):
        # common cases which apply to all the below
        with pytest.raises(TypeError):
            self.conv({})
        with pytest.raises(TypeError):
            self.conv([])

    def test_wrong_value(self):
        # nonsense strings
        self._check_value_error('')
        self._check_value_error('\N{greek small letter pi}')

        if self.allow_bytes:
            self._check_value_error(b'')
            # bytes which can't be converted to strings via utf8
            self._check_value_error(b"\xFF")
        if self.exact_match:
            self._check_value_error("there's no way this is supported")
| 81 |
+
|
| 82 |
+
|
| 83 |
+
class TestByteorderConverter(StringConverterTestCase):
    """ Tests of PyArray_ByteorderConverter """
    conv = mt.run_byteorder_converter
    warn = False

    def test_valid(self):
        # each byteorder has a word spelling and (usually) a symbol alias
        spellings = {
            'NPY_BIG': ['big', '>'],
            'NPY_LITTLE': ['little', '<'],
            'NPY_NATIVE': ['native', '='],
            'NPY_IGNORE': ['ignore', '|'],
            'NPY_SWAP': ['swap'],
        }
        for expected, aliases in spellings.items():
            for s in aliases:
                self._check(s, expected)
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
class TestSortkindConverter(StringConverterTestCase):
    """ Tests of PyArray_SortkindConverter """
    conv = mt.run_sortkind_converter
    warn = False

    def test_valid(self):
        # 'mergesort' is retained as an alias of the stable sort
        for spelling, expected in [('quicksort', 'NPY_QUICKSORT'),
                                   ('heapsort', 'NPY_HEAPSORT'),
                                   ('mergesort', 'NPY_STABLESORT'),
                                   ('stable', 'NPY_STABLESORT')]:
            self._check(spelling, expected)
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
class TestSelectkindConverter(StringConverterTestCase):
    """ Tests of PyArray_SelectkindConverter """
    conv = mt.run_selectkind_converter
    case_insensitive = False
    exact_match = True

    def test_valid(self):
        # only a single selection algorithm is exposed
        self._check('introselect', 'NPY_INTROSELECT')
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
class TestSearchsideConverter(StringConverterTestCase):
    """ Tests of PyArray_SearchsideConverter """
    conv = mt.run_searchside_converter

    def test_valid(self):
        for side, expected in (('left', 'NPY_SEARCHLEFT'),
                               ('right', 'NPY_SEARCHRIGHT')):
            self._check(side, expected)
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
class TestOrderConverter(StringConverterTestCase):
    """ Tests of PyArray_OrderConverter """
    conv = mt.run_order_converter
    warn = False

    def test_valid(self):
        for order, expected in zip('cfak',
                                   ['NPY_CORDER', 'NPY_FORTRANORDER',
                                    'NPY_ANYORDER', 'NPY_KEEPORDER']):
            self._check(order, expected)

    def test_flatten_invalid_order(self):
        # invalid after gh-14596
        with pytest.raises(ValueError):
            self.conv('Z')
        for order in [False, True, 0, 8]:
            with pytest.raises(TypeError):
                self.conv(order)
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
class TestClipmodeConverter(StringConverterTestCase):
    """ Tests of PyArray_ClipmodeConverter """
    conv = mt.run_clipmode_converter

    def test_valid(self):
        for mode, expected in [('clip', 'NPY_CLIP'),
                               ('wrap', 'NPY_WRAP'),
                               ('raise', 'NPY_RAISE')]:
            self._check(mode, expected)

        # integer values allowed here
        for constant, expected in [(CLIP, 'NPY_CLIP'),
                                   (WRAP, 'NPY_WRAP'),
                                   (RAISE, 'NPY_RAISE')]:
            assert self.conv(constant) == expected
|
| 163 |
+
|
| 164 |
+
|
| 165 |
+
class TestCastingConverter(StringConverterTestCase):
    """ Tests of PyArray_CastingConverter """
    conv = mt.run_casting_converter
    case_insensitive = False
    exact_match = True

    def test_valid(self):
        # each casting rule maps onto NPY_<RULE>_CASTING
        for rule in ["no", "equiv", "safe", "same_kind", "unsafe"]:
            self._check(rule, f"NPY_{rule.upper()}_CASTING")
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
class TestIntpConverter:
    """ Tests of PyArray_IntpConverter """
    conv = mt.run_intp_converter

    def test_basic(self):
        # scalars and sequences are normalized to a tuple
        assert self.conv(1) == (1,)
        assert self.conv((1, 2)) == (1, 2)
        assert self.conv([1, 2]) == (1, 2)
        assert self.conv(()) == ()

    def test_none(self):
        # once the warning expires, this will raise TypeError
        with pytest.warns(DeprecationWarning):
            assert self.conv(None) == ()

    @pytest.mark.skipif(IS_PYPY and sys.implementation.version <= (7, 3, 8),
                        reason="PyPy bug in error formatting")
    def test_float(self):
        for bad in (1.0, [1, 1.0]):
            with pytest.raises(TypeError):
                self.conv(bad)

    def test_too_large(self):
        with pytest.raises(ValueError):
            self.conv(2**64)

    def test_too_many_dims(self):
        # 64 dimensions convert; 65 do not
        assert self.conv([1]*64) == (1,)*64
        with pytest.raises(ValueError):
            self.conv([1]*65)
|
janus/lib/python3.10/site-packages/numpy/_core/tests/test_custom_dtypes.py
ADDED
|
@@ -0,0 +1,311 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from tempfile import NamedTemporaryFile
|
| 2 |
+
|
| 3 |
+
import pytest
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
from numpy.testing import assert_array_equal
|
| 7 |
+
from numpy._core._multiarray_umath import (
|
| 8 |
+
_discover_array_parameters as discover_array_params, _get_sfloat_dtype)
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
# Scaled-float test DType class (repr name ``_ScaledFloatTestDType``)
# exposed by the C test module; ``SF(scaling)`` builds a descriptor instance.
SF = _get_sfloat_dtype()
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class TestSFloat:
    """Tests of the parametric "scaled float" test DType ``SF``.

    ``SF(scaling)`` stores float64 data divided by ``scaling`` (see
    ``_get_array``); most tests therefore compare results against plain
    float64 arrays obtained via ``.view(np.float64)``.
    """

    def _get_array(self, scaling, aligned=True):
        # Helper: logical values [1., 2., 3.] as an SF(scaling) array; the
        # raw storage is pre-divided so the logical values are unchanged.
        if not aligned:
            # carve a deliberately misaligned float64 buffer
            a = np.empty(3*8 + 1, dtype=np.uint8)[1:]
            a = a.view(np.float64)
            a[:] = [1., 2., 3.]
        else:
            a = np.array([1., 2., 3.])

        a *= 1./scaling  # the casting code also uses the reciprocal.
        return a.view(SF(scaling))

    def test_sfloat_rescaled(self):
        # scaled_by multiplies onto the existing scaling factor
        sf = SF(1.)
        sf2 = sf.scaled_by(2.)
        assert sf2.get_scaling() == 2.
        sf6 = sf2.scaled_by(3.)
        assert sf6.get_scaling() == 6.

    def test_class_discovery(self):
        # This does not test much, since we always discover the scaling as 1.
        # But most of NumPy (when writing) does not understand DType classes
        dt, _ = discover_array_params([1., 2., 3.], dtype=SF)
        assert dt == SF(1.)

    @pytest.mark.parametrize("scaling", [1., -1., 2.])
    def test_scaled_float_from_floats(self, scaling):
        # array creation directly with an SF descriptor
        a = np.array([1., 2., 3.], dtype=SF(scaling))

        assert a.dtype.get_scaling() == scaling
        assert_array_equal(scaling * a.view(np.float64), [1., 2., 3.])

    def test_repr(self):
        # Check the repr, mainly to cover the code paths:
        assert repr(SF(scaling=1.)) == "_ScaledFloatTestDType(scaling=1.0)"

    def test_dtype_name(self):
        assert SF(1.).name == "_ScaledFloatTestDType64"

    def test_sfloat_structured_dtype_printing(self):
        dt = np.dtype([("id", int), ("value", SF(0.5))])
        # repr of structured dtypes need special handling because the
        # implementation bypasses the object repr
        assert "('value', '_ScaledFloatTestDType64')" in repr(dt)

    @pytest.mark.parametrize("scaling", [1., -1., 2.])
    def test_sfloat_from_float(self, scaling):
        # same check as test_scaled_float_from_floats, but via astype
        a = np.array([1., 2., 3.]).astype(dtype=SF(scaling))

        assert a.dtype.get_scaling() == scaling
        assert_array_equal(scaling * a.view(np.float64), [1., 2., 3.])

    @pytest.mark.parametrize("aligned", [True, False])
    @pytest.mark.parametrize("scaling", [1., -1., 2.])
    def test_sfloat_getitem(self, aligned, scaling):
        a = self._get_array(1., aligned)
        assert a.tolist() == [1., 2., 3.]

    @pytest.mark.parametrize("aligned", [True, False])
    def test_sfloat_casts(self, aligned):
        a = self._get_array(1., aligned)

        # a sign change is "equiv" but not "no":
        assert np.can_cast(a, SF(-1.), casting="equiv")
        assert not np.can_cast(a, SF(-1.), casting="no")
        na = a.astype(SF(-1.))
        assert_array_equal(-1 * na.view(np.float64), a.view(np.float64))

        # a different factor is only "same_kind" (cf. test_addition_cast_safety):
        assert np.can_cast(a, SF(2.), casting="same_kind")
        assert not np.can_cast(a, SF(2.), casting="safe")
        a2 = a.astype(SF(2.))
        assert_array_equal(2 * a2.view(np.float64), a.view(np.float64))

    @pytest.mark.parametrize("aligned", [True, False])
    def test_sfloat_cast_internal_errors(self, aligned):
        # the factor 2e300/2e-300 is non-finite, hitting the cast error path
        a = self._get_array(2e300, aligned)

        with pytest.raises(TypeError,
                match="error raised inside the core-loop: non-finite factor!"):
            a.astype(SF(2e-300))

    def test_sfloat_promotion(self):
        # the larger scaling wins, independent of operand order
        assert np.result_type(SF(2.), SF(3.)) == SF(3.)
        assert np.result_type(SF(3.), SF(2.)) == SF(3.)
        # Float64 -> SF(1.) and then promotes normally, so both of this work:
        assert np.result_type(SF(3.), np.float64) == SF(3.)
        assert np.result_type(np.float64, SF(0.5)) == SF(1.)

        # Test an undefined promotion:
        with pytest.raises(TypeError):
            np.result_type(SF(1.), np.int64)

    def test_basic_multiply(self):
        a = self._get_array(2.)
        b = self._get_array(4.)

        res = a * b
        # multiplies dtype scaling and content separately:
        assert res.dtype.get_scaling() == 8.
        expected_view = a.view(np.float64) * b.view(np.float64)
        assert_array_equal(res.view(np.float64), expected_view)

    def test_possible_and_impossible_reduce(self):
        # For reductions to work, the first and last operand must have the
        # same dtype.  For this parametric DType that is not necessarily true.
        a = self._get_array(2.)
        # Addition reduction works (as of writing requires to pass initial
        # because setting a scaled-float from the default `0` fails).
        res = np.add.reduce(a, initial=0.)
        assert res == a.astype(np.float64).sum()

        # But each multiplication changes the factor, so a reduction is not
        # possible (the relaxed version of the old refusal to handle any
        # flexible dtype).
        with pytest.raises(TypeError,
                match="the resolved dtypes are not compatible"):
            np.multiply.reduce(a)

    def test_basic_ufunc_at(self):
        # ufunc.at on SF must match the same operation on the raw floats
        float_a = np.array([1., 2., 3.])
        b = self._get_array(2.)

        float_b = b.view(np.float64).copy()
        np.multiply.at(float_b, [1, 1, 1], float_a)
        np.multiply.at(b, [1, 1, 1], float_a)

        assert_array_equal(b.view(np.float64), float_b)

    def test_basic_multiply_promotion(self):
        float_a = np.array([1., 2., 3.])
        b = self._get_array(2.)

        res1 = float_a * b
        res2 = b * float_a

        # one factor is one, so we get the factor of b:
        assert res1.dtype == res2.dtype == b.dtype
        expected_view = float_a * b.view(np.float64)
        assert_array_equal(res1.view(np.float64), expected_view)
        assert_array_equal(res2.view(np.float64), expected_view)

        # Check that promotion works when `out` is used:
        np.multiply(b, float_a, out=res2)
        with pytest.raises(TypeError):
            # The promoter accepts this (maybe it should not), but the SFloat
            # result cannot be cast to integer:
            np.multiply(b, float_a, out=np.arange(3))

    def test_basic_addition(self):
        a = self._get_array(2.)
        b = self._get_array(4.)

        res = a + b
        # addition uses the type promotion rules for the result:
        assert res.dtype == np.result_type(a.dtype, b.dtype)
        expected_view = (a.astype(res.dtype).view(np.float64) +
                         b.astype(res.dtype).view(np.float64))
        assert_array_equal(res.view(np.float64), expected_view)

    def test_addition_cast_safety(self):
        """The addition method is special for the scaled float, because it
        includes the "cast" between different factors, thus cast-safety
        is influenced by the implementation.
        """
        a = self._get_array(2.)
        b = self._get_array(-2.)
        c = self._get_array(3.)

        # sign change is "equiv":
        np.add(a, b, casting="equiv")
        with pytest.raises(TypeError):
            np.add(a, b, casting="no")

        # Different factor is "same_kind" (default) so check that "safe" fails
        with pytest.raises(TypeError):
            np.add(a, c, casting="safe")

        # Check that casting the output fails also (done by the ufunc here)
        with pytest.raises(TypeError):
            np.add(a, a, out=c, casting="safe")

    @pytest.mark.parametrize("ufunc",
            [np.logical_and, np.logical_or, np.logical_xor])
    def test_logical_ufuncs_casts_to_bool(self, ufunc):
        a = self._get_array(2.)
        a[0] = 0.  # make sure first element is considered False.

        float_equiv = a.astype(float)
        expected = ufunc(float_equiv, float_equiv)
        res = ufunc(a, a)
        assert_array_equal(res, expected)

        # also check that the same works for reductions:
        expected = ufunc.reduce(float_equiv)
        res = ufunc.reduce(a)
        assert_array_equal(res, expected)

        # The output casting does not match the bool, bool -> bool loop:
        with pytest.raises(TypeError):
            ufunc(a, a, out=np.empty(a.shape, dtype=int), casting="equiv")

    def test_wrapped_and_wrapped_reductions(self):
        a = self._get_array(2.)
        float_equiv = a.astype(float)

        expected = np.hypot(float_equiv, float_equiv)
        res = np.hypot(a, a)
        assert res.dtype == a.dtype
        # undo the scaling of 2 before comparing against the float result
        res_float = res.view(np.float64) * 2
        assert_array_equal(res_float, expected)

        # Also check reduction (keepdims, due to incorrect getitem)
        res = np.hypot.reduce(a, keepdims=True)
        assert res.dtype == a.dtype
        expected = np.hypot.reduce(float_equiv, keepdims=True)
        assert res.view(np.float64) * 2 == expected

    def test_astype_class(self):
        # Very simple test that we accept `.astype()` also on the class.
        # ScaledFloat always returns the default descriptor, but it does
        # check the relevant code paths.
        arr = np.array([1., 2., 3.], dtype=object)

        res = arr.astype(SF)  # passing the class
        expected = arr.astype(SF(1.))  # above will have discovered 1. scaling
        assert_array_equal(res.view(np.float64), expected.view(np.float64))

    def test_creation_class(self):
        # passing in a dtype class should return the default descriptor
        arr1 = np.array([1., 2., 3.], dtype=SF)
        assert arr1.dtype == SF(1.)
        arr2 = np.array([1., 2., 3.], dtype=SF(1.))
        assert_array_equal(arr1.view(np.float64), arr2.view(np.float64))
        assert arr1.dtype == arr2.dtype

        assert np.empty(3, dtype=SF).dtype == SF(1.)
        assert np.empty_like(arr1, dtype=SF).dtype == SF(1.)
        assert np.zeros(3, dtype=SF).dtype == SF(1.)
        assert np.zeros_like(arr1, dtype=SF).dtype == SF(1.)

    def test_np_save_load(self):
        # this monkeypatch is needed because pickle
        # uses the repr of a type to reconstruct it
        np._ScaledFloatTestDType = SF

        arr = np.array([1.0, 2.0, 3.0], dtype=SF(1.0))

        # adapted from RoundtripTest.roundtrip in np.save tests
        with NamedTemporaryFile("wb", delete=False, suffix=".npz") as f:
            with pytest.warns(UserWarning) as record:
                np.savez(f.name, arr)

        assert len(record) == 1

        with np.load(f.name, allow_pickle=True) as data:
            larr = data["arr_0"]
        assert_array_equal(arr.view(np.float64), larr.view(np.float64))
        assert larr.dtype == arr.dtype == SF(1.0)

        del np._ScaledFloatTestDType

    def test_flatiter(self):
        arr = np.array([1.0, 2.0, 3.0], dtype=SF(1.0))

        for i, val in enumerate(arr.flat):
            assert arr[i] == val

    @pytest.mark.parametrize(
        "index", [
            [1, 2], ..., slice(None, 2, None),
            np.array([True, True, False]), np.array([0, 1])
        ], ids=["int_list", "ellipsis", "slice", "bool_array", "int_array"])
    def test_flatiter_index(self, index):
        # flat-iterator indexing must agree with regular indexing, for
        # both reads and writes
        arr = np.array([1.0, 2.0, 3.0], dtype=SF(1.0))
        np.testing.assert_array_equal(
            arr[index].view(np.float64), arr.flat[index].view(np.float64))

        arr2 = arr.copy()
        arr[index] = 5.0
        arr2.flat[index] = 5.0
        np.testing.assert_array_equal(
            arr.view(np.float64), arr2.view(np.float64))
|
| 296 |
+
|
| 297 |
+
def test_type_pickle():
    """SF itself can be pickled (unpickling needs the monkeypatched name)."""
    # can't actually unpickle, but we can pickle (if in namespace)
    import pickle

    # pickle records the module-qualified name of a type to reconstruct it,
    # so SF must temporarily live in the ``numpy`` namespace
    np._ScaledFloatTestDType = SF
    try:
        s = pickle.dumps(SF)
        res = pickle.loads(s)
        assert res is SF
    finally:
        # FIX: always undo the monkeypatch — the original leaked the
        # attribute if pickling raised
        del np._ScaledFloatTestDType
|
| 308 |
+
|
| 309 |
+
|
| 310 |
+
def test_is_numeric():
    """The scaled-float DType advertises itself as numeric."""
    assert SF._is_numeric
|
janus/lib/python3.10/site-packages/numpy/_core/tests/test_datetime.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
janus/lib/python3.10/site-packages/numpy/_core/tests/test_dlpack.py
ADDED
|
@@ -0,0 +1,178 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sys
|
| 2 |
+
import pytest
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
from numpy.testing import assert_array_equal, IS_PYPY
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def new_and_old_dlpack():
|
| 9 |
+
yield np.arange(5)
|
| 10 |
+
|
| 11 |
+
class OldDLPack(np.ndarray):
|
| 12 |
+
# Support only the "old" version
|
| 13 |
+
def __dlpack__(self, stream=None):
|
| 14 |
+
return super().__dlpack__(stream=None)
|
| 15 |
+
|
| 16 |
+
yield np.arange(5).view(OldDLPack)
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class TestDLPack:
|
| 20 |
+
@pytest.mark.skipif(IS_PYPY, reason="PyPy can't get refcounts.")
|
| 21 |
+
@pytest.mark.parametrize("max_version", [(0, 0), None, (1, 0), (100, 3)])
|
| 22 |
+
def test_dunder_dlpack_refcount(self, max_version):
|
| 23 |
+
x = np.arange(5)
|
| 24 |
+
y = x.__dlpack__(max_version=max_version)
|
| 25 |
+
assert sys.getrefcount(x) == 3
|
| 26 |
+
del y
|
| 27 |
+
assert sys.getrefcount(x) == 2
|
| 28 |
+
|
| 29 |
+
def test_dunder_dlpack_stream(self):
|
| 30 |
+
x = np.arange(5)
|
| 31 |
+
x.__dlpack__(stream=None)
|
| 32 |
+
|
| 33 |
+
with pytest.raises(RuntimeError):
|
| 34 |
+
x.__dlpack__(stream=1)
|
| 35 |
+
|
| 36 |
+
def test_dunder_dlpack_copy(self):
|
| 37 |
+
# Checks the argument parsing of __dlpack__ explicitly.
|
| 38 |
+
# Honoring the flag is tested in the from_dlpack round-tripping test.
|
| 39 |
+
x = np.arange(5)
|
| 40 |
+
x.__dlpack__(copy=True)
|
| 41 |
+
x.__dlpack__(copy=None)
|
| 42 |
+
x.__dlpack__(copy=False)
|
| 43 |
+
|
| 44 |
+
with pytest.raises(ValueError):
|
| 45 |
+
# NOTE: The copy converter should be stricter, but not just here.
|
| 46 |
+
x.__dlpack__(copy=np.array([1, 2, 3]))
|
| 47 |
+
|
| 48 |
+
def test_strides_not_multiple_of_itemsize(self):
|
| 49 |
+
dt = np.dtype([('int', np.int32), ('char', np.int8)])
|
| 50 |
+
y = np.zeros((5,), dtype=dt)
|
| 51 |
+
z = y['int']
|
| 52 |
+
|
| 53 |
+
with pytest.raises(BufferError):
|
| 54 |
+
np.from_dlpack(z)
|
| 55 |
+
|
| 56 |
+
@pytest.mark.skipif(IS_PYPY, reason="PyPy can't get refcounts.")
|
| 57 |
+
@pytest.mark.parametrize("arr", new_and_old_dlpack())
|
| 58 |
+
def test_from_dlpack_refcount(self, arr):
|
| 59 |
+
arr = arr.copy()
|
| 60 |
+
y = np.from_dlpack(arr)
|
| 61 |
+
assert sys.getrefcount(arr) == 3
|
| 62 |
+
del y
|
| 63 |
+
assert sys.getrefcount(arr) == 2
|
| 64 |
+
|
| 65 |
+
@pytest.mark.parametrize("dtype", [
|
| 66 |
+
np.bool,
|
| 67 |
+
np.int8, np.int16, np.int32, np.int64,
|
| 68 |
+
np.uint8, np.uint16, np.uint32, np.uint64,
|
| 69 |
+
np.float16, np.float32, np.float64,
|
| 70 |
+
np.complex64, np.complex128
|
| 71 |
+
])
|
| 72 |
+
@pytest.mark.parametrize("arr", new_and_old_dlpack())
|
| 73 |
+
def test_dtype_passthrough(self, arr, dtype):
|
| 74 |
+
x = arr.astype(dtype)
|
| 75 |
+
y = np.from_dlpack(x)
|
| 76 |
+
|
| 77 |
+
assert y.dtype == x.dtype
|
| 78 |
+
assert_array_equal(x, y)
|
| 79 |
+
|
| 80 |
+
def test_invalid_dtype(self):
|
| 81 |
+
x = np.asarray(np.datetime64('2021-05-27'))
|
| 82 |
+
|
| 83 |
+
with pytest.raises(BufferError):
|
| 84 |
+
np.from_dlpack(x)
|
| 85 |
+
|
| 86 |
+
def test_invalid_byte_swapping(self):
|
| 87 |
+
dt = np.dtype('=i8').newbyteorder()
|
| 88 |
+
x = np.arange(5, dtype=dt)
|
| 89 |
+
|
| 90 |
+
with pytest.raises(BufferError):
|
| 91 |
+
np.from_dlpack(x)
|
| 92 |
+
|
| 93 |
+
def test_non_contiguous(self):
|
| 94 |
+
x = np.arange(25).reshape((5, 5))
|
| 95 |
+
|
| 96 |
+
y1 = x[0]
|
| 97 |
+
assert_array_equal(y1, np.from_dlpack(y1))
|
| 98 |
+
|
| 99 |
+
y2 = x[:, 0]
|
| 100 |
+
assert_array_equal(y2, np.from_dlpack(y2))
|
| 101 |
+
|
| 102 |
+
y3 = x[1, :]
|
| 103 |
+
assert_array_equal(y3, np.from_dlpack(y3))
|
| 104 |
+
|
| 105 |
+
y4 = x[1]
|
| 106 |
+
assert_array_equal(y4, np.from_dlpack(y4))
|
| 107 |
+
|
| 108 |
+
y5 = np.diagonal(x).copy()
|
| 109 |
+
assert_array_equal(y5, np.from_dlpack(y5))
|
| 110 |
+
|
| 111 |
+
@pytest.mark.parametrize("ndim", range(33))
|
| 112 |
+
def test_higher_dims(self, ndim):
|
| 113 |
+
shape = (1,) * ndim
|
| 114 |
+
x = np.zeros(shape, dtype=np.float64)
|
| 115 |
+
|
| 116 |
+
assert shape == np.from_dlpack(x).shape
|
| 117 |
+
|
| 118 |
+
def test_dlpack_device(self):
|
| 119 |
+
x = np.arange(5)
|
| 120 |
+
assert x.__dlpack_device__() == (1, 0)
|
| 121 |
+
y = np.from_dlpack(x)
|
| 122 |
+
assert y.__dlpack_device__() == (1, 0)
|
| 123 |
+
z = y[::2]
|
| 124 |
+
assert z.__dlpack_device__() == (1, 0)
|
| 125 |
+
|
| 126 |
+
def dlpack_deleter_exception(self, max_version):
|
| 127 |
+
x = np.arange(5)
|
| 128 |
+
_ = x.__dlpack__(max_version=max_version)
|
| 129 |
+
raise RuntimeError
|
| 130 |
+
|
| 131 |
+
@pytest.mark.parametrize("max_version", [None, (1, 0)])
|
| 132 |
+
def test_dlpack_destructor_exception(self, max_version):
|
| 133 |
+
with pytest.raises(RuntimeError):
|
| 134 |
+
self.dlpack_deleter_exception(max_version=max_version)
|
| 135 |
+
|
| 136 |
+
def test_readonly(self):
|
| 137 |
+
x = np.arange(5)
|
| 138 |
+
x.flags.writeable = False
|
| 139 |
+
# Raises without max_version
|
| 140 |
+
with pytest.raises(BufferError):
|
| 141 |
+
x.__dlpack__()
|
| 142 |
+
|
| 143 |
+
# But works fine if we try with version
|
| 144 |
+
y = np.from_dlpack(x)
|
| 145 |
+
assert not y.flags.writeable
|
| 146 |
+
|
| 147 |
+
def test_ndim0(self):
|
| 148 |
+
x = np.array(1.0)
|
| 149 |
+
y = np.from_dlpack(x)
|
| 150 |
+
assert_array_equal(x, y)
|
| 151 |
+
|
| 152 |
+
def test_size1dims_arrays(self):
|
| 153 |
+
x = np.ndarray(dtype='f8', shape=(10, 5, 1), strides=(8, 80, 4),
|
| 154 |
+
buffer=np.ones(1000, dtype=np.uint8), order='F')
|
| 155 |
+
y = np.from_dlpack(x)
|
| 156 |
+
assert_array_equal(x, y)
|
| 157 |
+
|
| 158 |
+
def test_copy(self):
|
| 159 |
+
x = np.arange(5)
|
| 160 |
+
|
| 161 |
+
y = np.from_dlpack(x)
|
| 162 |
+
assert np.may_share_memory(x, y)
|
| 163 |
+
y = np.from_dlpack(x, copy=False)
|
| 164 |
+
assert np.may_share_memory(x, y)
|
| 165 |
+
y = np.from_dlpack(x, copy=True)
|
| 166 |
+
assert not np.may_share_memory(x, y)
|
| 167 |
+
|
| 168 |
+
def test_device(self):
|
| 169 |
+
x = np.arange(5)
|
| 170 |
+
# requesting (1, 0), i.e. CPU device works in both calls:
|
| 171 |
+
x.__dlpack__(dl_device=(1, 0))
|
| 172 |
+
np.from_dlpack(x, device="cpu")
|
| 173 |
+
np.from_dlpack(x, device=None)
|
| 174 |
+
|
| 175 |
+
with pytest.raises(ValueError):
|
| 176 |
+
x.__dlpack__(dl_device=(10, 0))
|
| 177 |
+
with pytest.raises(ValueError):
|
| 178 |
+
np.from_dlpack(x, device="gpu")
|
janus/lib/python3.10/site-packages/numpy/_core/tests/test_errstate.py
ADDED
|
@@ -0,0 +1,129 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pytest
|
| 2 |
+
import sysconfig
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
from numpy.testing import assert_, assert_raises, IS_WASM
|
| 6 |
+
|
| 7 |
+
# The floating point emulation on ARM EABI systems lacking a hardware FPU is
|
| 8 |
+
# known to be buggy. This is an attempt to identify these hosts. It may not
|
| 9 |
+
# catch all possible cases, but it catches the known cases of gh-413 and
|
| 10 |
+
# gh-15562.
|
| 11 |
+
hosttype = sysconfig.get_config_var('HOST_GNU_TYPE')
|
| 12 |
+
arm_softfloat = False if hosttype is None else hosttype.endswith('gnueabi')
|
| 13 |
+
|
| 14 |
+
class TestErrstate:
|
| 15 |
+
@pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
|
| 16 |
+
@pytest.mark.skipif(arm_softfloat,
|
| 17 |
+
reason='platform/cpu issue with FPU (gh-413,-15562)')
|
| 18 |
+
def test_invalid(self):
|
| 19 |
+
with np.errstate(all='raise', under='ignore'):
|
| 20 |
+
a = -np.arange(3)
|
| 21 |
+
# This should work
|
| 22 |
+
with np.errstate(invalid='ignore'):
|
| 23 |
+
np.sqrt(a)
|
| 24 |
+
# While this should fail!
|
| 25 |
+
with assert_raises(FloatingPointError):
|
| 26 |
+
np.sqrt(a)
|
| 27 |
+
|
| 28 |
+
@pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
|
| 29 |
+
@pytest.mark.skipif(arm_softfloat,
|
| 30 |
+
reason='platform/cpu issue with FPU (gh-15562)')
|
| 31 |
+
def test_divide(self):
|
| 32 |
+
with np.errstate(all='raise', under='ignore'):
|
| 33 |
+
a = -np.arange(3)
|
| 34 |
+
# This should work
|
| 35 |
+
with np.errstate(divide='ignore'):
|
| 36 |
+
a // 0
|
| 37 |
+
# While this should fail!
|
| 38 |
+
with assert_raises(FloatingPointError):
|
| 39 |
+
a // 0
|
| 40 |
+
# As should this, see gh-15562
|
| 41 |
+
with assert_raises(FloatingPointError):
|
| 42 |
+
a // a
|
| 43 |
+
|
| 44 |
+
@pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
|
| 45 |
+
@pytest.mark.skipif(arm_softfloat,
|
| 46 |
+
reason='platform/cpu issue with FPU (gh-15562)')
|
| 47 |
+
def test_errcall(self):
|
| 48 |
+
count = 0
|
| 49 |
+
def foo(*args):
|
| 50 |
+
nonlocal count
|
| 51 |
+
count += 1
|
| 52 |
+
|
| 53 |
+
olderrcall = np.geterrcall()
|
| 54 |
+
with np.errstate(call=foo):
|
| 55 |
+
assert np.geterrcall() is foo
|
| 56 |
+
with np.errstate(call=None):
|
| 57 |
+
assert np.geterrcall() is None
|
| 58 |
+
assert np.geterrcall() is olderrcall
|
| 59 |
+
assert count == 0
|
| 60 |
+
|
| 61 |
+
with np.errstate(call=foo, invalid="call"):
|
| 62 |
+
np.array(np.inf) - np.array(np.inf)
|
| 63 |
+
|
| 64 |
+
assert count == 1
|
| 65 |
+
|
| 66 |
+
def test_errstate_decorator(self):
|
| 67 |
+
@np.errstate(all='ignore')
|
| 68 |
+
def foo():
|
| 69 |
+
a = -np.arange(3)
|
| 70 |
+
a // 0
|
| 71 |
+
|
| 72 |
+
foo()
|
| 73 |
+
|
| 74 |
+
def test_errstate_enter_once(self):
|
| 75 |
+
errstate = np.errstate(invalid="warn")
|
| 76 |
+
with errstate:
|
| 77 |
+
pass
|
| 78 |
+
|
| 79 |
+
# The errstate context cannot be entered twice as that would not be
|
| 80 |
+
# thread-safe
|
| 81 |
+
with pytest.raises(TypeError,
|
| 82 |
+
match="Cannot enter `np.errstate` twice"):
|
| 83 |
+
with errstate:
|
| 84 |
+
pass
|
| 85 |
+
|
| 86 |
+
@pytest.mark.skipif(IS_WASM, reason="wasm doesn't support asyncio")
|
| 87 |
+
def test_asyncio_safe(self):
|
| 88 |
+
# asyncio may not always work, lets assume its fine if missing
|
| 89 |
+
# Pyodide/wasm doesn't support it. If this test makes problems,
|
| 90 |
+
# it should just be skipped liberally (or run differently).
|
| 91 |
+
asyncio = pytest.importorskip("asyncio")
|
| 92 |
+
|
| 93 |
+
@np.errstate(invalid="ignore")
|
| 94 |
+
def decorated():
|
| 95 |
+
# Decorated non-async function (it is not safe to decorate an
|
| 96 |
+
# async one)
|
| 97 |
+
assert np.geterr()["invalid"] == "ignore"
|
| 98 |
+
|
| 99 |
+
async def func1():
|
| 100 |
+
decorated()
|
| 101 |
+
await asyncio.sleep(0.1)
|
| 102 |
+
decorated()
|
| 103 |
+
|
| 104 |
+
async def func2():
|
| 105 |
+
with np.errstate(invalid="raise"):
|
| 106 |
+
assert np.geterr()["invalid"] == "raise"
|
| 107 |
+
await asyncio.sleep(0.125)
|
| 108 |
+
assert np.geterr()["invalid"] == "raise"
|
| 109 |
+
|
| 110 |
+
# for good sport, a third one with yet another state:
|
| 111 |
+
async def func3():
|
| 112 |
+
with np.errstate(invalid="print"):
|
| 113 |
+
assert np.geterr()["invalid"] == "print"
|
| 114 |
+
await asyncio.sleep(0.11)
|
| 115 |
+
assert np.geterr()["invalid"] == "print"
|
| 116 |
+
|
| 117 |
+
async def main():
|
| 118 |
+
# simply run all three function multiple times:
|
| 119 |
+
await asyncio.gather(
|
| 120 |
+
func1(), func2(), func3(), func1(), func2(), func3(),
|
| 121 |
+
func1(), func2(), func3(), func1(), func2(), func3())
|
| 122 |
+
|
| 123 |
+
loop = asyncio.new_event_loop()
|
| 124 |
+
with np.errstate(invalid="warn"):
|
| 125 |
+
asyncio.run(main())
|
| 126 |
+
assert np.geterr()["invalid"] == "warn"
|
| 127 |
+
|
| 128 |
+
assert np.geterr()["invalid"] == "warn" # the default
|
| 129 |
+
loop.close()
|