content
stringlengths
1
103k
path
stringlengths
8
216
filename
stringlengths
2
179
language
stringclasses
15 values
size_bytes
int64
2
189k
quality_score
float64
0.5
0.95
complexity
float64
0
1
documentation_ratio
float64
0
1
repository
stringclasses
5 values
stars
int64
0
1k
created_date
stringdate
2023-07-10 19:21:08
2025-07-09 19:11:45
license
stringclasses
4 values
is_test
bool
2 classes
file_hash
stringlengths
32
32
import contextlib\nimport re\nimport sys\nimport types\nimport unittest\nimport warnings\nfrom collections.abc import Callable\nfrom pathlib import Path\nfrom typing import Any, TypeVar, assert_type\n\nimport numpy as np\nimport numpy.typing as npt\n\nAR_f8: npt.NDArray[np.float64]\nAR_i8: npt.NDArray[np.int64]\n\nbool_obj: bool\nsuppress_obj: np.testing.suppress_warnings\nFT = TypeVar("FT", bound=Callable[..., Any])\n\ndef func() -> int: ...\n\ndef func2(\n x: npt.NDArray[np.number],\n y: npt.NDArray[np.number],\n) -> npt.NDArray[np.bool]: ...\n\nassert_type(np.testing.KnownFailureException(), np.testing.KnownFailureException)\nassert_type(np.testing.IgnoreException(), np.testing.IgnoreException)\n\nassert_type(\n np.testing.clear_and_catch_warnings(modules=[np.testing]),\n np.testing.clear_and_catch_warnings[None],\n)\nassert_type(\n np.testing.clear_and_catch_warnings(True),\n np.testing.clear_and_catch_warnings[list[warnings.WarningMessage]],\n)\nassert_type(\n np.testing.clear_and_catch_warnings(False),\n np.testing.clear_and_catch_warnings[None],\n)\nassert_type(\n np.testing.clear_and_catch_warnings(bool_obj),\n np.testing.clear_and_catch_warnings,\n)\nassert_type(\n np.testing.clear_and_catch_warnings.class_modules,\n tuple[types.ModuleType, ...],\n)\nassert_type(\n np.testing.clear_and_catch_warnings.modules,\n set[types.ModuleType],\n)\n\nwith np.testing.clear_and_catch_warnings(True) as c1:\n assert_type(c1, list[warnings.WarningMessage])\nwith np.testing.clear_and_catch_warnings() as c2:\n assert_type(c2, None)\n\nassert_type(np.testing.suppress_warnings("once"), np.testing.suppress_warnings)\nassert_type(np.testing.suppress_warnings()(func), Callable[[], int])\nassert_type(suppress_obj.filter(RuntimeWarning), None)\nassert_type(suppress_obj.record(RuntimeWarning), list[warnings.WarningMessage])\nwith suppress_obj as c3:\n assert_type(c3, np.testing.suppress_warnings)\n\nassert_type(np.testing.verbose, int)\nassert_type(np.testing.IS_PYPY, 
bool)\nassert_type(np.testing.HAS_REFCOUNT, bool)\nassert_type(np.testing.HAS_LAPACK64, bool)\n\nassert_type(np.testing.assert_(1, msg="test"), None)\nassert_type(np.testing.assert_(2, msg=lambda: "test"), None)\n\nif sys.platform == "win32" or sys.platform == "cygwin":\n assert_type(np.testing.memusage(), int)\nelif sys.platform == "linux":\n assert_type(np.testing.memusage(), int | None)\n\nassert_type(np.testing.jiffies(), int)\n\nassert_type(np.testing.build_err_msg([0, 1, 2], "test"), str)\nassert_type(np.testing.build_err_msg(range(2), "test", header="header"), str)\nassert_type(np.testing.build_err_msg(np.arange(9).reshape(3, 3), "test", verbose=False), str)\nassert_type(np.testing.build_err_msg("abc", "test", names=["x", "y"]), str)\nassert_type(np.testing.build_err_msg([1.0, 2.0], "test", precision=5), str)\n\nassert_type(np.testing.assert_equal({1}, {1}), None)\nassert_type(np.testing.assert_equal([1, 2, 3], [1, 2, 3], err_msg="fail"), None)\nassert_type(np.testing.assert_equal(1, 1.0, verbose=True), None)\n\nassert_type(np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 1]), None)\n\nassert_type(np.testing.assert_almost_equal(1.0, 1.1), None)\nassert_type(np.testing.assert_almost_equal([1, 2, 3], [1, 2, 3], err_msg="fail"), None)\nassert_type(np.testing.assert_almost_equal(1, 1.0, verbose=True), None)\nassert_type(np.testing.assert_almost_equal(1, 1.0001, decimal=2), None)\n\nassert_type(np.testing.assert_approx_equal(1.0, 1.1), None)\nassert_type(np.testing.assert_approx_equal("1", "2", err_msg="fail"), None)\nassert_type(np.testing.assert_approx_equal(1, 1.0, verbose=True), None)\nassert_type(np.testing.assert_approx_equal(1, 1.0001, significant=2), None)\n\nassert_type(np.testing.assert_array_compare(func2, AR_i8, AR_f8, err_msg="test"), None)\nassert_type(np.testing.assert_array_compare(func2, AR_i8, AR_f8, verbose=True), None)\nassert_type(np.testing.assert_array_compare(func2, AR_i8, AR_f8, header="header"), 
None)\nassert_type(np.testing.assert_array_compare(func2, AR_i8, AR_f8, precision=np.int64()), None)\nassert_type(np.testing.assert_array_compare(func2, AR_i8, AR_f8, equal_nan=False), None)\nassert_type(np.testing.assert_array_compare(func2, AR_i8, AR_f8, equal_inf=True), None)\n\nassert_type(np.testing.assert_array_equal(AR_i8, AR_f8), None)\nassert_type(np.testing.assert_array_equal(AR_i8, AR_f8, err_msg="test"), None)\nassert_type(np.testing.assert_array_equal(AR_i8, AR_f8, verbose=True), None)\n\nassert_type(np.testing.assert_array_almost_equal(AR_i8, AR_f8), None)\nassert_type(np.testing.assert_array_almost_equal(AR_i8, AR_f8, err_msg="test"), None)\nassert_type(np.testing.assert_array_almost_equal(AR_i8, AR_f8, verbose=True), None)\nassert_type(np.testing.assert_array_almost_equal(AR_i8, AR_f8, decimal=1), None)\n\nassert_type(np.testing.assert_array_less(AR_i8, AR_f8), None)\nassert_type(np.testing.assert_array_less(AR_i8, AR_f8, err_msg="test"), None)\nassert_type(np.testing.assert_array_less(AR_i8, AR_f8, verbose=True), None)\n\nassert_type(np.testing.runstring("1 + 1", {}), Any)\nassert_type(np.testing.runstring("int64() + 1", {"int64": np.int64}), Any)\n\nassert_type(np.testing.assert_string_equal("1", "1"), None)\n\nassert_type(np.testing.rundocs(), None)\nassert_type(np.testing.rundocs("test.py"), None)\nassert_type(np.testing.rundocs(Path("test.py"), raise_on_error=True), None)\n\ndef func3(a: int) -> bool: ...\n\nassert_type(\n np.testing.assert_raises(RuntimeWarning),\n unittest.case._AssertRaisesContext[RuntimeWarning],\n)\nassert_type(np.testing.assert_raises(RuntimeWarning, func3, 5), None)\n\nassert_type(\n np.testing.assert_raises_regex(RuntimeWarning, r"test"),\n unittest.case._AssertRaisesContext[RuntimeWarning],\n)\nassert_type(np.testing.assert_raises_regex(RuntimeWarning, b"test", func3, 5), None)\nassert_type(np.testing.assert_raises_regex(RuntimeWarning, re.compile(b"test"), func3, 5), None)\n\nclass Test: ...\n\ndef decorate(a: FT) -> 
FT:\n return a\n\nassert_type(np.testing.decorate_methods(Test, decorate), None)\nassert_type(np.testing.decorate_methods(Test, decorate, None), None)\nassert_type(np.testing.decorate_methods(Test, decorate, "test"), None)\nassert_type(np.testing.decorate_methods(Test, decorate, b"test"), None)\nassert_type(np.testing.decorate_methods(Test, decorate, re.compile("test")), None)\n\nassert_type(np.testing.measure("for i in range(1000): np.sqrt(i**2)"), float)\nassert_type(np.testing.measure(b"for i in range(1000): np.sqrt(i**2)", times=5), float)\n\nassert_type(np.testing.assert_allclose(AR_i8, AR_f8), None)\nassert_type(np.testing.assert_allclose(AR_i8, AR_f8, rtol=0.005), None)\nassert_type(np.testing.assert_allclose(AR_i8, AR_f8, atol=1), None)\nassert_type(np.testing.assert_allclose(AR_i8, AR_f8, equal_nan=True), None)\nassert_type(np.testing.assert_allclose(AR_i8, AR_f8, err_msg="err"), None)\nassert_type(np.testing.assert_allclose(AR_i8, AR_f8, verbose=False), None)\n\nassert_type(np.testing.assert_array_almost_equal_nulp(AR_i8, AR_f8, nulp=2), None)\n\nassert_type(np.testing.assert_array_max_ulp(AR_i8, AR_f8, maxulp=2), npt.NDArray[Any])\nassert_type(np.testing.assert_array_max_ulp(AR_i8, AR_f8, dtype=np.float32), npt.NDArray[Any])\n\nassert_type(np.testing.assert_warns(RuntimeWarning), contextlib._GeneratorContextManager[None])\nassert_type(np.testing.assert_warns(RuntimeWarning, func3, 5), bool)\n\ndef func4(a: int, b: str) -> bool: ...\n\nassert_type(np.testing.assert_no_warnings(), contextlib._GeneratorContextManager[None])\nassert_type(np.testing.assert_no_warnings(func3, 5), bool)\nassert_type(np.testing.assert_no_warnings(func4, a=1, b="test"), bool)\nassert_type(np.testing.assert_no_warnings(func4, 1, "test"), bool)\n\nassert_type(np.testing.tempdir("test_dir"), contextlib._GeneratorContextManager[str])\nassert_type(np.testing.tempdir(prefix=b"test"), contextlib._GeneratorContextManager[bytes])\nassert_type(np.testing.tempdir("test_dir", 
dir=Path("here")), contextlib._GeneratorContextManager[str])\n\nassert_type(np.testing.temppath("test_dir", text=True), contextlib._GeneratorContextManager[str])\nassert_type(np.testing.temppath(prefix=b"test"), contextlib._GeneratorContextManager[bytes])\nassert_type(np.testing.temppath("test_dir", dir=Path("here")), contextlib._GeneratorContextManager[str])\n\nassert_type(np.testing.assert_no_gc_cycles(), contextlib._GeneratorContextManager[None])\nassert_type(np.testing.assert_no_gc_cycles(func3, 5), None)\n\nassert_type(np.testing.break_cycles(), None)\n\nassert_type(np.testing.TestCase(), unittest.case.TestCase)\n
.venv\Lib\site-packages\numpy\typing\tests\data\reveal\testing.pyi
testing.pyi
Other
8,641
0.85
0.045455
0
vue-tools
717
2024-03-02T04:43:00.997963
Apache-2.0
true
513d00efee4246221943f21593af8959
from typing import Any, TypeVar, assert_type\n\nimport numpy as np\nimport numpy.typing as npt\n\n_ScalarT = TypeVar("_ScalarT", bound=np.generic)\n\ndef func1(ar: npt.NDArray[_ScalarT], a: int) -> npt.NDArray[_ScalarT]: ...\n\ndef func2(ar: npt.NDArray[np.number], a: str) -> npt.NDArray[np.float64]: ...\n\nAR_b: npt.NDArray[np.bool]\nAR_u: npt.NDArray[np.uint64]\nAR_i: npt.NDArray[np.int64]\nAR_f: npt.NDArray[np.float64]\nAR_c: npt.NDArray[np.complex128]\nAR_O: npt.NDArray[np.object_]\n\nAR_LIKE_b: list[bool]\nAR_LIKE_c: list[complex]\n\nassert_type(np.fliplr(AR_b), npt.NDArray[np.bool])\nassert_type(np.fliplr(AR_LIKE_b), npt.NDArray[Any])\n\nassert_type(np.flipud(AR_b), npt.NDArray[np.bool])\nassert_type(np.flipud(AR_LIKE_b), npt.NDArray[Any])\n\nassert_type(np.eye(10), npt.NDArray[np.float64])\nassert_type(np.eye(10, M=20, dtype=np.int64), npt.NDArray[np.int64])\nassert_type(np.eye(10, k=2, dtype=int), npt.NDArray[Any])\n\nassert_type(np.diag(AR_b), npt.NDArray[np.bool])\nassert_type(np.diag(AR_LIKE_b, k=0), npt.NDArray[Any])\n\nassert_type(np.diagflat(AR_b), npt.NDArray[np.bool])\nassert_type(np.diagflat(AR_LIKE_b, k=0), npt.NDArray[Any])\n\nassert_type(np.tri(10), npt.NDArray[np.float64])\nassert_type(np.tri(10, M=20, dtype=np.int64), npt.NDArray[np.int64])\nassert_type(np.tri(10, k=2, dtype=int), npt.NDArray[Any])\n\nassert_type(np.tril(AR_b), npt.NDArray[np.bool])\nassert_type(np.tril(AR_LIKE_b, k=0), npt.NDArray[Any])\n\nassert_type(np.triu(AR_b), npt.NDArray[np.bool])\nassert_type(np.triu(AR_LIKE_b, k=0), npt.NDArray[Any])\n\nassert_type(np.vander(AR_b), npt.NDArray[np.signedinteger])\nassert_type(np.vander(AR_u), npt.NDArray[np.signedinteger])\nassert_type(np.vander(AR_i, N=2), npt.NDArray[np.signedinteger])\nassert_type(np.vander(AR_f, increasing=True), npt.NDArray[np.floating])\nassert_type(np.vander(AR_c), npt.NDArray[np.complexfloating])\nassert_type(np.vander(AR_O), npt.NDArray[np.object_])\n\nassert_type(\n np.histogram2d(AR_LIKE_c, AR_LIKE_c),\n 
tuple[\n npt.NDArray[np.float64],\n npt.NDArray[np.complex128 | np.float64],\n npt.NDArray[np.complex128 | np.float64],\n ],\n)\nassert_type(\n np.histogram2d(AR_i, AR_b),\n tuple[\n npt.NDArray[np.float64],\n npt.NDArray[np.float64],\n npt.NDArray[np.float64],\n ],\n)\nassert_type(\n np.histogram2d(AR_f, AR_i),\n tuple[\n npt.NDArray[np.float64],\n npt.NDArray[np.float64],\n npt.NDArray[np.float64],\n ],\n)\nassert_type(\n np.histogram2d(AR_i, AR_f),\n tuple[\n npt.NDArray[np.float64],\n npt.NDArray[np.float64],\n npt.NDArray[np.float64],\n ],\n)\nassert_type(\n np.histogram2d(AR_f, AR_c, weights=AR_LIKE_b),\n tuple[\n npt.NDArray[np.float64],\n npt.NDArray[np.complex128],\n npt.NDArray[np.complex128],\n ],\n)\nassert_type(\n np.histogram2d(AR_f, AR_c, bins=8),\n tuple[\n npt.NDArray[np.float64],\n npt.NDArray[np.complex128],\n npt.NDArray[np.complex128],\n ],\n)\nassert_type(\n np.histogram2d(AR_c, AR_f, bins=(8, 5)),\n tuple[\n npt.NDArray[np.float64],\n npt.NDArray[np.complex128],\n npt.NDArray[np.complex128],\n ],\n)\nassert_type(\n np.histogram2d(AR_c, AR_i, bins=AR_u),\n tuple[\n npt.NDArray[np.float64],\n npt.NDArray[np.uint64],\n npt.NDArray[np.uint64],\n ],\n)\nassert_type(\n np.histogram2d(AR_c, AR_c, bins=(AR_u, AR_u)),\n tuple[\n npt.NDArray[np.float64],\n npt.NDArray[np.uint64],\n npt.NDArray[np.uint64],\n ],\n)\nassert_type(\n np.histogram2d(AR_c, AR_c, bins=(AR_b, 8)),\n tuple[\n npt.NDArray[np.float64],\n npt.NDArray[np.bool | np.complex128],\n npt.NDArray[np.bool | np.complex128],\n ],\n)\n\nassert_type(np.mask_indices(10, func1), tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]])\nassert_type(np.mask_indices(8, func2, "0"), tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]])\n\nassert_type(np.tril_indices(10), tuple[npt.NDArray[np.int_], npt.NDArray[np.int_]])\n\nassert_type(np.tril_indices_from(AR_b), tuple[npt.NDArray[np.int_], npt.NDArray[np.int_]])\n\nassert_type(np.triu_indices(10), tuple[npt.NDArray[np.int_], 
npt.NDArray[np.int_]])\n\nassert_type(np.triu_indices_from(AR_b), tuple[npt.NDArray[np.int_], npt.NDArray[np.int_]])\n
.venv\Lib\site-packages\numpy\typing\tests\data\reveal\twodim_base.pyi
twodim_base.pyi
Other
4,382
0.85
0.013793
0
node-utils
403
2025-04-29T09:41:23.903248
BSD-3-Clause
true
5a809b0ab5571dd26f00923623294652
from typing import Any, Literal, assert_type\n\nimport numpy as np\nimport numpy.typing as npt\n\nf8: np.float64\nf: float\n\n# NOTE: Avoid importing the platform specific `np.float128` type\nAR_i8: npt.NDArray[np.int64]\nAR_i4: npt.NDArray[np.int32]\nAR_f2: npt.NDArray[np.float16]\nAR_f8: npt.NDArray[np.float64]\nAR_f16: npt.NDArray[np.longdouble]\nAR_c8: npt.NDArray[np.complex64]\nAR_c16: npt.NDArray[np.complex128]\n\nAR_LIKE_f: list[float]\n\nclass ComplexObj:\n real: slice\n imag: slice\n\nassert_type(np.mintypecode(["f8"], typeset="qfQF"), str)\n\nassert_type(np.real(ComplexObj()), slice)\nassert_type(np.real(AR_f8), npt.NDArray[np.float64])\nassert_type(np.real(AR_c16), npt.NDArray[np.float64])\nassert_type(np.real(AR_LIKE_f), npt.NDArray[Any])\n\nassert_type(np.imag(ComplexObj()), slice)\nassert_type(np.imag(AR_f8), npt.NDArray[np.float64])\nassert_type(np.imag(AR_c16), npt.NDArray[np.float64])\nassert_type(np.imag(AR_LIKE_f), npt.NDArray[Any])\n\nassert_type(np.iscomplex(f8), np.bool)\nassert_type(np.iscomplex(AR_f8), npt.NDArray[np.bool])\nassert_type(np.iscomplex(AR_LIKE_f), npt.NDArray[np.bool])\n\nassert_type(np.isreal(f8), np.bool)\nassert_type(np.isreal(AR_f8), npt.NDArray[np.bool])\nassert_type(np.isreal(AR_LIKE_f), npt.NDArray[np.bool])\n\nassert_type(np.iscomplexobj(f8), bool)\nassert_type(np.isrealobj(f8), bool)\n\nassert_type(np.nan_to_num(f8), np.float64)\nassert_type(np.nan_to_num(f, copy=True), Any)\nassert_type(np.nan_to_num(AR_f8, nan=1.5), npt.NDArray[np.float64])\nassert_type(np.nan_to_num(AR_LIKE_f, posinf=9999), npt.NDArray[Any])\n\nassert_type(np.real_if_close(AR_f8), npt.NDArray[np.float64])\nassert_type(np.real_if_close(AR_c16), npt.NDArray[np.float64 | np.complex128])\nassert_type(np.real_if_close(AR_c8), npt.NDArray[np.float32 | np.complex64])\nassert_type(np.real_if_close(AR_LIKE_f), npt.NDArray[Any])\n\nassert_type(np.typename("h"), Literal["short"])\nassert_type(np.typename("B"), Literal["unsigned 
char"])\nassert_type(np.typename("V"), Literal["void"])\nassert_type(np.typename("S1"), Literal["character"])\n\nassert_type(np.common_type(AR_i4), type[np.float64])\nassert_type(np.common_type(AR_f2), type[np.float16])\nassert_type(np.common_type(AR_f2, AR_i4), type[np.float64])\nassert_type(np.common_type(AR_f16, AR_i4), type[np.longdouble])\nassert_type(np.common_type(AR_c8, AR_f2), type[np.complex64])\nassert_type(np.common_type(AR_f2, AR_c8, AR_i4), type[np.complexfloating])\n
.venv\Lib\site-packages\numpy\typing\tests\data\reveal\type_check.pyi
type_check.pyi
Other
2,459
0.95
0.014925
0.019231
react-lib
38
2025-04-06T20:01:10.046726
BSD-3-Clause
true
f3833d78c6543c7e568e8ad9363dbb5b
from typing import Any, assert_type\n\nimport numpy as np\nimport numpy.typing as npt\n\nAR_LIKE_b: list[bool]\nAR_LIKE_u: list[np.uint32]\nAR_LIKE_i: list[int]\nAR_LIKE_f: list[float]\nAR_LIKE_O: list[np.object_]\n\nAR_U: npt.NDArray[np.str_]\n\nassert_type(np.fix(AR_LIKE_b), npt.NDArray[np.floating])\nassert_type(np.fix(AR_LIKE_u), npt.NDArray[np.floating])\nassert_type(np.fix(AR_LIKE_i), npt.NDArray[np.floating])\nassert_type(np.fix(AR_LIKE_f), npt.NDArray[np.floating])\nassert_type(np.fix(AR_LIKE_O), npt.NDArray[np.object_])\nassert_type(np.fix(AR_LIKE_f, out=AR_U), npt.NDArray[np.str_])\n\nassert_type(np.isposinf(AR_LIKE_b), npt.NDArray[np.bool])\nassert_type(np.isposinf(AR_LIKE_u), npt.NDArray[np.bool])\nassert_type(np.isposinf(AR_LIKE_i), npt.NDArray[np.bool])\nassert_type(np.isposinf(AR_LIKE_f), npt.NDArray[np.bool])\nassert_type(np.isposinf(AR_LIKE_f, out=AR_U), npt.NDArray[np.str_])\n\nassert_type(np.isneginf(AR_LIKE_b), npt.NDArray[np.bool])\nassert_type(np.isneginf(AR_LIKE_u), npt.NDArray[np.bool])\nassert_type(np.isneginf(AR_LIKE_i), npt.NDArray[np.bool])\nassert_type(np.isneginf(AR_LIKE_f), npt.NDArray[np.bool])\nassert_type(np.isneginf(AR_LIKE_f, out=AR_U), npt.NDArray[np.str_])\n
.venv\Lib\site-packages\numpy\typing\tests\data\reveal\ufunclike.pyi
ufunclike.pyi
Other
1,214
0.85
0
0
react-lib
61
2024-01-09T13:50:18.387259
BSD-3-Clause
true
91fcbdd7216b4801649d74fa74b70c42
from typing import Any, Literal, NoReturn, assert_type\n\nimport numpy as np\nimport numpy.typing as npt\n\ni8: np.int64\nf8: np.float64\nAR_f8: npt.NDArray[np.float64]\nAR_i8: npt.NDArray[np.int64]\n\nassert_type(np.absolute.__doc__, str)\nassert_type(np.absolute.types, list[str])\n\nassert_type(np.absolute.__name__, Literal["absolute"])\nassert_type(np.absolute.__qualname__, Literal["absolute"])\nassert_type(np.absolute.ntypes, Literal[20])\nassert_type(np.absolute.identity, None)\nassert_type(np.absolute.nin, Literal[1])\nassert_type(np.absolute.nin, Literal[1])\nassert_type(np.absolute.nout, Literal[1])\nassert_type(np.absolute.nargs, Literal[2])\nassert_type(np.absolute.signature, None)\nassert_type(np.absolute(f8), Any)\nassert_type(np.absolute(AR_f8), npt.NDArray[Any])\nassert_type(np.absolute.at(AR_f8, AR_i8), None)\n\nassert_type(np.add.__name__, Literal["add"])\nassert_type(np.add.__qualname__, Literal["add"])\nassert_type(np.add.ntypes, Literal[22])\nassert_type(np.add.identity, Literal[0])\nassert_type(np.add.nin, Literal[2])\nassert_type(np.add.nout, Literal[1])\nassert_type(np.add.nargs, Literal[3])\nassert_type(np.add.signature, None)\nassert_type(np.add(f8, f8), Any)\nassert_type(np.add(AR_f8, f8), npt.NDArray[Any])\nassert_type(np.add.at(AR_f8, AR_i8, f8), None)\nassert_type(np.add.reduce(AR_f8, axis=0), Any)\nassert_type(np.add.accumulate(AR_f8), npt.NDArray[Any])\nassert_type(np.add.reduceat(AR_f8, AR_i8), npt.NDArray[Any])\nassert_type(np.add.outer(f8, f8), Any)\nassert_type(np.add.outer(AR_f8, f8), npt.NDArray[Any])\n\nassert_type(np.frexp.__name__, Literal["frexp"])\nassert_type(np.frexp.__qualname__, Literal["frexp"])\nassert_type(np.frexp.ntypes, Literal[4])\nassert_type(np.frexp.identity, None)\nassert_type(np.frexp.nin, Literal[1])\nassert_type(np.frexp.nout, Literal[2])\nassert_type(np.frexp.nargs, Literal[3])\nassert_type(np.frexp.signature, None)\nassert_type(np.frexp(f8), tuple[Any, Any])\nassert_type(np.frexp(AR_f8), 
tuple[npt.NDArray[Any], npt.NDArray[Any]])\n\nassert_type(np.divmod.__name__, Literal["divmod"])\nassert_type(np.divmod.__qualname__, Literal["divmod"])\nassert_type(np.divmod.ntypes, Literal[15])\nassert_type(np.divmod.identity, None)\nassert_type(np.divmod.nin, Literal[2])\nassert_type(np.divmod.nout, Literal[2])\nassert_type(np.divmod.nargs, Literal[4])\nassert_type(np.divmod.signature, None)\nassert_type(np.divmod(f8, f8), tuple[Any, Any])\nassert_type(np.divmod(AR_f8, f8), tuple[npt.NDArray[Any], npt.NDArray[Any]])\n\nassert_type(np.matmul.__name__, Literal["matmul"])\nassert_type(np.matmul.__qualname__, Literal["matmul"])\nassert_type(np.matmul.ntypes, Literal[19])\nassert_type(np.matmul.identity, None)\nassert_type(np.matmul.nin, Literal[2])\nassert_type(np.matmul.nout, Literal[1])\nassert_type(np.matmul.nargs, Literal[3])\nassert_type(np.matmul.signature, Literal["(n?,k),(k,m?)->(n?,m?)"])\nassert_type(np.matmul.identity, None)\nassert_type(np.matmul(AR_f8, AR_f8), Any)\nassert_type(np.matmul(AR_f8, AR_f8, axes=[(0, 1), (0, 1), (0, 1)]), Any)\n\nassert_type(np.vecdot.__name__, Literal["vecdot"])\nassert_type(np.vecdot.__qualname__, Literal["vecdot"])\nassert_type(np.vecdot.ntypes, Literal[19])\nassert_type(np.vecdot.identity, None)\nassert_type(np.vecdot.nin, Literal[2])\nassert_type(np.vecdot.nout, Literal[1])\nassert_type(np.vecdot.nargs, Literal[3])\nassert_type(np.vecdot.signature, Literal["(n),(n)->()"])\nassert_type(np.vecdot.identity, None)\nassert_type(np.vecdot(AR_f8, AR_f8), Any)\n\nassert_type(np.bitwise_count.__name__, Literal["bitwise_count"])\nassert_type(np.bitwise_count.__qualname__, Literal["bitwise_count"])\nassert_type(np.bitwise_count.ntypes, Literal[11])\nassert_type(np.bitwise_count.identity, None)\nassert_type(np.bitwise_count.nin, Literal[1])\nassert_type(np.bitwise_count.nout, Literal[1])\nassert_type(np.bitwise_count.nargs, Literal[2])\nassert_type(np.bitwise_count.signature, None)\nassert_type(np.bitwise_count.identity, 
None)\nassert_type(np.bitwise_count(i8), Any)\nassert_type(np.bitwise_count(AR_i8), npt.NDArray[Any])\n\nassert_type(np.absolute.outer(), NoReturn)\nassert_type(np.frexp.outer(), NoReturn)\nassert_type(np.divmod.outer(), NoReturn)\nassert_type(np.matmul.outer(), NoReturn)\n\nassert_type(np.absolute.reduceat(), NoReturn)\nassert_type(np.frexp.reduceat(), NoReturn)\nassert_type(np.divmod.reduceat(), NoReturn)\nassert_type(np.matmul.reduceat(), NoReturn)\n\nassert_type(np.absolute.reduce(), NoReturn)\nassert_type(np.frexp.reduce(), NoReturn)\nassert_type(np.divmod.reduce(), NoReturn)\nassert_type(np.matmul.reduce(), NoReturn)\n\nassert_type(np.absolute.accumulate(), NoReturn)\nassert_type(np.frexp.accumulate(), NoReturn)\nassert_type(np.divmod.accumulate(), NoReturn)\nassert_type(np.matmul.accumulate(), NoReturn)\n\nassert_type(np.frexp.at(), NoReturn)\nassert_type(np.divmod.at(), NoReturn)\nassert_type(np.matmul.at(), NoReturn)\n
.venv\Lib\site-packages\numpy\typing\tests\data\reveal\ufuncs.pyi
ufuncs.pyi
Other
4,912
0.85
0
0
node-utils
449
2024-06-30T01:42:27.579087
Apache-2.0
true
9a6cd1cc6758fe56d7b78163066e3c4e
"""Typing tests for `_core._ufunc_config`."""\n\nfrom collections.abc import Callable\nfrom typing import Any, assert_type\n\nfrom _typeshed import SupportsWrite\n\nimport numpy as np\n\ndef func(a: str, b: int) -> None: ...\n\nclass Write:\n def write(self, value: str) -> None: ...\n\nassert_type(np.seterr(all=None), np._core._ufunc_config._ErrDict)\nassert_type(np.seterr(divide="ignore"), np._core._ufunc_config._ErrDict)\nassert_type(np.seterr(over="warn"), np._core._ufunc_config._ErrDict)\nassert_type(np.seterr(under="call"), np._core._ufunc_config._ErrDict)\nassert_type(np.seterr(invalid="raise"), np._core._ufunc_config._ErrDict)\nassert_type(np.geterr(), np._core._ufunc_config._ErrDict)\n\nassert_type(np.setbufsize(4096), int)\nassert_type(np.getbufsize(), int)\n\nassert_type(np.seterrcall(func), Callable[[str, int], Any] | SupportsWrite[str] | None)\nassert_type(np.seterrcall(Write()), Callable[[str, int], Any] | SupportsWrite[str] | None)\nassert_type(np.geterrcall(), Callable[[str, int], Any] | SupportsWrite[str] | None)\n\nassert_type(np.errstate(call=func, all="call"), np.errstate)\nassert_type(np.errstate(call=Write(), divide="log", over="log"), np.errstate)\n
.venv\Lib\site-packages\numpy\typing\tests\data\reveal\ufunc_config.pyi
ufunc_config.pyi
Other
1,192
0.85
0.133333
0
vue-tools
866
2024-08-30T07:59:44.851444
Apache-2.0
true
3286011ff3534ebdb4daddd66dc3cd08
from typing import assert_type\n\nimport numpy.exceptions as ex\n\nassert_type(ex.ModuleDeprecationWarning(), ex.ModuleDeprecationWarning)\nassert_type(ex.VisibleDeprecationWarning(), ex.VisibleDeprecationWarning)\nassert_type(ex.ComplexWarning(), ex.ComplexWarning)\nassert_type(ex.RankWarning(), ex.RankWarning)\nassert_type(ex.TooHardError(), ex.TooHardError)\nassert_type(ex.AxisError("test"), ex.AxisError)\nassert_type(ex.AxisError(5, 1), ex.AxisError)\n
.venv\Lib\site-packages\numpy\typing\tests\data\reveal\warnings_and_errors.pyi
warnings_and_errors.pyi
Other
460
0.85
0
0
vue-tools
71
2024-03-08T22:12:07.449027
BSD-3-Clause
true
67534135349f16420bd04ef9690af470
\n\n
.venv\Lib\site-packages\numpy\typing\tests\__pycache__\test_isfile.cpython-313.pyc
test_isfile.cpython-313.pyc
Other
1,606
0.8
0.035714
0
python-kit
355
2024-11-04T22:18:58.481033
BSD-3-Clause
true
08d4cac39e84563713f6e6b4ad2eaf7c
\n\n
.venv\Lib\site-packages\numpy\typing\tests\__pycache__\test_runtime.cpython-313.pyc
test_runtime.cpython-313.pyc
Other
6,053
0.8
0
0.037037
python-kit
834
2024-12-31T23:17:37.313818
MIT
true
91f973c59187dcd75c05c2e3a2f2a8d8
\n\n
.venv\Lib\site-packages\numpy\typing\tests\__pycache__\test_typing.cpython-313.pyc
test_typing.cpython-313.pyc
Other
9,461
0.95
0.030303
0.011765
awesome-app
373
2024-10-28T12:54:07.980186
GPL-3.0
true
663ff95c425c7b15e944ea25c389605e
\n\n
.venv\Lib\site-packages\numpy\typing\tests\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
193
0.7
0
0
python-kit
782
2023-07-22T01:57:23.437174
GPL-3.0
true
368b3315f33019d0372da189537c28de
\n\n
.venv\Lib\site-packages\numpy\typing\__pycache__\mypy_plugin.cpython-313.pyc
mypy_plugin.cpython-313.pyc
Other
8,501
0.95
0.034783
0.05
vue-tools
574
2024-04-28T10:27:24.749169
MIT
false
9195a1fa109814c7e888bfcbc9ce1e53
\n\n
.venv\Lib\site-packages\numpy\typing\__pycache__\__init__.cpython-313.pyc
__init__.cpython-313.pyc
Other
6,541
0.95
0.044693
0.023077
react-lib
614
2025-07-08T10:41:48.999691
Apache-2.0
false
d714ecc15d1caf104d0f40635685dfd7
"""Array printing function\n\n$Id: arrayprint.py,v 1.9 2005/09/13 13:58:44 teoliphant Exp $\n\n"""\n__all__ = ["array2string", "array_str", "array_repr",\n "set_printoptions", "get_printoptions", "printoptions",\n "format_float_positional", "format_float_scientific"]\n__docformat__ = 'restructuredtext'\n\n#\n# Written by Konrad Hinsen <hinsenk@ere.umontreal.ca>\n# last revision: 1996-3-13\n# modified by Jim Hugunin 1997-3-3 for repr's and str's (and other details)\n# and by Perry Greenfield 2000-4-1 for numarray\n# and by Travis Oliphant 2005-8-22 for numpy\n\n\n# Note: Both scalartypes.c.src and arrayprint.py implement strs for numpy\n# scalars but for different purposes. scalartypes.c.src has str/reprs for when\n# the scalar is printed on its own, while arrayprint.py has strs for when\n# scalars are printed inside an ndarray. Only the latter strs are currently\n# user-customizable.\n\nimport functools\nimport numbers\nimport sys\n\ntry:\n from _thread import get_ident\nexcept ImportError:\n from _dummy_thread import get_ident\n\nimport contextlib\nimport operator\nimport warnings\n\nimport numpy as np\n\nfrom . 
import numerictypes as _nt\nfrom .fromnumeric import any\nfrom .multiarray import (\n array,\n datetime_as_string,\n datetime_data,\n dragon4_positional,\n dragon4_scientific,\n ndarray,\n)\nfrom .numeric import asarray, concatenate, errstate\nfrom .numerictypes import complex128, flexible, float64, int_\nfrom .overrides import array_function_dispatch, set_module\nfrom .printoptions import format_options\nfrom .umath import absolute, isfinite, isinf, isnat\n\n\ndef _make_options_dict(precision=None, threshold=None, edgeitems=None,\n linewidth=None, suppress=None, nanstr=None, infstr=None,\n sign=None, formatter=None, floatmode=None, legacy=None,\n override_repr=None):\n """\n Make a dictionary out of the non-None arguments, plus conversion of\n *legacy* and sanity checks.\n """\n\n options = {k: v for k, v in list(locals().items()) if v is not None}\n\n if suppress is not None:\n options['suppress'] = bool(suppress)\n\n modes = ['fixed', 'unique', 'maxprec', 'maxprec_equal']\n if floatmode not in modes + [None]:\n raise ValueError("floatmode option must be one of " +\n ", ".join(f'"{m}"' for m in modes))\n\n if sign not in [None, '-', '+', ' ']:\n raise ValueError("sign option must be one of ' ', '+', or '-'")\n\n if legacy is False:\n options['legacy'] = sys.maxsize\n elif legacy == False: # noqa: E712\n warnings.warn(\n f"Passing `legacy={legacy!r}` is deprecated.",\n FutureWarning, stacklevel=3\n )\n options['legacy'] = sys.maxsize\n elif legacy == '1.13':\n options['legacy'] = 113\n elif legacy == '1.21':\n options['legacy'] = 121\n elif legacy == '1.25':\n options['legacy'] = 125\n elif legacy == '2.1':\n options['legacy'] = 201\n elif legacy == '2.2':\n options['legacy'] = 202\n elif legacy is None:\n pass # OK, do nothing.\n else:\n warnings.warn(\n "legacy printing option can currently only be '1.13', '1.21', "\n "'1.25', '2.1', '2.2' or `False`", stacklevel=3)\n\n if threshold is not None:\n # forbid the bad threshold arg suggested by stack overflow, 
gh-12351\n if not isinstance(threshold, numbers.Number):\n raise TypeError("threshold must be numeric")\n if np.isnan(threshold):\n raise ValueError("threshold must be non-NAN, try "\n "sys.maxsize for untruncated representation")\n\n if precision is not None:\n # forbid the bad precision arg as suggested by issue #18254\n try:\n options['precision'] = operator.index(precision)\n except TypeError as e:\n raise TypeError('precision must be an integer') from e\n\n return options\n\n\n@set_module('numpy')\ndef set_printoptions(precision=None, threshold=None, edgeitems=None,\n linewidth=None, suppress=None, nanstr=None,\n infstr=None, formatter=None, sign=None, floatmode=None,\n *, legacy=None, override_repr=None):\n """\n Set printing options.\n\n These options determine the way floating point numbers, arrays and\n other NumPy objects are displayed.\n\n Parameters\n ----------\n precision : int or None, optional\n Number of digits of precision for floating point output (default 8).\n May be None if `floatmode` is not `fixed`, to print as many digits as\n necessary to uniquely specify the value.\n threshold : int, optional\n Total number of array elements which trigger summarization\n rather than full repr (default 1000).\n To always use the full repr without summarization, pass `sys.maxsize`.\n edgeitems : int, optional\n Number of array items in summary at beginning and end of\n each dimension (default 3).\n linewidth : int, optional\n The number of characters per line for the purpose of inserting\n line breaks (default 75).\n suppress : bool, optional\n If True, always print floating point numbers using fixed point\n notation, in which case numbers equal to zero in the current precision\n will print as zero. If False, then scientific notation is used when\n absolute value of the smallest number is < 1e-4 or the ratio of the\n maximum absolute value to the minimum is > 1e3. 
The default is False.\n nanstr : str, optional\n String representation of floating point not-a-number (default nan).\n infstr : str, optional\n String representation of floating point infinity (default inf).\n sign : string, either '-', '+', or ' ', optional\n Controls printing of the sign of floating-point types. If '+', always\n print the sign of positive values. If ' ', always prints a space\n (whitespace character) in the sign position of positive values. If\n '-', omit the sign character of positive values. (default '-')\n\n .. versionchanged:: 2.0\n The sign parameter can now be an integer type, previously\n types were floating-point types.\n\n formatter : dict of callables, optional\n If not None, the keys should indicate the type(s) that the respective\n formatting function applies to. Callables should return a string.\n Types that are not specified (by their corresponding keys) are handled\n by the default formatters. Individual types for which a formatter\n can be set are:\n\n - 'bool'\n - 'int'\n - 'timedelta' : a `numpy.timedelta64`\n - 'datetime' : a `numpy.datetime64`\n - 'float'\n - 'longfloat' : 128-bit floats\n - 'complexfloat'\n - 'longcomplexfloat' : composed of two 128-bit floats\n - 'numpystr' : types `numpy.bytes_` and `numpy.str_`\n - 'object' : `np.object_` arrays\n\n Other keys that can be used to set a group of types at once are:\n\n - 'all' : sets all types\n - 'int_kind' : sets 'int'\n - 'float_kind' : sets 'float' and 'longfloat'\n - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat'\n - 'str_kind' : sets 'numpystr'\n floatmode : str, optional\n Controls the interpretation of the `precision` option for\n floating-point types. 
Can take the following values\n (default maxprec_equal):\n\n * 'fixed': Always print exactly `precision` fractional digits,\n even if this would print more or fewer digits than\n necessary to specify the value uniquely.\n * 'unique': Print the minimum number of fractional digits necessary\n to represent each value uniquely. Different elements may\n have a different number of digits. The value of the\n `precision` option is ignored.\n * 'maxprec': Print at most `precision` fractional digits, but if\n an element can be uniquely represented with fewer digits\n only print it with that many.\n * 'maxprec_equal': Print at most `precision` fractional digits,\n but if every element in the array can be uniquely\n represented with an equal number of fewer digits, use that\n many digits for all elements.\n legacy : string or `False`, optional\n If set to the string ``'1.13'`` enables 1.13 legacy printing mode. This\n approximates numpy 1.13 print output by including a space in the sign\n position of floats and different behavior for 0d arrays. This also\n enables 1.21 legacy printing mode (described below).\n\n If set to the string ``'1.21'`` enables 1.21 legacy printing mode. This\n approximates numpy 1.21 print output of complex structured dtypes\n by not inserting spaces after commas that separate fields and after\n colons.\n\n If set to ``'1.25'`` approximates printing of 1.25 which mainly means\n that numeric scalars are printed without their type information, e.g.\n as ``3.0`` rather than ``np.float64(3.0)``.\n\n If set to ``'2.1'``, shape information is not given when arrays are\n summarized (i.e., multiple elements replaced with ``...``).\n\n If set to ``'2.2'``, the transition to use scientific notation for\n printing ``np.float16`` and ``np.float32`` types may happen later or\n not at all for larger values.\n\n If set to `False`, disables legacy mode.\n\n Unrecognized strings will be ignored with a warning for forward\n compatibility.\n\n .. 
versionchanged:: 1.22.0\n .. versionchanged:: 2.2\n\n override_repr: callable, optional\n If set a passed function will be used for generating arrays' repr.\n Other options will be ignored.\n\n See Also\n --------\n get_printoptions, printoptions, array2string\n\n Notes\n -----\n `formatter` is always reset with a call to `set_printoptions`.\n\n Use `printoptions` as a context manager to set the values temporarily.\n\n Examples\n --------\n Floating point precision can be set:\n\n >>> import numpy as np\n >>> np.set_printoptions(precision=4)\n >>> np.array([1.123456789])\n [1.1235]\n\n Long arrays can be summarised:\n\n >>> np.set_printoptions(threshold=5)\n >>> np.arange(10)\n array([0, 1, 2, ..., 7, 8, 9], shape=(10,))\n\n Small results can be suppressed:\n\n >>> eps = np.finfo(float).eps\n >>> x = np.arange(4.)\n >>> x**2 - (x + eps)**2\n array([-4.9304e-32, -4.4409e-16, 0.0000e+00, 0.0000e+00])\n >>> np.set_printoptions(suppress=True)\n >>> x**2 - (x + eps)**2\n array([-0., -0., 0., 0.])\n\n A custom formatter can be used to display array elements as desired:\n\n >>> np.set_printoptions(formatter={'all':lambda x: 'int: '+str(-x)})\n >>> x = np.arange(3)\n >>> x\n array([int: 0, int: -1, int: -2])\n >>> np.set_printoptions() # formatter gets reset\n >>> x\n array([0, 1, 2])\n\n To put back the default options, you can use:\n\n >>> np.set_printoptions(edgeitems=3, infstr='inf',\n ... linewidth=75, nanstr='nan', precision=8,\n ... suppress=False, threshold=1000, formatter=None)\n\n Also to temporarily override options, use `printoptions`\n as a context manager:\n\n >>> with np.printoptions(precision=2, suppress=True, threshold=5):\n ... np.linspace(0, 10, 10)\n array([ 0. , 1.11, 2.22, ..., 7.78, 8.89, 10. 
], shape=(10,))\n\n """\n _set_printoptions(precision, threshold, edgeitems, linewidth, suppress,\n nanstr, infstr, formatter, sign, floatmode,\n legacy=legacy, override_repr=override_repr)\n\n\ndef _set_printoptions(precision=None, threshold=None, edgeitems=None,\n linewidth=None, suppress=None, nanstr=None,\n infstr=None, formatter=None, sign=None, floatmode=None,\n *, legacy=None, override_repr=None):\n new_opt = _make_options_dict(precision, threshold, edgeitems, linewidth,\n suppress, nanstr, infstr, sign, formatter,\n floatmode, legacy)\n # formatter and override_repr are always reset\n new_opt['formatter'] = formatter\n new_opt['override_repr'] = override_repr\n\n updated_opt = format_options.get() | new_opt\n updated_opt.update(new_opt)\n\n if updated_opt['legacy'] == 113:\n updated_opt['sign'] = '-'\n\n return format_options.set(updated_opt)\n\n\n@set_module('numpy')\ndef get_printoptions():\n """\n Return the current print options.\n\n Returns\n -------\n print_opts : dict\n Dictionary of current print options with keys\n\n - precision : int\n - threshold : int\n - edgeitems : int\n - linewidth : int\n - suppress : bool\n - nanstr : str\n - infstr : str\n - sign : str\n - formatter : dict of callables\n - floatmode : str\n - legacy : str or False\n\n For a full description of these options, see `set_printoptions`.\n\n See Also\n --------\n set_printoptions, printoptions\n\n Examples\n --------\n >>> import numpy as np\n\n >>> np.get_printoptions()\n {'edgeitems': 3, 'threshold': 1000, ..., 'override_repr': None}\n\n >>> np.get_printoptions()['linewidth']\n 75\n >>> np.set_printoptions(linewidth=100)\n >>> np.get_printoptions()['linewidth']\n 100\n\n """\n opts = format_options.get().copy()\n opts['legacy'] = {\n 113: '1.13', 121: '1.21', 125: '1.25', 201: '2.1',\n 202: '2.2', sys.maxsize: False,\n }[opts['legacy']]\n return opts\n\n\ndef _get_legacy_print_mode():\n """Return the legacy print mode as an int."""\n return 
format_options.get()['legacy']\n\n\n@set_module('numpy')\n@contextlib.contextmanager\ndef printoptions(*args, **kwargs):\n """Context manager for setting print options.\n\n Set print options for the scope of the `with` block, and restore the old\n options at the end. See `set_printoptions` for the full description of\n available options.\n\n Examples\n --------\n >>> import numpy as np\n\n >>> from numpy.testing import assert_equal\n >>> with np.printoptions(precision=2):\n ... np.array([2.0]) / 3\n array([0.67])\n\n The `as`-clause of the `with`-statement gives the current print options:\n\n >>> with np.printoptions(precision=2) as opts:\n ... assert_equal(opts, np.get_printoptions())\n\n See Also\n --------\n set_printoptions, get_printoptions\n\n """\n token = _set_printoptions(*args, **kwargs)\n\n try:\n yield get_printoptions()\n finally:\n format_options.reset(token)\n\n\ndef _leading_trailing(a, edgeitems, index=()):\n """\n Keep only the N-D corners (leading and trailing edges) of an array.\n\n Should be passed a base-class ndarray, since it makes no guarantees about\n preserving subclasses.\n """\n axis = len(index)\n if axis == a.ndim:\n return a[index]\n\n if a.shape[axis] > 2 * edgeitems:\n return concatenate((\n _leading_trailing(a, edgeitems, index + np.index_exp[:edgeitems]),\n _leading_trailing(a, edgeitems, index + np.index_exp[-edgeitems:])\n ), axis=axis)\n else:\n return _leading_trailing(a, edgeitems, index + np.index_exp[:])\n\n\ndef _object_format(o):\n """ Object arrays containing lists should be printed unambiguously """\n if type(o) is list:\n fmt = 'list({!r})'\n else:\n fmt = '{!r}'\n return fmt.format(o)\n\ndef repr_format(x):\n if isinstance(x, (np.str_, np.bytes_)):\n return repr(x.item())\n return repr(x)\n\ndef str_format(x):\n if isinstance(x, (np.str_, np.bytes_)):\n return str(x.item())\n return str(x)\n\ndef _get_formatdict(data, *, precision, floatmode, suppress, sign, legacy,\n formatter, **kwargs):\n # note: extra arguments 
in kwargs are ignored\n\n # wrapped in lambdas to avoid taking a code path\n # with the wrong type of data\n formatdict = {\n 'bool': lambda: BoolFormat(data),\n 'int': lambda: IntegerFormat(data, sign),\n 'float': lambda: FloatingFormat(\n data, precision, floatmode, suppress, sign, legacy=legacy),\n 'longfloat': lambda: FloatingFormat(\n data, precision, floatmode, suppress, sign, legacy=legacy),\n 'complexfloat': lambda: ComplexFloatingFormat(\n data, precision, floatmode, suppress, sign, legacy=legacy),\n 'longcomplexfloat': lambda: ComplexFloatingFormat(\n data, precision, floatmode, suppress, sign, legacy=legacy),\n 'datetime': lambda: DatetimeFormat(data, legacy=legacy),\n 'timedelta': lambda: TimedeltaFormat(data),\n 'object': lambda: _object_format,\n 'void': lambda: str_format,\n 'numpystr': lambda: repr_format}\n\n # we need to wrap values in `formatter` in a lambda, so that the interface\n # is the same as the above values.\n def indirect(x):\n return lambda: x\n\n if formatter is not None:\n fkeys = [k for k in formatter.keys() if formatter[k] is not None]\n if 'all' in fkeys:\n for key in formatdict.keys():\n formatdict[key] = indirect(formatter['all'])\n if 'int_kind' in fkeys:\n for key in ['int']:\n formatdict[key] = indirect(formatter['int_kind'])\n if 'float_kind' in fkeys:\n for key in ['float', 'longfloat']:\n formatdict[key] = indirect(formatter['float_kind'])\n if 'complex_kind' in fkeys:\n for key in ['complexfloat', 'longcomplexfloat']:\n formatdict[key] = indirect(formatter['complex_kind'])\n if 'str_kind' in fkeys:\n formatdict['numpystr'] = indirect(formatter['str_kind'])\n for key in formatdict.keys():\n if key in fkeys:\n formatdict[key] = indirect(formatter[key])\n\n return formatdict\n\ndef _get_format_function(data, **options):\n """\n find the right formatting function for the dtype_\n """\n dtype_ = data.dtype\n dtypeobj = dtype_.type\n formatdict = _get_formatdict(data, **options)\n if dtypeobj is None:\n return 
formatdict["numpystr"]()\n elif issubclass(dtypeobj, _nt.bool):\n return formatdict['bool']()\n elif issubclass(dtypeobj, _nt.integer):\n if issubclass(dtypeobj, _nt.timedelta64):\n return formatdict['timedelta']()\n else:\n return formatdict['int']()\n elif issubclass(dtypeobj, _nt.floating):\n if issubclass(dtypeobj, _nt.longdouble):\n return formatdict['longfloat']()\n else:\n return formatdict['float']()\n elif issubclass(dtypeobj, _nt.complexfloating):\n if issubclass(dtypeobj, _nt.clongdouble):\n return formatdict['longcomplexfloat']()\n else:\n return formatdict['complexfloat']()\n elif issubclass(dtypeobj, (_nt.str_, _nt.bytes_)):\n return formatdict['numpystr']()\n elif issubclass(dtypeobj, _nt.datetime64):\n return formatdict['datetime']()\n elif issubclass(dtypeobj, _nt.object_):\n return formatdict['object']()\n elif issubclass(dtypeobj, _nt.void):\n if dtype_.names is not None:\n return StructuredVoidFormat.from_data(data, **options)\n else:\n return formatdict['void']()\n else:\n return formatdict['numpystr']()\n\n\ndef _recursive_guard(fillvalue='...'):\n """\n Like the python 3.2 reprlib.recursive_repr, but forwards *args and **kwargs\n\n Decorates a function such that if it calls itself with the same first\n argument, it returns `fillvalue` instead of recursing.\n\n Largely copied from reprlib.recursive_repr\n """\n\n def decorating_function(f):\n repr_running = set()\n\n @functools.wraps(f)\n def wrapper(self, *args, **kwargs):\n key = id(self), get_ident()\n if key in repr_running:\n return fillvalue\n repr_running.add(key)\n try:\n return f(self, *args, **kwargs)\n finally:\n repr_running.discard(key)\n\n return wrapper\n\n return decorating_function\n\n\n# gracefully handle recursive calls, when object arrays contain themselves\n@_recursive_guard()\ndef _array2string(a, options, separator=' ', prefix=""):\n # The formatter __init__s in _get_format_function cannot deal with\n # subclasses yet, and we also need to avoid recursion issues in\n # 
_formatArray with subclasses which return 0d arrays in place of scalars\n data = asarray(a)\n if a.shape == ():\n a = data\n\n if a.size > options['threshold']:\n summary_insert = "..."\n data = _leading_trailing(data, options['edgeitems'])\n else:\n summary_insert = ""\n\n # find the right formatting function for the array\n format_function = _get_format_function(data, **options)\n\n # skip over "["\n next_line_prefix = " "\n # skip over array(\n next_line_prefix += " " * len(prefix)\n\n lst = _formatArray(a, format_function, options['linewidth'],\n next_line_prefix, separator, options['edgeitems'],\n summary_insert, options['legacy'])\n return lst\n\n\ndef _array2string_dispatcher(\n a, max_line_width=None, precision=None,\n suppress_small=None, separator=None, prefix=None,\n style=None, formatter=None, threshold=None,\n edgeitems=None, sign=None, floatmode=None, suffix=None,\n *, legacy=None):\n return (a,)\n\n\n@array_function_dispatch(_array2string_dispatcher, module='numpy')\ndef array2string(a, max_line_width=None, precision=None,\n suppress_small=None, separator=' ', prefix="",\n style=np._NoValue, formatter=None, threshold=None,\n edgeitems=None, sign=None, floatmode=None, suffix="",\n *, legacy=None):\n """\n Return a string representation of an array.\n\n Parameters\n ----------\n a : ndarray\n Input array.\n max_line_width : int, optional\n Inserts newlines if text is longer than `max_line_width`.\n Defaults to ``numpy.get_printoptions()['linewidth']``.\n precision : int or None, optional\n Floating point precision.\n Defaults to ``numpy.get_printoptions()['precision']``.\n suppress_small : bool, optional\n Represent numbers "very close" to zero as zero; default is False.\n Very close is defined by precision: if the precision is 8, e.g.,\n numbers smaller (in absolute value) than 5e-9 are represented as\n zero.\n Defaults to ``numpy.get_printoptions()['suppress']``.\n separator : str, optional\n Inserted between elements.\n prefix : str, optional\n 
suffix : str, optional\n The length of the prefix and suffix strings are used to respectively\n align and wrap the output. An array is typically printed as::\n\n prefix + array2string(a) + suffix\n\n The output is left-padded by the length of the prefix string, and\n wrapping is forced at the column ``max_line_width - len(suffix)``.\n It should be noted that the content of prefix and suffix strings are\n not included in the output.\n style : _NoValue, optional\n Has no effect, do not use.\n\n .. deprecated:: 1.14.0\n formatter : dict of callables, optional\n If not None, the keys should indicate the type(s) that the respective\n formatting function applies to. Callables should return a string.\n Types that are not specified (by their corresponding keys) are handled\n by the default formatters. Individual types for which a formatter\n can be set are:\n\n - 'bool'\n - 'int'\n - 'timedelta' : a `numpy.timedelta64`\n - 'datetime' : a `numpy.datetime64`\n - 'float'\n - 'longfloat' : 128-bit floats\n - 'complexfloat'\n - 'longcomplexfloat' : composed of two 128-bit floats\n - 'void' : type `numpy.void`\n - 'numpystr' : types `numpy.bytes_` and `numpy.str_`\n\n Other keys that can be used to set a group of types at once are:\n\n - 'all' : sets all types\n - 'int_kind' : sets 'int'\n - 'float_kind' : sets 'float' and 'longfloat'\n - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat'\n - 'str_kind' : sets 'numpystr'\n threshold : int, optional\n Total number of array elements which trigger summarization\n rather than full repr.\n Defaults to ``numpy.get_printoptions()['threshold']``.\n edgeitems : int, optional\n Number of array items in summary at beginning and end of\n each dimension.\n Defaults to ``numpy.get_printoptions()['edgeitems']``.\n sign : string, either '-', '+', or ' ', optional\n Controls printing of the sign of floating-point types. If '+', always\n print the sign of positive values. 
If ' ', always prints a space\n (whitespace character) in the sign position of positive values. If\n '-', omit the sign character of positive values.\n Defaults to ``numpy.get_printoptions()['sign']``.\n\n .. versionchanged:: 2.0\n The sign parameter can now be an integer type, previously\n types were floating-point types.\n\n floatmode : str, optional\n Controls the interpretation of the `precision` option for\n floating-point types.\n Defaults to ``numpy.get_printoptions()['floatmode']``.\n Can take the following values:\n\n - 'fixed': Always print exactly `precision` fractional digits,\n even if this would print more or fewer digits than\n necessary to specify the value uniquely.\n - 'unique': Print the minimum number of fractional digits necessary\n to represent each value uniquely. Different elements may\n have a different number of digits. The value of the\n `precision` option is ignored.\n - 'maxprec': Print at most `precision` fractional digits, but if\n an element can be uniquely represented with fewer digits\n only print it with that many.\n - 'maxprec_equal': Print at most `precision` fractional digits,\n but if every element in the array can be uniquely\n represented with an equal number of fewer digits, use that\n many digits for all elements.\n legacy : string or `False`, optional\n If set to the string ``'1.13'`` enables 1.13 legacy printing mode. This\n approximates numpy 1.13 print output by including a space in the sign\n position of floats and different behavior for 0d arrays. If set to\n `False`, disables legacy mode. 
Unrecognized strings will be ignored\n with a warning for forward compatibility.\n\n Returns\n -------\n array_str : str\n String representation of the array.\n\n Raises\n ------\n TypeError\n if a callable in `formatter` does not return a string.\n\n See Also\n --------\n array_str, array_repr, set_printoptions, get_printoptions\n\n Notes\n -----\n If a formatter is specified for a certain type, the `precision` keyword is\n ignored for that type.\n\n This is a very flexible function; `array_repr` and `array_str` are using\n `array2string` internally so keywords with the same name should work\n identically in all three functions.\n\n Examples\n --------\n >>> import numpy as np\n >>> x = np.array([1e-16,1,2,3])\n >>> np.array2string(x, precision=2, separator=',',\n ... suppress_small=True)\n '[0.,1.,2.,3.]'\n\n >>> x = np.arange(3.)\n >>> np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x})\n '[0.00 1.00 2.00]'\n\n >>> x = np.arange(3)\n >>> np.array2string(x, formatter={'int':lambda x: hex(x)})\n '[0x0 0x1 0x2]'\n\n """\n\n overrides = _make_options_dict(precision, threshold, edgeitems,\n max_line_width, suppress_small, None, None,\n sign, formatter, floatmode, legacy)\n options = format_options.get().copy()\n options.update(overrides)\n\n if options['legacy'] <= 113:\n if style is np._NoValue:\n style = repr\n\n if a.shape == () and a.dtype.names is None:\n return style(a.item())\n elif style is not np._NoValue:\n # Deprecation 11-9-2017 v1.14\n warnings.warn("'style' argument is deprecated and no longer functional"\n " except in 1.13 'legacy' mode",\n DeprecationWarning, stacklevel=2)\n\n if options['legacy'] > 113:\n options['linewidth'] -= len(suffix)\n\n # treat as a null array if any of shape elements == 0\n if a.size == 0:\n return "[]"\n\n return _array2string(a, options, separator, prefix)\n\n\ndef _extendLine(s, line, word, line_width, next_line_prefix, legacy):\n needs_wrap = len(line) + len(word) > line_width\n if legacy > 113:\n # don't 
wrap lines if it won't help\n if len(line) <= len(next_line_prefix):\n needs_wrap = False\n\n if needs_wrap:\n s += line.rstrip() + "\n"\n line = next_line_prefix\n line += word\n return s, line\n\n\ndef _extendLine_pretty(s, line, word, line_width, next_line_prefix, legacy):\n """\n Extends line with nicely formatted (possibly multi-line) string ``word``.\n """\n words = word.splitlines()\n if len(words) == 1 or legacy <= 113:\n return _extendLine(s, line, word, line_width, next_line_prefix, legacy)\n\n max_word_length = max(len(word) for word in words)\n if (len(line) + max_word_length > line_width and\n len(line) > len(next_line_prefix)):\n s += line.rstrip() + '\n'\n line = next_line_prefix + words[0]\n indent = next_line_prefix\n else:\n indent = len(line) * ' '\n line += words[0]\n\n for word in words[1::]:\n s += line.rstrip() + '\n'\n line = indent + word\n\n suffix_length = max_word_length - len(words[-1])\n line += suffix_length * ' '\n\n return s, line\n\ndef _formatArray(a, format_function, line_width, next_line_prefix,\n separator, edge_items, summary_insert, legacy):\n """formatArray is designed for two modes of operation:\n\n 1. Full output\n\n 2. Summarized output\n\n """\n def recurser(index, hanging_indent, curr_width):\n """\n By using this local function, we don't need to recurse with all the\n arguments. 
Since this function is not created recursively, the cost is\n not significant\n """\n axis = len(index)\n axes_left = a.ndim - axis\n\n if axes_left == 0:\n return format_function(a[index])\n\n # when recursing, add a space to align with the [ added, and reduce the\n # length of the line by 1\n next_hanging_indent = hanging_indent + ' '\n if legacy <= 113:\n next_width = curr_width\n else:\n next_width = curr_width - len(']')\n\n a_len = a.shape[axis]\n show_summary = summary_insert and 2 * edge_items < a_len\n if show_summary:\n leading_items = edge_items\n trailing_items = edge_items\n else:\n leading_items = 0\n trailing_items = a_len\n\n # stringify the array with the hanging indent on the first line too\n s = ''\n\n # last axis (rows) - wrap elements if they would not fit on one line\n if axes_left == 1:\n # the length up until the beginning of the separator / bracket\n if legacy <= 113:\n elem_width = curr_width - len(separator.rstrip())\n else:\n elem_width = curr_width - max(\n len(separator.rstrip()), len(']')\n )\n\n line = hanging_indent\n for i in range(leading_items):\n word = recurser(index + (i,), next_hanging_indent, next_width)\n s, line = _extendLine_pretty(\n s, line, word, elem_width, hanging_indent, legacy)\n line += separator\n\n if show_summary:\n s, line = _extendLine(\n s, line, summary_insert, elem_width, hanging_indent, legacy\n )\n if legacy <= 113:\n line += ", "\n else:\n line += separator\n\n for i in range(trailing_items, 1, -1):\n word = recurser(index + (-i,), next_hanging_indent, next_width)\n s, line = _extendLine_pretty(\n s, line, word, elem_width, hanging_indent, legacy)\n line += separator\n\n if legacy <= 113:\n # width of the separator is not considered on 1.13\n elem_width = curr_width\n word = recurser(index + (-1,), next_hanging_indent, next_width)\n s, line = _extendLine_pretty(\n s, line, word, elem_width, hanging_indent, legacy)\n\n s += line\n\n # other axes - insert newlines between rows\n else:\n s = ''\n line_sep 
= separator.rstrip() + '\n' * (axes_left - 1)\n\n for i in range(leading_items):\n nested = recurser(\n index + (i,), next_hanging_indent, next_width\n )\n s += hanging_indent + nested + line_sep\n\n if show_summary:\n if legacy <= 113:\n # trailing space, fixed nbr of newlines,\n # and fixed separator\n s += hanging_indent + summary_insert + ", \n"\n else:\n s += hanging_indent + summary_insert + line_sep\n\n for i in range(trailing_items, 1, -1):\n nested = recurser(index + (-i,), next_hanging_indent,\n next_width)\n s += hanging_indent + nested + line_sep\n\n nested = recurser(index + (-1,), next_hanging_indent, next_width)\n s += hanging_indent + nested\n\n # remove the hanging indent, and wrap in []\n s = '[' + s[len(hanging_indent):] + ']'\n return s\n\n try:\n # invoke the recursive part with an initial index and prefix\n return recurser(index=(),\n hanging_indent=next_line_prefix,\n curr_width=line_width)\n finally:\n # recursive closures have a cyclic reference to themselves, which\n # requires gc to collect (gh-10620). 
To avoid this problem, for\n # performance and PyPy friendliness, we break the cycle:\n recurser = None\n\ndef _none_or_positive_arg(x, name):\n if x is None:\n return -1\n if x < 0:\n raise ValueError(f"{name} must be >= 0")\n return x\n\nclass FloatingFormat:\n """ Formatter for subtypes of np.floating """\n def __init__(self, data, precision, floatmode, suppress_small, sign=False,\n *, legacy=None):\n # for backcompatibility, accept bools\n if isinstance(sign, bool):\n sign = '+' if sign else '-'\n\n self._legacy = legacy\n if self._legacy <= 113:\n # when not 0d, legacy does not support '-'\n if data.shape != () and sign == '-':\n sign = ' '\n\n self.floatmode = floatmode\n if floatmode == 'unique':\n self.precision = None\n else:\n self.precision = precision\n\n self.precision = _none_or_positive_arg(self.precision, 'precision')\n\n self.suppress_small = suppress_small\n self.sign = sign\n self.exp_format = False\n self.large_exponent = False\n self.fillFormat(data)\n\n def fillFormat(self, data):\n # only the finite values are used to compute the number of digits\n finite_vals = data[isfinite(data)]\n\n # choose exponential mode based on the non-zero finite values:\n abs_non_zero = absolute(finite_vals[finite_vals != 0])\n if len(abs_non_zero) != 0:\n max_val = np.max(abs_non_zero)\n min_val = np.min(abs_non_zero)\n if self._legacy <= 202:\n exp_cutoff_max = 1.e8\n else:\n # consider data type while deciding the max cutoff for exp format\n exp_cutoff_max = 10.**min(8, np.finfo(data.dtype).precision)\n with errstate(over='ignore'): # division can overflow\n if max_val >= exp_cutoff_max or (not self.suppress_small and\n (min_val < 0.0001 or max_val / min_val > 1000.)):\n self.exp_format = True\n\n # do a first pass of printing all the numbers, to determine sizes\n if len(finite_vals) == 0:\n self.pad_left = 0\n self.pad_right = 0\n self.trim = '.'\n self.exp_size = -1\n self.unique = True\n self.min_digits = None\n elif self.exp_format:\n trim, unique = '.', 
True\n if self.floatmode == 'fixed' or self._legacy <= 113:\n trim, unique = 'k', False\n strs = (dragon4_scientific(x, precision=self.precision,\n unique=unique, trim=trim, sign=self.sign == '+')\n for x in finite_vals)\n frac_strs, _, exp_strs = zip(*(s.partition('e') for s in strs))\n int_part, frac_part = zip(*(s.split('.') for s in frac_strs))\n self.exp_size = max(len(s) for s in exp_strs) - 1\n\n self.trim = 'k'\n self.precision = max(len(s) for s in frac_part)\n self.min_digits = self.precision\n self.unique = unique\n\n # for back-compat with np 1.13, use 2 spaces & sign and full prec\n if self._legacy <= 113:\n self.pad_left = 3\n else:\n # this should be only 1 or 2. Can be calculated from sign.\n self.pad_left = max(len(s) for s in int_part)\n # pad_right is only needed for nan length calculation\n self.pad_right = self.exp_size + 2 + self.precision\n else:\n trim, unique = '.', True\n if self.floatmode == 'fixed':\n trim, unique = 'k', False\n strs = (dragon4_positional(x, precision=self.precision,\n fractional=True,\n unique=unique, trim=trim,\n sign=self.sign == '+')\n for x in finite_vals)\n int_part, frac_part = zip(*(s.split('.') for s in strs))\n if self._legacy <= 113:\n self.pad_left = 1 + max(len(s.lstrip('-+')) for s in int_part)\n else:\n self.pad_left = max(len(s) for s in int_part)\n self.pad_right = max(len(s) for s in frac_part)\n self.exp_size = -1\n self.unique = unique\n\n if self.floatmode in ['fixed', 'maxprec_equal']:\n self.precision = self.min_digits = self.pad_right\n self.trim = 'k'\n else:\n self.trim = '.'\n self.min_digits = 0\n\n if self._legacy > 113:\n # account for sign = ' ' by adding one to pad_left\n if self.sign == ' ' and not any(np.signbit(finite_vals)):\n self.pad_left += 1\n\n # if there are non-finite values, may need to increase pad_left\n if data.size != finite_vals.size:\n neginf = self.sign != '-' or any(data[isinf(data)] < 0)\n offset = self.pad_right + 1 # +1 for decimal pt\n current_options = 
format_options.get()\n self.pad_left = max(\n self.pad_left, len(current_options['nanstr']) - offset,\n len(current_options['infstr']) + neginf - offset\n )\n\n def __call__(self, x):\n if not np.isfinite(x):\n with errstate(invalid='ignore'):\n current_options = format_options.get()\n if np.isnan(x):\n sign = '+' if self.sign == '+' else ''\n ret = sign + current_options['nanstr']\n else: # isinf\n sign = '-' if x < 0 else '+' if self.sign == '+' else ''\n ret = sign + current_options['infstr']\n return ' ' * (\n self.pad_left + self.pad_right + 1 - len(ret)\n ) + ret\n\n if self.exp_format:\n return dragon4_scientific(x,\n precision=self.precision,\n min_digits=self.min_digits,\n unique=self.unique,\n trim=self.trim,\n sign=self.sign == '+',\n pad_left=self.pad_left,\n exp_digits=self.exp_size)\n else:\n return dragon4_positional(x,\n precision=self.precision,\n min_digits=self.min_digits,\n unique=self.unique,\n fractional=True,\n trim=self.trim,\n sign=self.sign == '+',\n pad_left=self.pad_left,\n pad_right=self.pad_right)\n\n\n@set_module('numpy')\ndef format_float_scientific(x, precision=None, unique=True, trim='k',\n sign=False, pad_left=None, exp_digits=None,\n min_digits=None):\n """\n Format a floating-point scalar as a decimal string in scientific notation.\n\n Provides control over rounding, trimming and padding. Uses and assumes\n IEEE unbiased rounding. Uses the "Dragon4" algorithm.\n\n Parameters\n ----------\n x : python float or numpy floating scalar\n Value to format.\n precision : non-negative integer or None, optional\n Maximum number of digits to print. May be None if `unique` is\n `True`, but must be an integer if unique is `False`.\n unique : boolean, optional\n If `True`, use a digit-generation strategy which gives the shortest\n representation which uniquely identifies the floating-point number from\n other values of the same type, by judicious rounding. If `precision`\n is given fewer digits than necessary can be printed. 
If `min_digits`\n is given more can be printed, in which cases the last digit is rounded\n with unbiased rounding.\n If `False`, digits are generated as if printing an infinite-precision\n value and stopping after `precision` digits, rounding the remaining\n value with unbiased rounding\n trim : one of 'k', '.', '0', '-', optional\n Controls post-processing trimming of trailing digits, as follows:\n\n * 'k' : keep trailing zeros, keep decimal point (no trimming)\n * '.' : trim all trailing zeros, leave decimal point\n * '0' : trim all but the zero before the decimal point. Insert the\n zero if it is missing.\n * '-' : trim trailing zeros and any trailing decimal point\n sign : boolean, optional\n Whether to show the sign for positive values.\n pad_left : non-negative integer, optional\n Pad the left side of the string with whitespace until at least that\n many characters are to the left of the decimal point.\n exp_digits : non-negative integer, optional\n Pad the exponent with zeros until it contains at least this\n many digits. If omitted, the exponent will be at least 2 digits.\n min_digits : non-negative integer or None, optional\n Minimum number of digits to print. This only has an effect for\n `unique=True`. In that case more digits than necessary to uniquely\n identify the value may be printed and rounded unbiased.\n\n .. 
versionadded:: 1.21.0\n\n Returns\n -------\n rep : string\n The string representation of the floating point value\n\n See Also\n --------\n format_float_positional\n\n Examples\n --------\n >>> import numpy as np\n >>> np.format_float_scientific(np.float32(np.pi))\n '3.1415927e+00'\n >>> s = np.float32(1.23e24)\n >>> np.format_float_scientific(s, unique=False, precision=15)\n '1.230000071797338e+24'\n >>> np.format_float_scientific(s, exp_digits=4)\n '1.23e+0024'\n """\n precision = _none_or_positive_arg(precision, 'precision')\n pad_left = _none_or_positive_arg(pad_left, 'pad_left')\n exp_digits = _none_or_positive_arg(exp_digits, 'exp_digits')\n min_digits = _none_or_positive_arg(min_digits, 'min_digits')\n if min_digits > 0 and precision > 0 and min_digits > precision:\n raise ValueError("min_digits must be less than or equal to precision")\n return dragon4_scientific(x, precision=precision, unique=unique,\n trim=trim, sign=sign, pad_left=pad_left,\n exp_digits=exp_digits, min_digits=min_digits)\n\n\n@set_module('numpy')\ndef format_float_positional(x, precision=None, unique=True,\n fractional=True, trim='k', sign=False,\n pad_left=None, pad_right=None, min_digits=None):\n """\n Format a floating-point scalar as a decimal string in positional notation.\n\n Provides control over rounding, trimming and padding. Uses and assumes\n IEEE unbiased rounding. Uses the "Dragon4" algorithm.\n\n Parameters\n ----------\n x : python float or numpy floating scalar\n Value to format.\n precision : non-negative integer or None, optional\n Maximum number of digits to print. May be None if `unique` is\n `True`, but must be an integer if unique is `False`.\n unique : boolean, optional\n If `True`, use a digit-generation strategy which gives the shortest\n representation which uniquely identifies the floating-point number from\n other values of the same type, by judicious rounding. 
If `precision`\n is given fewer digits than necessary can be printed, or if `min_digits`\n is given more can be printed, in which cases the last digit is rounded\n with unbiased rounding.\n If `False`, digits are generated as if printing an infinite-precision\n value and stopping after `precision` digits, rounding the remaining\n value with unbiased rounding\n fractional : boolean, optional\n If `True`, the cutoffs of `precision` and `min_digits` refer to the\n total number of digits after the decimal point, including leading\n zeros.\n If `False`, `precision` and `min_digits` refer to the total number of\n significant digits, before or after the decimal point, ignoring leading\n zeros.\n trim : one of 'k', '.', '0', '-', optional\n Controls post-processing trimming of trailing digits, as follows:\n\n * 'k' : keep trailing zeros, keep decimal point (no trimming)\n * '.' : trim all trailing zeros, leave decimal point\n * '0' : trim all but the zero before the decimal point. Insert the\n zero if it is missing.\n * '-' : trim trailing zeros and any trailing decimal point\n sign : boolean, optional\n Whether to show the sign for positive values.\n pad_left : non-negative integer, optional\n Pad the left side of the string with whitespace until at least that\n many characters are to the left of the decimal point.\n pad_right : non-negative integer, optional\n Pad the right side of the string with whitespace until at least that\n many characters are to the right of the decimal point.\n min_digits : non-negative integer or None, optional\n Minimum number of digits to print. Only has an effect if `unique=True`\n in which case additional digits past those necessary to uniquely\n identify the value may be printed, rounding the last additional digit.\n\n .. 
versionadded:: 1.21.0\n\n Returns\n -------\n rep : string\n The string representation of the floating point value\n\n See Also\n --------\n format_float_scientific\n\n Examples\n --------\n >>> import numpy as np\n >>> np.format_float_positional(np.float32(np.pi))\n '3.1415927'\n >>> np.format_float_positional(np.float16(np.pi))\n '3.14'\n >>> np.format_float_positional(np.float16(0.3))\n '0.3'\n >>> np.format_float_positional(np.float16(0.3), unique=False, precision=10)\n '0.3000488281'\n """\n precision = _none_or_positive_arg(precision, 'precision')\n pad_left = _none_or_positive_arg(pad_left, 'pad_left')\n pad_right = _none_or_positive_arg(pad_right, 'pad_right')\n min_digits = _none_or_positive_arg(min_digits, 'min_digits')\n if not fractional and precision == 0:\n raise ValueError("precision must be greater than 0 if "\n "fractional=False")\n if min_digits > 0 and precision > 0 and min_digits > precision:\n raise ValueError("min_digits must be less than or equal to precision")\n return dragon4_positional(x, precision=precision, unique=unique,\n fractional=fractional, trim=trim,\n sign=sign, pad_left=pad_left,\n pad_right=pad_right, min_digits=min_digits)\n\nclass IntegerFormat:\n def __init__(self, data, sign='-'):\n if data.size > 0:\n data_max = np.max(data)\n data_min = np.min(data)\n data_max_str_len = len(str(data_max))\n if sign == ' ' and data_min < 0:\n sign = '-'\n if data_max >= 0 and sign in "+ ":\n data_max_str_len += 1\n max_str_len = max(data_max_str_len,\n len(str(data_min)))\n else:\n max_str_len = 0\n self.format = f'{{:{sign}{max_str_len}d}}'\n\n def __call__(self, x):\n return self.format.format(x)\n\nclass BoolFormat:\n def __init__(self, data, **kwargs):\n # add an extra space so " True" and "False" have the same length and\n # array elements align nicely when printed, except in 0d arrays\n self.truestr = ' True' if data.shape != () else 'True'\n\n def __call__(self, x):\n return self.truestr if x else "False"\n\n\nclass 
ComplexFloatingFormat:\n """ Formatter for subtypes of np.complexfloating """\n def __init__(self, x, precision, floatmode, suppress_small,\n sign=False, *, legacy=None):\n # for backcompatibility, accept bools\n if isinstance(sign, bool):\n sign = '+' if sign else '-'\n\n floatmode_real = floatmode_imag = floatmode\n if legacy <= 113:\n floatmode_real = 'maxprec_equal'\n floatmode_imag = 'maxprec'\n\n self.real_format = FloatingFormat(\n x.real, precision, floatmode_real, suppress_small,\n sign=sign, legacy=legacy\n )\n self.imag_format = FloatingFormat(\n x.imag, precision, floatmode_imag, suppress_small,\n sign='+', legacy=legacy\n )\n\n def __call__(self, x):\n r = self.real_format(x.real)\n i = self.imag_format(x.imag)\n\n # add the 'j' before the terminal whitespace in i\n sp = len(i.rstrip())\n i = i[:sp] + 'j' + i[sp:]\n\n return r + i\n\n\nclass _TimelikeFormat:\n def __init__(self, data):\n non_nat = data[~isnat(data)]\n if len(non_nat) > 0:\n # Max str length of non-NaT elements\n max_str_len = max(len(self._format_non_nat(np.max(non_nat))),\n len(self._format_non_nat(np.min(non_nat))))\n else:\n max_str_len = 0\n if len(non_nat) < data.size:\n # data contains a NaT\n max_str_len = max(max_str_len, 5)\n self._format = f'%{max_str_len}s'\n self._nat = "'NaT'".rjust(max_str_len)\n\n def _format_non_nat(self, x):\n # override in subclass\n raise NotImplementedError\n\n def __call__(self, x):\n if isnat(x):\n return self._nat\n else:\n return self._format % self._format_non_nat(x)\n\n\nclass DatetimeFormat(_TimelikeFormat):\n def __init__(self, x, unit=None, timezone=None, casting='same_kind',\n legacy=False):\n # Get the unit from the dtype\n if unit is None:\n if x.dtype.kind == 'M':\n unit = datetime_data(x.dtype)[0]\n else:\n unit = 's'\n\n if timezone is None:\n timezone = 'naive'\n self.timezone = timezone\n self.unit = unit\n self.casting = casting\n self.legacy = legacy\n\n # must be called after the above are configured\n super().__init__(x)\n\n def 
__call__(self, x):\n if self.legacy <= 113:\n return self._format_non_nat(x)\n return super().__call__(x)\n\n def _format_non_nat(self, x):\n return "'%s'" % datetime_as_string(x,\n unit=self.unit,\n timezone=self.timezone,\n casting=self.casting)\n\n\nclass TimedeltaFormat(_TimelikeFormat):\n def _format_non_nat(self, x):\n return str(x.astype('i8'))\n\n\nclass SubArrayFormat:\n def __init__(self, format_function, **options):\n self.format_function = format_function\n self.threshold = options['threshold']\n self.edge_items = options['edgeitems']\n\n def __call__(self, a):\n self.summary_insert = "..." if a.size > self.threshold else ""\n return self.format_array(a)\n\n def format_array(self, a):\n if np.ndim(a) == 0:\n return self.format_function(a)\n\n if self.summary_insert and a.shape[0] > 2 * self.edge_items:\n formatted = (\n [self.format_array(a_) for a_ in a[:self.edge_items]]\n + [self.summary_insert]\n + [self.format_array(a_) for a_ in a[-self.edge_items:]]\n )\n else:\n formatted = [self.format_array(a_) for a_ in a]\n\n return "[" + ", ".join(formatted) + "]"\n\n\nclass StructuredVoidFormat:\n """\n Formatter for structured np.void objects.\n\n This does not work on structured alias types like\n np.dtype(('i4', 'i2,i2')), as alias scalars lose their field information,\n and the implementation relies upon np.void.__getitem__.\n """\n def __init__(self, format_functions):\n self.format_functions = format_functions\n\n @classmethod\n def from_data(cls, data, **options):\n """\n This is a second way to initialize StructuredVoidFormat,\n using the raw data as input. 
Added to avoid changing\n the signature of __init__.\n """\n format_functions = []\n for field_name in data.dtype.names:\n format_function = _get_format_function(data[field_name], **options)\n if data.dtype[field_name].shape != ():\n format_function = SubArrayFormat(format_function, **options)\n format_functions.append(format_function)\n return cls(format_functions)\n\n def __call__(self, x):\n str_fields = [\n format_function(field)\n for field, format_function in zip(x, self.format_functions)\n ]\n if len(str_fields) == 1:\n return f"({str_fields[0]},)"\n else:\n return f"({', '.join(str_fields)})"\n\n\ndef _void_scalar_to_string(x, is_repr=True):\n """\n Implements the repr for structured-void scalars. It is called from the\n scalartypes.c.src code, and is placed here because it uses the elementwise\n formatters defined above.\n """\n options = format_options.get().copy()\n\n if options["legacy"] <= 125:\n return StructuredVoidFormat.from_data(array(x), **options)(x)\n\n if options.get('formatter') is None:\n options['formatter'] = {}\n options['formatter'].setdefault('float_kind', str)\n val_repr = StructuredVoidFormat.from_data(array(x), **options)(x)\n if not is_repr:\n return val_repr\n cls = type(x)\n cls_fqn = cls.__module__.replace("numpy", "np") + "." 
+ cls.__name__\n void_dtype = np.dtype((np.void, x.dtype))\n return f"{cls_fqn}({val_repr}, dtype={void_dtype!s})"\n\n\n_typelessdata = [int_, float64, complex128, _nt.bool]\n\n\ndef dtype_is_implied(dtype):\n """\n Determine if the given dtype is implied by the representation\n of its values.\n\n Parameters\n ----------\n dtype : dtype\n Data type\n\n Returns\n -------\n implied : bool\n True if the dtype is implied by the representation of its values.\n\n Examples\n --------\n >>> import numpy as np\n >>> np._core.arrayprint.dtype_is_implied(int)\n True\n >>> np.array([1, 2, 3], int)\n array([1, 2, 3])\n >>> np._core.arrayprint.dtype_is_implied(np.int8)\n False\n >>> np.array([1, 2, 3], np.int8)\n array([1, 2, 3], dtype=int8)\n """\n dtype = np.dtype(dtype)\n if format_options.get()['legacy'] <= 113 and dtype.type == np.bool:\n return False\n\n # not just void types can be structured, and names are not part of the repr\n if dtype.names is not None:\n return False\n\n # should care about endianness *unless size is 1* (e.g., int8, bool)\n if not dtype.isnative:\n return False\n\n return dtype.type in _typelessdata\n\n\ndef dtype_short_repr(dtype):\n """\n Convert a dtype to a short form which evaluates to the same dtype.\n\n The intent is roughly that the following holds\n\n >>> from numpy import *\n >>> dt = np.int64([1, 2]).dtype\n >>> assert eval(dtype_short_repr(dt)) == dt\n """\n if type(dtype).__repr__ != np.dtype.__repr__:\n # TODO: Custom repr for user DTypes, logic should likely move.\n return repr(dtype)\n if dtype.names is not None:\n # structured dtypes give a list or tuple repr\n return str(dtype)\n elif issubclass(dtype.type, flexible):\n # handle these separately so they don't give garbage like str256\n return f"'{str(dtype)}'"\n\n typename = dtype.name\n if not dtype.isnative:\n # deal with cases like dtype('<u2') that are identical to an\n # established dtype (in this case uint16)\n # except that they have a different endianness.\n return 
f"'{str(dtype)}'"\n # quote typenames which can't be represented as python variable names\n if typename and not (typename[0].isalpha() and typename.isalnum()):\n typename = repr(typename)\n return typename\n\n\ndef _array_repr_implementation(\n arr, max_line_width=None, precision=None, suppress_small=None,\n array2string=array2string):\n """Internal version of array_repr() that allows overriding array2string."""\n current_options = format_options.get()\n override_repr = current_options["override_repr"]\n if override_repr is not None:\n return override_repr(arr)\n\n if max_line_width is None:\n max_line_width = current_options['linewidth']\n\n if type(arr) is not ndarray:\n class_name = type(arr).__name__\n else:\n class_name = "array"\n\n prefix = class_name + "("\n if (current_options['legacy'] <= 113 and\n arr.shape == () and not arr.dtype.names):\n lst = repr(arr.item())\n else:\n lst = array2string(arr, max_line_width, precision, suppress_small,\n ', ', prefix, suffix=")")\n\n # Add dtype and shape information if these cannot be inferred from\n # the array string.\n extras = []\n if ((arr.size == 0 and arr.shape != (0,))\n or (current_options['legacy'] > 210\n and arr.size > current_options['threshold'])):\n extras.append(f"shape={arr.shape}")\n if not dtype_is_implied(arr.dtype) or arr.size == 0:\n extras.append(f"dtype={dtype_short_repr(arr.dtype)}")\n\n if not extras:\n return prefix + lst + ")"\n\n arr_str = prefix + lst + ","\n extra_str = ", ".join(extras) + ")"\n # compute whether we should put extras on a new line: Do so if adding the\n # extras would extend the last line past max_line_width.\n # Note: This line gives the correct result even when rfind returns -1.\n last_line_len = len(arr_str) - (arr_str.rfind('\n') + 1)\n spacer = " "\n if current_options['legacy'] <= 113:\n if issubclass(arr.dtype.type, flexible):\n spacer = '\n' + ' ' * len(prefix)\n elif last_line_len + len(extra_str) + 1 > max_line_width:\n spacer = '\n' + ' ' * len(prefix)\n\n 
return arr_str + spacer + extra_str\n\n\ndef _array_repr_dispatcher(\n arr, max_line_width=None, precision=None, suppress_small=None):\n return (arr,)\n\n\n@array_function_dispatch(_array_repr_dispatcher, module='numpy')\ndef array_repr(arr, max_line_width=None, precision=None, suppress_small=None):\n """\n Return the string representation of an array.\n\n Parameters\n ----------\n arr : ndarray\n Input array.\n max_line_width : int, optional\n Inserts newlines if text is longer than `max_line_width`.\n Defaults to ``numpy.get_printoptions()['linewidth']``.\n precision : int, optional\n Floating point precision.\n Defaults to ``numpy.get_printoptions()['precision']``.\n suppress_small : bool, optional\n Represent numbers "very close" to zero as zero; default is False.\n Very close is defined by precision: if the precision is 8, e.g.,\n numbers smaller (in absolute value) than 5e-9 are represented as\n zero.\n Defaults to ``numpy.get_printoptions()['suppress']``.\n\n Returns\n -------\n string : str\n The string representation of an array.\n\n See Also\n --------\n array_str, array2string, set_printoptions\n\n Examples\n --------\n >>> import numpy as np\n >>> np.array_repr(np.array([1,2]))\n 'array([1, 2])'\n >>> np.array_repr(np.ma.array([0.]))\n 'MaskedArray([0.])'\n >>> np.array_repr(np.array([], np.int32))\n 'array([], dtype=int32)'\n\n >>> x = np.array([1e-6, 4e-7, 2, 3])\n >>> np.array_repr(x, precision=6, suppress_small=True)\n 'array([0.000001, 0. , 2. , 3. 
])'\n\n """\n return _array_repr_implementation(\n arr, max_line_width, precision, suppress_small)\n\n\n@_recursive_guard()\ndef _guarded_repr_or_str(v):\n if isinstance(v, bytes):\n return repr(v)\n return str(v)\n\n\ndef _array_str_implementation(\n a, max_line_width=None, precision=None, suppress_small=None,\n array2string=array2string):\n """Internal version of array_str() that allows overriding array2string."""\n if (format_options.get()['legacy'] <= 113 and\n a.shape == () and not a.dtype.names):\n return str(a.item())\n\n # the str of 0d arrays is a special case: It should appear like a scalar,\n # so floats are not truncated by `precision`, and strings are not wrapped\n # in quotes. So we return the str of the scalar value.\n if a.shape == ():\n # obtain a scalar and call str on it, avoiding problems for subclasses\n # for which indexing with () returns a 0d instead of a scalar by using\n # ndarray's getindex. Also guard against recursive 0d object arrays.\n return _guarded_repr_or_str(np.ndarray.__getitem__(a, ()))\n\n return array2string(a, max_line_width, precision, suppress_small, ' ', "")\n\n\ndef _array_str_dispatcher(\n a, max_line_width=None, precision=None, suppress_small=None):\n return (a,)\n\n\n@array_function_dispatch(_array_str_dispatcher, module='numpy')\ndef array_str(a, max_line_width=None, precision=None, suppress_small=None):\n """\n Return a string representation of the data in an array.\n\n The data in the array is returned as a single string. 
This function is\n similar to `array_repr`, the difference being that `array_repr` also\n returns information on the kind of array and its data type.\n\n Parameters\n ----------\n a : ndarray\n Input array.\n max_line_width : int, optional\n Inserts newlines if text is longer than `max_line_width`.\n Defaults to ``numpy.get_printoptions()['linewidth']``.\n precision : int, optional\n Floating point precision.\n Defaults to ``numpy.get_printoptions()['precision']``.\n suppress_small : bool, optional\n Represent numbers "very close" to zero as zero; default is False.\n Very close is defined by precision: if the precision is 8, e.g.,\n numbers smaller (in absolute value) than 5e-9 are represented as\n zero.\n Defaults to ``numpy.get_printoptions()['suppress']``.\n\n See Also\n --------\n array2string, array_repr, set_printoptions\n\n Examples\n --------\n >>> import numpy as np\n >>> np.array_str(np.arange(3))\n '[0 1 2]'\n\n """\n return _array_str_implementation(\n a, max_line_width, precision, suppress_small)\n\n\n# needed if __array_function__ is disabled\n_array2string_impl = getattr(array2string, '__wrapped__', array2string)\n_default_array_str = functools.partial(_array_str_implementation,\n array2string=_array2string_impl)\n_default_array_repr = functools.partial(_array_repr_implementation,\n array2string=_array2string_impl)\n
.venv\Lib\site-packages\numpy\_core\arrayprint.py
arrayprint.py
Python
67,053
0.75
0.181408
0.068733
awesome-app
369
2023-09-26T02:50:03.731005
GPL-3.0
false
16e96d175b08275a568a3aca4fbd67be
from collections.abc import Callable\n\n# Using a private class is by no means ideal, but it is simply a consequence\n# of a `contextlib.context` returning an instance of aforementioned class\nfrom contextlib import _GeneratorContextManager\nfrom typing import (\n Any,\n Final,\n Literal,\n SupportsIndex,\n TypeAlias,\n TypedDict,\n overload,\n type_check_only,\n)\n\nfrom typing_extensions import deprecated\n\nimport numpy as np\nfrom numpy._globals import _NoValueType\nfrom numpy._typing import NDArray, _CharLike_co, _FloatLike_co\n\n__all__ = [\n "array2string",\n "array_repr",\n "array_str",\n "format_float_positional",\n "format_float_scientific",\n "get_printoptions",\n "printoptions",\n "set_printoptions",\n]\n\n###\n\n_FloatMode: TypeAlias = Literal["fixed", "unique", "maxprec", "maxprec_equal"]\n_LegacyNoStyle: TypeAlias = Literal["1.21", "1.25", "2.1", False]\n_Legacy: TypeAlias = Literal["1.13", _LegacyNoStyle]\n_Sign: TypeAlias = Literal["-", "+", " "]\n_Trim: TypeAlias = Literal["k", ".", "0", "-"]\n_ReprFunc: TypeAlias = Callable[[NDArray[Any]], str]\n\n@type_check_only\nclass _FormatDict(TypedDict, total=False):\n bool: Callable[[np.bool], str]\n int: Callable[[np.integer], str]\n timedelta: Callable[[np.timedelta64], str]\n datetime: Callable[[np.datetime64], str]\n float: Callable[[np.floating], str]\n longfloat: Callable[[np.longdouble], str]\n complexfloat: Callable[[np.complexfloating], str]\n longcomplexfloat: Callable[[np.clongdouble], str]\n void: Callable[[np.void], str]\n numpystr: Callable[[_CharLike_co], str]\n object: Callable[[object], str]\n all: Callable[[object], str]\n int_kind: Callable[[np.integer], str]\n float_kind: Callable[[np.floating], str]\n complex_kind: Callable[[np.complexfloating], str]\n str_kind: Callable[[_CharLike_co], str]\n\n@type_check_only\nclass _FormatOptions(TypedDict):\n precision: int\n threshold: int\n edgeitems: int\n linewidth: int\n suppress: bool\n nanstr: str\n infstr: str\n formatter: _FormatDict | 
None\n sign: _Sign\n floatmode: _FloatMode\n legacy: _Legacy\n\n###\n\n__docformat__: Final = "restructuredtext" # undocumented\n\ndef set_printoptions(\n precision: SupportsIndex | None = ...,\n threshold: int | None = ...,\n edgeitems: int | None = ...,\n linewidth: int | None = ...,\n suppress: bool | None = ...,\n nanstr: str | None = ...,\n infstr: str | None = ...,\n formatter: _FormatDict | None = ...,\n sign: _Sign | None = None,\n floatmode: _FloatMode | None = None,\n *,\n legacy: _Legacy | None = None,\n override_repr: _ReprFunc | None = None,\n) -> None: ...\ndef get_printoptions() -> _FormatOptions: ...\n\n# public numpy export\n@overload # no style\ndef array2string(\n a: NDArray[Any],\n max_line_width: int | None = None,\n precision: SupportsIndex | None = None,\n suppress_small: bool | None = None,\n separator: str = " ",\n prefix: str = "",\n style: _NoValueType = ...,\n formatter: _FormatDict | None = None,\n threshold: int | None = None,\n edgeitems: int | None = None,\n sign: _Sign | None = None,\n floatmode: _FloatMode | None = None,\n suffix: str = "",\n *,\n legacy: _Legacy | None = None,\n) -> str: ...\n@overload # style=<given> (positional), legacy="1.13"\ndef array2string(\n a: NDArray[Any],\n max_line_width: int | None,\n precision: SupportsIndex | None,\n suppress_small: bool | None,\n separator: str,\n prefix: str,\n style: _ReprFunc,\n formatter: _FormatDict | None = None,\n threshold: int | None = None,\n edgeitems: int | None = None,\n sign: _Sign | None = None,\n floatmode: _FloatMode | None = None,\n suffix: str = "",\n *,\n legacy: Literal["1.13"],\n) -> str: ...\n@overload # style=<given> (keyword), legacy="1.13"\ndef array2string(\n a: NDArray[Any],\n max_line_width: int | None = None,\n precision: SupportsIndex | None = None,\n suppress_small: bool | None = None,\n separator: str = " ",\n prefix: str = "",\n *,\n style: _ReprFunc,\n formatter: _FormatDict | None = None,\n threshold: int | None = None,\n edgeitems: int | None = 
None,\n sign: _Sign | None = None,\n floatmode: _FloatMode | None = None,\n suffix: str = "",\n legacy: Literal["1.13"],\n) -> str: ...\n@overload # style=<given> (positional), legacy!="1.13"\n@deprecated("'style' argument is deprecated and no longer functional except in 1.13 'legacy' mode")\ndef array2string(\n a: NDArray[Any],\n max_line_width: int | None,\n precision: SupportsIndex | None,\n suppress_small: bool | None,\n separator: str,\n prefix: str,\n style: _ReprFunc,\n formatter: _FormatDict | None = None,\n threshold: int | None = None,\n edgeitems: int | None = None,\n sign: _Sign | None = None,\n floatmode: _FloatMode | None = None,\n suffix: str = "",\n *,\n legacy: _LegacyNoStyle | None = None,\n) -> str: ...\n@overload # style=<given> (keyword), legacy="1.13"\n@deprecated("'style' argument is deprecated and no longer functional except in 1.13 'legacy' mode")\ndef array2string(\n a: NDArray[Any],\n max_line_width: int | None = None,\n precision: SupportsIndex | None = None,\n suppress_small: bool | None = None,\n separator: str = " ",\n prefix: str = "",\n *,\n style: _ReprFunc,\n formatter: _FormatDict | None = None,\n threshold: int | None = None,\n edgeitems: int | None = None,\n sign: _Sign | None = None,\n floatmode: _FloatMode | None = None,\n suffix: str = "",\n legacy: _LegacyNoStyle | None = None,\n) -> str: ...\n\ndef format_float_scientific(\n x: _FloatLike_co,\n precision: int | None = ...,\n unique: bool = ...,\n trim: _Trim = "k",\n sign: bool = ...,\n pad_left: int | None = ...,\n exp_digits: int | None = ...,\n min_digits: int | None = ...,\n) -> str: ...\ndef format_float_positional(\n x: _FloatLike_co,\n precision: int | None = ...,\n unique: bool = ...,\n fractional: bool = ...,\n trim: _Trim = "k",\n sign: bool = ...,\n pad_left: int | None = ...,\n pad_right: int | None = ...,\n min_digits: int | None = ...,\n) -> str: ...\ndef array_repr(\n arr: NDArray[Any],\n max_line_width: int | None = ...,\n precision: SupportsIndex | None = 
...,\n suppress_small: bool | None = ...,\n) -> str: ...\ndef array_str(\n a: NDArray[Any],\n max_line_width: int | None = ...,\n precision: SupportsIndex | None = ...,\n suppress_small: bool | None = ...,\n) -> str: ...\ndef printoptions(\n precision: SupportsIndex | None = ...,\n threshold: int | None = ...,\n edgeitems: int | None = ...,\n linewidth: int | None = ...,\n suppress: bool | None = ...,\n nanstr: str | None = ...,\n infstr: str | None = ...,\n formatter: _FormatDict | None = ...,\n sign: _Sign | None = None,\n floatmode: _FloatMode | None = None,\n *,\n legacy: _Legacy | None = None,\n override_repr: _ReprFunc | None = None,\n) -> _GeneratorContextManager[_FormatOptions]: ...\n
.venv\Lib\site-packages\numpy\_core\arrayprint.pyi
arrayprint.pyi
Other
7,209
0.95
0.067227
0.053333
python-kit
257
2023-11-12T12:35:19.876079
BSD-3-Clause
false
cb70fd2e8ce9e3708387a355a02abdcb
"""Simple script to compute the api hash of the current API.\n\nThe API has is defined by numpy_api_order and ufunc_api_order.\n\n"""\nfrom os.path import dirname\n\nfrom code_generators.genapi import fullapi_hash\nfrom code_generators.numpy_api import full_api\n\nif __name__ == '__main__':\n curdir = dirname(__file__)\n print(fullapi_hash(full_api))\n
.venv\Lib\site-packages\numpy\_core\cversions.py
cversions.py
Python
360
0.85
0.076923
0
awesome-app
642
2024-01-24T16:17:39.260116
MIT
false
f995e727741d37f76c92ec98ef1cf399
"""\nThis module contains a set of functions for vectorized string\noperations and methods.\n\n.. note::\n The `chararray` class exists for backwards compatibility with\n Numarray, it is not recommended for new development. Starting from numpy\n 1.4, if one needs arrays of strings, it is recommended to use arrays of\n `dtype` `object_`, `bytes_` or `str_`, and use the free functions\n in the `numpy.char` module for fast vectorized string operations.\n\nSome methods will only be available if the corresponding string method is\navailable in your version of Python.\n\nThe preferred alias for `defchararray` is `numpy.char`.\n\n"""\nimport functools\n\nimport numpy as np\nfrom numpy._core import overrides\nfrom numpy._core.multiarray import compare_chararrays\nfrom numpy._core.strings import (\n _join as join,\n)\nfrom numpy._core.strings import (\n _rsplit as rsplit,\n)\nfrom numpy._core.strings import (\n _split as split,\n)\nfrom numpy._core.strings import (\n _splitlines as splitlines,\n)\nfrom numpy._utils import set_module\nfrom numpy.strings import *\nfrom numpy.strings import (\n multiply as strings_multiply,\n)\nfrom numpy.strings import (\n partition as strings_partition,\n)\nfrom numpy.strings import (\n rpartition as strings_rpartition,\n)\n\nfrom .numeric import array as narray\nfrom .numeric import asarray as asnarray\nfrom .numeric import ndarray\nfrom .numerictypes import bytes_, character, str_\n\n__all__ = [\n 'equal', 'not_equal', 'greater_equal', 'less_equal',\n 'greater', 'less', 'str_len', 'add', 'multiply', 'mod', 'capitalize',\n 'center', 'count', 'decode', 'encode', 'endswith', 'expandtabs',\n 'find', 'index', 'isalnum', 'isalpha', 'isdigit', 'islower', 'isspace',\n 'istitle', 'isupper', 'join', 'ljust', 'lower', 'lstrip', 'partition',\n 'replace', 'rfind', 'rindex', 'rjust', 'rpartition', 'rsplit',\n 'rstrip', 'split', 'splitlines', 'startswith', 'strip', 'swapcase',\n 'title', 'translate', 'upper', 'zfill', 'isnumeric', 'isdecimal',\n 'array', 
'asarray', 'compare_chararrays', 'chararray'\n ]\n\n\narray_function_dispatch = functools.partial(\n overrides.array_function_dispatch, module='numpy.char')\n\n\ndef _binary_op_dispatcher(x1, x2):\n return (x1, x2)\n\n\n@array_function_dispatch(_binary_op_dispatcher)\ndef equal(x1, x2):\n """\n Return (x1 == x2) element-wise.\n\n Unlike `numpy.equal`, this comparison is performed by first\n stripping whitespace characters from the end of the string. This\n behavior is provided for backward-compatibility with numarray.\n\n Parameters\n ----------\n x1, x2 : array_like of str or unicode\n Input arrays of the same shape.\n\n Returns\n -------\n out : ndarray\n Output array of bools.\n\n Examples\n --------\n >>> import numpy as np\n >>> y = "aa "\n >>> x = "aa"\n >>> np.char.equal(x, y)\n array(True)\n\n See Also\n --------\n not_equal, greater_equal, less_equal, greater, less\n """\n return compare_chararrays(x1, x2, '==', True)\n\n\n@array_function_dispatch(_binary_op_dispatcher)\ndef not_equal(x1, x2):\n """\n Return (x1 != x2) element-wise.\n\n Unlike `numpy.not_equal`, this comparison is performed by first\n stripping whitespace characters from the end of the string. 
This\n behavior is provided for backward-compatibility with numarray.\n\n Parameters\n ----------\n x1, x2 : array_like of str or unicode\n Input arrays of the same shape.\n\n Returns\n -------\n out : ndarray\n Output array of bools.\n\n See Also\n --------\n equal, greater_equal, less_equal, greater, less\n\n Examples\n --------\n >>> import numpy as np\n >>> x1 = np.array(['a', 'b', 'c'])\n >>> np.char.not_equal(x1, 'b')\n array([ True, False, True])\n\n """\n return compare_chararrays(x1, x2, '!=', True)\n\n\n@array_function_dispatch(_binary_op_dispatcher)\ndef greater_equal(x1, x2):\n """\n Return (x1 >= x2) element-wise.\n\n Unlike `numpy.greater_equal`, this comparison is performed by\n first stripping whitespace characters from the end of the string.\n This behavior is provided for backward-compatibility with\n numarray.\n\n Parameters\n ----------\n x1, x2 : array_like of str or unicode\n Input arrays of the same shape.\n\n Returns\n -------\n out : ndarray\n Output array of bools.\n\n See Also\n --------\n equal, not_equal, less_equal, greater, less\n\n Examples\n --------\n >>> import numpy as np\n >>> x1 = np.array(['a', 'b', 'c'])\n >>> np.char.greater_equal(x1, 'b')\n array([False, True, True])\n\n """\n return compare_chararrays(x1, x2, '>=', True)\n\n\n@array_function_dispatch(_binary_op_dispatcher)\ndef less_equal(x1, x2):\n """\n Return (x1 <= x2) element-wise.\n\n Unlike `numpy.less_equal`, this comparison is performed by first\n stripping whitespace characters from the end of the string. 
This\n behavior is provided for backward-compatibility with numarray.\n\n Parameters\n ----------\n x1, x2 : array_like of str or unicode\n Input arrays of the same shape.\n\n Returns\n -------\n out : ndarray\n Output array of bools.\n\n See Also\n --------\n equal, not_equal, greater_equal, greater, less\n\n Examples\n --------\n >>> import numpy as np\n >>> x1 = np.array(['a', 'b', 'c'])\n >>> np.char.less_equal(x1, 'b')\n array([ True, True, False])\n\n """\n return compare_chararrays(x1, x2, '<=', True)\n\n\n@array_function_dispatch(_binary_op_dispatcher)\ndef greater(x1, x2):\n """\n Return (x1 > x2) element-wise.\n\n Unlike `numpy.greater`, this comparison is performed by first\n stripping whitespace characters from the end of the string. This\n behavior is provided for backward-compatibility with numarray.\n\n Parameters\n ----------\n x1, x2 : array_like of str or unicode\n Input arrays of the same shape.\n\n Returns\n -------\n out : ndarray\n Output array of bools.\n\n See Also\n --------\n equal, not_equal, greater_equal, less_equal, less\n\n Examples\n --------\n >>> import numpy as np\n >>> x1 = np.array(['a', 'b', 'c'])\n >>> np.char.greater(x1, 'b')\n array([False, False, True])\n\n """\n return compare_chararrays(x1, x2, '>', True)\n\n\n@array_function_dispatch(_binary_op_dispatcher)\ndef less(x1, x2):\n """\n Return (x1 < x2) element-wise.\n\n Unlike `numpy.greater`, this comparison is performed by first\n stripping whitespace characters from the end of the string. 
This\n behavior is provided for backward-compatibility with numarray.\n\n Parameters\n ----------\n x1, x2 : array_like of str or unicode\n Input arrays of the same shape.\n\n Returns\n -------\n out : ndarray\n Output array of bools.\n\n See Also\n --------\n equal, not_equal, greater_equal, less_equal, greater\n\n Examples\n --------\n >>> import numpy as np\n >>> x1 = np.array(['a', 'b', 'c'])\n >>> np.char.less(x1, 'b')\n array([True, False, False])\n\n """\n return compare_chararrays(x1, x2, '<', True)\n\n\n@set_module("numpy.char")\ndef multiply(a, i):\n """\n Return (a * i), that is string multiple concatenation,\n element-wise.\n\n Values in ``i`` of less than 0 are treated as 0 (which yields an\n empty string).\n\n Parameters\n ----------\n a : array_like, with `np.bytes_` or `np.str_` dtype\n\n i : array_like, with any integer dtype\n\n Returns\n -------\n out : ndarray\n Output array of str or unicode, depending on input types\n\n Notes\n -----\n This is a thin wrapper around np.strings.multiply that raises\n `ValueError` when ``i`` is not an integer. 
It only\n exists for backwards-compatibility.\n\n Examples\n --------\n >>> import numpy as np\n >>> a = np.array(["a", "b", "c"])\n >>> np.strings.multiply(a, 3)\n array(['aaa', 'bbb', 'ccc'], dtype='<U3')\n >>> i = np.array([1, 2, 3])\n >>> np.strings.multiply(a, i)\n array(['a', 'bb', 'ccc'], dtype='<U3')\n >>> np.strings.multiply(np.array(['a']), i)\n array(['a', 'aa', 'aaa'], dtype='<U3')\n >>> a = np.array(['a', 'b', 'c', 'd', 'e', 'f']).reshape((2, 3))\n >>> np.strings.multiply(a, 3)\n array([['aaa', 'bbb', 'ccc'],\n ['ddd', 'eee', 'fff']], dtype='<U3')\n >>> np.strings.multiply(a, i)\n array([['a', 'bb', 'ccc'],\n ['d', 'ee', 'fff']], dtype='<U3')\n\n """\n try:\n return strings_multiply(a, i)\n except TypeError:\n raise ValueError("Can only multiply by integers")\n\n\n@set_module("numpy.char")\ndef partition(a, sep):\n """\n Partition each element in `a` around `sep`.\n\n Calls :meth:`str.partition` element-wise.\n\n For each element in `a`, split the element as the first\n occurrence of `sep`, and return 3 strings containing the part\n before the separator, the separator itself, and the part after\n the separator. If the separator is not found, return 3 strings\n containing the string itself, followed by two empty strings.\n\n Parameters\n ----------\n a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype\n Input array\n sep : {str, unicode}\n Separator to split each string element in `a`.\n\n Returns\n -------\n out : ndarray\n Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype,\n depending on input types. 
The output array will have an extra\n dimension with 3 elements per input element.\n\n Examples\n --------\n >>> import numpy as np\n >>> x = np.array(["Numpy is nice!"])\n >>> np.char.partition(x, " ")\n array([['Numpy', ' ', 'is nice!']], dtype='<U8')\n\n See Also\n --------\n str.partition\n\n """\n return np.stack(strings_partition(a, sep), axis=-1)\n\n\n@set_module("numpy.char")\ndef rpartition(a, sep):\n """\n Partition (split) each element around the right-most separator.\n\n Calls :meth:`str.rpartition` element-wise.\n\n For each element in `a`, split the element as the last\n occurrence of `sep`, and return 3 strings containing the part\n before the separator, the separator itself, and the part after\n the separator. If the separator is not found, return 3 strings\n containing the string itself, followed by two empty strings.\n\n Parameters\n ----------\n a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype\n Input array\n sep : str or unicode\n Right-most separator to split each element in array.\n\n Returns\n -------\n out : ndarray\n Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype,\n depending on input types. The output array will have an extra\n dimension with 3 elements per input element.\n\n See Also\n --------\n str.rpartition\n\n Examples\n --------\n >>> import numpy as np\n >>> a = np.array(['aAaAaA', ' aA ', 'abBABba'])\n >>> np.char.rpartition(a, 'A')\n array([['aAaAa', 'A', ''],\n [' a', 'A', ' '],\n ['abB', 'A', 'Bba']], dtype='<U5')\n\n """\n return np.stack(strings_rpartition(a, sep), axis=-1)\n\n\n@set_module("numpy.char")\nclass chararray(ndarray):\n """\n chararray(shape, itemsize=1, unicode=False, buffer=None, offset=0,\n strides=None, order=None)\n\n Provides a convenient view on arrays of string and unicode values.\n\n .. note::\n The `chararray` class exists for backwards compatibility with\n Numarray, it is not recommended for new development. 
Starting from numpy\n 1.4, if one needs arrays of strings, it is recommended to use arrays of\n `dtype` `~numpy.object_`, `~numpy.bytes_` or `~numpy.str_`, and use\n the free functions in the `numpy.char` module for fast vectorized\n string operations.\n\n Versus a NumPy array of dtype `~numpy.bytes_` or `~numpy.str_`, this\n class adds the following functionality:\n\n 1) values automatically have whitespace removed from the end\n when indexed\n\n 2) comparison operators automatically remove whitespace from the\n end when comparing values\n\n 3) vectorized string operations are provided as methods\n (e.g. `.endswith`) and infix operators (e.g. ``"+", "*", "%"``)\n\n chararrays should be created using `numpy.char.array` or\n `numpy.char.asarray`, rather than this constructor directly.\n\n This constructor creates the array, using `buffer` (with `offset`\n and `strides`) if it is not ``None``. If `buffer` is ``None``, then\n constructs a new array with `strides` in "C order", unless both\n ``len(shape) >= 2`` and ``order='F'``, in which case `strides`\n is in "Fortran order".\n\n Methods\n -------\n astype\n argsort\n copy\n count\n decode\n dump\n dumps\n encode\n endswith\n expandtabs\n fill\n find\n flatten\n getfield\n index\n isalnum\n isalpha\n isdecimal\n isdigit\n islower\n isnumeric\n isspace\n istitle\n isupper\n item\n join\n ljust\n lower\n lstrip\n nonzero\n put\n ravel\n repeat\n replace\n reshape\n resize\n rfind\n rindex\n rjust\n rsplit\n rstrip\n searchsorted\n setfield\n setflags\n sort\n split\n splitlines\n squeeze\n startswith\n strip\n swapaxes\n swapcase\n take\n title\n tofile\n tolist\n tostring\n translate\n transpose\n upper\n view\n zfill\n\n Parameters\n ----------\n shape : tuple\n Shape of the array.\n itemsize : int, optional\n Length of each array element, in number of characters. 
Default is 1.\n unicode : bool, optional\n Are the array elements of type unicode (True) or string (False).\n Default is False.\n buffer : object exposing the buffer interface or str, optional\n Memory address of the start of the array data. Default is None,\n in which case a new array is created.\n offset : int, optional\n Fixed stride displacement from the beginning of an axis?\n Default is 0. Needs to be >=0.\n strides : array_like of ints, optional\n Strides for the array (see `~numpy.ndarray.strides` for\n full description). Default is None.\n order : {'C', 'F'}, optional\n The order in which the array data is stored in memory: 'C' ->\n "row major" order (the default), 'F' -> "column major"\n (Fortran) order.\n\n Examples\n --------\n >>> import numpy as np\n >>> charar = np.char.chararray((3, 3))\n >>> charar[:] = 'a'\n >>> charar\n chararray([[b'a', b'a', b'a'],\n [b'a', b'a', b'a'],\n [b'a', b'a', b'a']], dtype='|S1')\n\n >>> charar = np.char.chararray(charar.shape, itemsize=5)\n >>> charar[:] = 'abc'\n >>> charar\n chararray([[b'abc', b'abc', b'abc'],\n [b'abc', b'abc', b'abc'],\n [b'abc', b'abc', b'abc']], dtype='|S5')\n\n """\n def __new__(subtype, shape, itemsize=1, unicode=False, buffer=None,\n offset=0, strides=None, order='C'):\n if unicode:\n dtype = str_\n else:\n dtype = bytes_\n\n # force itemsize to be a Python int, since using NumPy integer\n # types results in itemsize.itemsize being used as the size of\n # strings in the new array.\n itemsize = int(itemsize)\n\n if isinstance(buffer, str):\n # unicode objects do not have the buffer interface\n filler = buffer\n buffer = None\n else:\n filler = None\n\n if buffer is None:\n self = ndarray.__new__(subtype, shape, (dtype, itemsize),\n order=order)\n else:\n self = ndarray.__new__(subtype, shape, (dtype, itemsize),\n buffer=buffer,\n offset=offset, strides=strides,\n order=order)\n if filler is not None:\n self[...] 
= filler\n\n return self\n\n def __array_wrap__(self, arr, context=None, return_scalar=False):\n # When calling a ufunc (and some other functions), we return a\n # chararray if the ufunc output is a string-like array,\n # or an ndarray otherwise\n if arr.dtype.char in "SUbc":\n return arr.view(type(self))\n return arr\n\n def __array_finalize__(self, obj):\n # The b is a special case because it is used for reconstructing.\n if self.dtype.char not in 'VSUbc':\n raise ValueError("Can only create a chararray from string data.")\n\n def __getitem__(self, obj):\n val = ndarray.__getitem__(self, obj)\n if isinstance(val, character):\n return val.rstrip()\n return val\n\n # IMPLEMENTATION NOTE: Most of the methods of this class are\n # direct delegations to the free functions in this module.\n # However, those that return an array of strings should instead\n # return a chararray, so some extra wrapping is required.\n\n def __eq__(self, other):\n """\n Return (self == other) element-wise.\n\n See Also\n --------\n equal\n """\n return equal(self, other)\n\n def __ne__(self, other):\n """\n Return (self != other) element-wise.\n\n See Also\n --------\n not_equal\n """\n return not_equal(self, other)\n\n def __ge__(self, other):\n """\n Return (self >= other) element-wise.\n\n See Also\n --------\n greater_equal\n """\n return greater_equal(self, other)\n\n def __le__(self, other):\n """\n Return (self <= other) element-wise.\n\n See Also\n --------\n less_equal\n """\n return less_equal(self, other)\n\n def __gt__(self, other):\n """\n Return (self > other) element-wise.\n\n See Also\n --------\n greater\n """\n return greater(self, other)\n\n def __lt__(self, other):\n """\n Return (self < other) element-wise.\n\n See Also\n --------\n less\n """\n return less(self, other)\n\n def __add__(self, other):\n """\n Return (self + other), that is string concatenation,\n element-wise for a pair of array_likes of str or unicode.\n\n See Also\n --------\n add\n """\n return 
add(self, other)\n\n def __radd__(self, other):\n """\n Return (other + self), that is string concatenation,\n element-wise for a pair of array_likes of `bytes_` or `str_`.\n\n See Also\n --------\n add\n """\n return add(other, self)\n\n def __mul__(self, i):\n """\n Return (self * i), that is string multiple concatenation,\n element-wise.\n\n See Also\n --------\n multiply\n """\n return asarray(multiply(self, i))\n\n def __rmul__(self, i):\n """\n Return (self * i), that is string multiple concatenation,\n element-wise.\n\n See Also\n --------\n multiply\n """\n return asarray(multiply(self, i))\n\n def __mod__(self, i):\n """\n Return (self % i), that is pre-Python 2.6 string formatting\n (interpolation), element-wise for a pair of array_likes of `bytes_`\n or `str_`.\n\n See Also\n --------\n mod\n """\n return asarray(mod(self, i))\n\n def __rmod__(self, other):\n return NotImplemented\n\n def argsort(self, axis=-1, kind=None, order=None):\n """\n Return the indices that sort the array lexicographically.\n\n For full documentation see `numpy.argsort`, for which this method is\n in fact merely a "thin wrapper."\n\n Examples\n --------\n >>> c = np.array(['a1b c', '1b ca', 'b ca1', 'Ca1b'], 'S5')\n >>> c = c.view(np.char.chararray); c\n chararray(['a1b c', '1b ca', 'b ca1', 'Ca1b'],\n dtype='|S5')\n >>> c[c.argsort()]\n chararray(['1b ca', 'Ca1b', 'a1b c', 'b ca1'],\n dtype='|S5')\n\n """\n return self.__array__().argsort(axis, kind, order)\n argsort.__doc__ = ndarray.argsort.__doc__\n\n def capitalize(self):\n """\n Return a copy of `self` with only the first character of each element\n capitalized.\n\n See Also\n --------\n char.capitalize\n\n """\n return asarray(capitalize(self))\n\n def center(self, width, fillchar=' '):\n """\n Return a copy of `self` with its elements centered in a\n string of length `width`.\n\n See Also\n --------\n center\n """\n return asarray(center(self, width, fillchar))\n\n def count(self, sub, start=0, end=None):\n """\n Returns 
an array with the number of non-overlapping occurrences of\n substring `sub` in the range [`start`, `end`].\n\n See Also\n --------\n char.count\n\n """\n return count(self, sub, start, end)\n\n def decode(self, encoding=None, errors=None):\n """\n Calls ``bytes.decode`` element-wise.\n\n See Also\n --------\n char.decode\n\n """\n return decode(self, encoding, errors)\n\n def encode(self, encoding=None, errors=None):\n """\n Calls :meth:`str.encode` element-wise.\n\n See Also\n --------\n char.encode\n\n """\n return encode(self, encoding, errors)\n\n def endswith(self, suffix, start=0, end=None):\n """\n Returns a boolean array which is `True` where the string element\n in `self` ends with `suffix`, otherwise `False`.\n\n See Also\n --------\n char.endswith\n\n """\n return endswith(self, suffix, start, end)\n\n def expandtabs(self, tabsize=8):\n """\n Return a copy of each string element where all tab characters are\n replaced by one or more spaces.\n\n See Also\n --------\n char.expandtabs\n\n """\n return asarray(expandtabs(self, tabsize))\n\n def find(self, sub, start=0, end=None):\n """\n For each element, return the lowest index in the string where\n substring `sub` is found.\n\n See Also\n --------\n char.find\n\n """\n return find(self, sub, start, end)\n\n def index(self, sub, start=0, end=None):\n """\n Like `find`, but raises :exc:`ValueError` when the substring is not\n found.\n\n See Also\n --------\n char.index\n\n """\n return index(self, sub, start, end)\n\n def isalnum(self):\n """\n Returns true for each element if all characters in the string\n are alphanumeric and there is at least one character, false\n otherwise.\n\n See Also\n --------\n char.isalnum\n\n """\n return isalnum(self)\n\n def isalpha(self):\n """\n Returns true for each element if all characters in the string\n are alphabetic and there is at least one character, false\n otherwise.\n\n See Also\n --------\n char.isalpha\n\n """\n return isalpha(self)\n\n def isdigit(self):\n 
"""\n Returns true for each element if all characters in the string are\n digits and there is at least one character, false otherwise.\n\n See Also\n --------\n char.isdigit\n\n """\n return isdigit(self)\n\n def islower(self):\n """\n Returns true for each element if all cased characters in the\n string are lowercase and there is at least one cased character,\n false otherwise.\n\n See Also\n --------\n char.islower\n\n """\n return islower(self)\n\n def isspace(self):\n """\n Returns true for each element if there are only whitespace\n characters in the string and there is at least one character,\n false otherwise.\n\n See Also\n --------\n char.isspace\n\n """\n return isspace(self)\n\n def istitle(self):\n """\n Returns true for each element if the element is a titlecased\n string and there is at least one character, false otherwise.\n\n See Also\n --------\n char.istitle\n\n """\n return istitle(self)\n\n def isupper(self):\n """\n Returns true for each element if all cased characters in the\n string are uppercase and there is at least one character, false\n otherwise.\n\n See Also\n --------\n char.isupper\n\n """\n return isupper(self)\n\n def join(self, seq):\n """\n Return a string which is the concatenation of the strings in the\n sequence `seq`.\n\n See Also\n --------\n char.join\n\n """\n return join(self, seq)\n\n def ljust(self, width, fillchar=' '):\n """\n Return an array with the elements of `self` left-justified in a\n string of length `width`.\n\n See Also\n --------\n char.ljust\n\n """\n return asarray(ljust(self, width, fillchar))\n\n def lower(self):\n """\n Return an array with the elements of `self` converted to\n lowercase.\n\n See Also\n --------\n char.lower\n\n """\n return asarray(lower(self))\n\n def lstrip(self, chars=None):\n """\n For each element in `self`, return a copy with the leading characters\n removed.\n\n See Also\n --------\n char.lstrip\n\n """\n return lstrip(self, chars)\n\n def partition(self, sep):\n """\n Partition 
each element in `self` around `sep`.\n\n See Also\n --------\n partition\n """\n return asarray(partition(self, sep))\n\n def replace(self, old, new, count=None):\n """\n For each element in `self`, return a copy of the string with all\n occurrences of substring `old` replaced by `new`.\n\n See Also\n --------\n char.replace\n\n """\n return replace(self, old, new, count if count is not None else -1)\n\n def rfind(self, sub, start=0, end=None):\n """\n For each element in `self`, return the highest index in the string\n where substring `sub` is found, such that `sub` is contained\n within [`start`, `end`].\n\n See Also\n --------\n char.rfind\n\n """\n return rfind(self, sub, start, end)\n\n def rindex(self, sub, start=0, end=None):\n """\n Like `rfind`, but raises :exc:`ValueError` when the substring `sub` is\n not found.\n\n See Also\n --------\n char.rindex\n\n """\n return rindex(self, sub, start, end)\n\n def rjust(self, width, fillchar=' '):\n """\n Return an array with the elements of `self`\n right-justified in a string of length `width`.\n\n See Also\n --------\n char.rjust\n\n """\n return asarray(rjust(self, width, fillchar))\n\n def rpartition(self, sep):\n """\n Partition each element in `self` around `sep`.\n\n See Also\n --------\n rpartition\n """\n return asarray(rpartition(self, sep))\n\n def rsplit(self, sep=None, maxsplit=None):\n """\n For each element in `self`, return a list of the words in\n the string, using `sep` as the delimiter string.\n\n See Also\n --------\n char.rsplit\n\n """\n return rsplit(self, sep, maxsplit)\n\n def rstrip(self, chars=None):\n """\n For each element in `self`, return a copy with the trailing\n characters removed.\n\n See Also\n --------\n char.rstrip\n\n """\n return rstrip(self, chars)\n\n def split(self, sep=None, maxsplit=None):\n """\n For each element in `self`, return a list of the words in the\n string, using `sep` as the delimiter string.\n\n See Also\n --------\n char.split\n\n """\n return split(self, 
sep, maxsplit)\n\n def splitlines(self, keepends=None):\n """\n For each element in `self`, return a list of the lines in the\n element, breaking at line boundaries.\n\n See Also\n --------\n char.splitlines\n\n """\n return splitlines(self, keepends)\n\n def startswith(self, prefix, start=0, end=None):\n """\n Returns a boolean array which is `True` where the string element\n in `self` starts with `prefix`, otherwise `False`.\n\n See Also\n --------\n char.startswith\n\n """\n return startswith(self, prefix, start, end)\n\n def strip(self, chars=None):\n """\n For each element in `self`, return a copy with the leading and\n trailing characters removed.\n\n See Also\n --------\n char.strip\n\n """\n return strip(self, chars)\n\n def swapcase(self):\n """\n For each element in `self`, return a copy of the string with\n uppercase characters converted to lowercase and vice versa.\n\n See Also\n --------\n char.swapcase\n\n """\n return asarray(swapcase(self))\n\n def title(self):\n """\n For each element in `self`, return a titlecased version of the\n string: words start with uppercase characters, all remaining cased\n characters are lowercase.\n\n See Also\n --------\n char.title\n\n """\n return asarray(title(self))\n\n def translate(self, table, deletechars=None):\n """\n For each element in `self`, return a copy of the string where\n all characters occurring in the optional argument\n `deletechars` are removed, and the remaining characters have\n been mapped through the given translation table.\n\n See Also\n --------\n char.translate\n\n """\n return asarray(translate(self, table, deletechars))\n\n def upper(self):\n """\n Return an array with the elements of `self` converted to\n uppercase.\n\n See Also\n --------\n char.upper\n\n """\n return asarray(upper(self))\n\n def zfill(self, width):\n """\n Return the numeric string left-filled with zeros in a string of\n length `width`.\n\n See Also\n --------\n char.zfill\n\n """\n return asarray(zfill(self, 
width))\n\n def isnumeric(self):\n """\n For each element in `self`, return True if there are only\n numeric characters in the element.\n\n See Also\n --------\n char.isnumeric\n\n """\n return isnumeric(self)\n\n def isdecimal(self):\n """\n For each element in `self`, return True if there are only\n decimal characters in the element.\n\n See Also\n --------\n char.isdecimal\n\n """\n return isdecimal(self)\n\n\n@set_module("numpy.char")\ndef array(obj, itemsize=None, copy=True, unicode=None, order=None):\n """\n Create a `~numpy.char.chararray`.\n\n .. note::\n This class is provided for numarray backward-compatibility.\n New code (not concerned with numarray compatibility) should use\n arrays of type `bytes_` or `str_` and use the free functions\n in :mod:`numpy.char` for fast vectorized string operations instead.\n\n Versus a NumPy array of dtype `bytes_` or `str_`, this\n class adds the following functionality:\n\n 1) values automatically have whitespace removed from the end\n when indexed\n\n 2) comparison operators automatically remove whitespace from the\n end when comparing values\n\n 3) vectorized string operations are provided as methods\n (e.g. `chararray.endswith <numpy.char.chararray.endswith>`)\n and infix operators (e.g. ``+, *, %``)\n\n Parameters\n ----------\n obj : array of str or unicode-like\n\n itemsize : int, optional\n `itemsize` is the number of characters per scalar in the\n resulting array. If `itemsize` is None, and `obj` is an\n object array or a Python list, the `itemsize` will be\n automatically determined. If `itemsize` is provided and `obj`\n is of type str or unicode, then the `obj` string will be\n chunked into `itemsize` pieces.\n\n copy : bool, optional\n If true (default), then the object is copied. 
Otherwise, a copy\n will only be made if ``__array__`` returns a copy, if obj is a\n nested sequence, or if a copy is needed to satisfy any of the other\n requirements (`itemsize`, unicode, `order`, etc.).\n\n unicode : bool, optional\n When true, the resulting `~numpy.char.chararray` can contain Unicode\n characters, when false only 8-bit characters. If unicode is\n None and `obj` is one of the following:\n\n - a `~numpy.char.chararray`,\n - an ndarray of type :class:`str_` or :class:`bytes_`\n - a Python :class:`str` or :class:`bytes` object,\n\n then the unicode setting of the output array will be\n automatically determined.\n\n order : {'C', 'F', 'A'}, optional\n Specify the order of the array. If order is 'C' (default), then the\n array will be in C-contiguous order (last-index varies the\n fastest). If order is 'F', then the returned array\n will be in Fortran-contiguous order (first-index varies the\n fastest). If order is 'A', then the returned array may\n be in any order (either C-, Fortran-contiguous, or even\n discontiguous).\n\n Examples\n --------\n\n >>> import numpy as np\n >>> char_array = np.char.array(['hello', 'world', 'numpy','array'])\n >>> char_array\n chararray(['hello', 'world', 'numpy', 'array'], dtype='<U5')\n\n """\n if isinstance(obj, (bytes, str)):\n if unicode is None:\n if isinstance(obj, str):\n unicode = True\n else:\n unicode = False\n\n if itemsize is None:\n itemsize = len(obj)\n shape = len(obj) // itemsize\n\n return chararray(shape, itemsize=itemsize, unicode=unicode,\n buffer=obj, order=order)\n\n if isinstance(obj, (list, tuple)):\n obj = asnarray(obj)\n\n if isinstance(obj, ndarray) and issubclass(obj.dtype.type, character):\n # If we just have a vanilla chararray, create a chararray\n # view around it.\n if not isinstance(obj, chararray):\n obj = obj.view(chararray)\n\n if itemsize is None:\n itemsize = obj.itemsize\n # itemsize is in 8-bit chars, so for Unicode, we need\n # to divide by the size of a single Unicode 
character,\n # which for NumPy is always 4\n if issubclass(obj.dtype.type, str_):\n itemsize //= 4\n\n if unicode is None:\n if issubclass(obj.dtype.type, str_):\n unicode = True\n else:\n unicode = False\n\n if unicode:\n dtype = str_\n else:\n dtype = bytes_\n\n if order is not None:\n obj = asnarray(obj, order=order)\n if (copy or\n (itemsize != obj.itemsize) or\n (not unicode and isinstance(obj, str_)) or\n (unicode and isinstance(obj, bytes_))):\n obj = obj.astype((dtype, int(itemsize)))\n return obj\n\n if isinstance(obj, ndarray) and issubclass(obj.dtype.type, object):\n if itemsize is None:\n # Since no itemsize was specified, convert the input array to\n # a list so the ndarray constructor will automatically\n # determine the itemsize for us.\n obj = obj.tolist()\n # Fall through to the default case\n\n if unicode:\n dtype = str_\n else:\n dtype = bytes_\n\n if itemsize is None:\n val = narray(obj, dtype=dtype, order=order, subok=True)\n else:\n val = narray(obj, dtype=(dtype, itemsize), order=order, subok=True)\n return val.view(chararray)\n\n\n@set_module("numpy.char")\ndef asarray(obj, itemsize=None, unicode=None, order=None):\n """\n Convert the input to a `~numpy.char.chararray`, copying the data only if\n necessary.\n\n Versus a NumPy array of dtype `bytes_` or `str_`, this\n class adds the following functionality:\n\n 1) values automatically have whitespace removed from the end\n when indexed\n\n 2) comparison operators automatically remove whitespace from the\n end when comparing values\n\n 3) vectorized string operations are provided as methods\n (e.g. `chararray.endswith <numpy.char.chararray.endswith>`)\n and infix operators (e.g. ``+``, ``*``, ``%``)\n\n Parameters\n ----------\n obj : array of str or unicode-like\n\n itemsize : int, optional\n `itemsize` is the number of characters per scalar in the\n resulting array. If `itemsize` is None, and `obj` is an\n object array or a Python list, the `itemsize` will be\n automatically determined. 
If `itemsize` is provided and `obj`\n is of type str or unicode, then the `obj` string will be\n chunked into `itemsize` pieces.\n\n unicode : bool, optional\n When true, the resulting `~numpy.char.chararray` can contain Unicode\n characters, when false only 8-bit characters. If unicode is\n None and `obj` is one of the following:\n\n - a `~numpy.char.chararray`,\n - an ndarray of type `str_` or `unicode_`\n - a Python str or unicode object,\n\n then the unicode setting of the output array will be\n automatically determined.\n\n order : {'C', 'F'}, optional\n Specify the order of the array. If order is 'C' (default), then the\n array will be in C-contiguous order (last-index varies the\n fastest). If order is 'F', then the returned array\n will be in Fortran-contiguous order (first-index varies the\n fastest).\n\n Examples\n --------\n >>> import numpy as np\n >>> np.char.asarray(['hello', 'world'])\n chararray(['hello', 'world'], dtype='<U5')\n\n """\n return array(obj, itemsize, copy=False,\n unicode=unicode, order=order)\n
.venv\Lib\site-packages\numpy\_core\defchararray.py
defchararray.py
Python
39,434
0.95
0.111423
0.018486
react-lib
998
2023-12-06T01:50:13.429241
MIT
false
6f4c91d69c1a5825373c8133a5462031
from typing import Any, Self, SupportsIndex, SupportsInt, TypeAlias, overload\nfrom typing import Literal as L\n\nfrom typing_extensions import TypeVar\n\nimport numpy as np\nfrom numpy import (\n _OrderKACF,\n _SupportsBuffer,\n bytes_,\n dtype,\n int_,\n ndarray,\n object_,\n str_,\n)\nfrom numpy._core.multiarray import compare_chararrays\nfrom numpy._typing import NDArray, _AnyShape, _Shape, _ShapeLike, _SupportsArray\nfrom numpy._typing import _ArrayLikeAnyString_co as UST_co\nfrom numpy._typing import _ArrayLikeBool_co as b_co\nfrom numpy._typing import _ArrayLikeBytes_co as S_co\nfrom numpy._typing import _ArrayLikeInt_co as i_co\nfrom numpy._typing import _ArrayLikeStr_co as U_co\nfrom numpy._typing import _ArrayLikeString_co as T_co\n\n__all__ = [\n "equal",\n "not_equal",\n "greater_equal",\n "less_equal",\n "greater",\n "less",\n "str_len",\n "add",\n "multiply",\n "mod",\n "capitalize",\n "center",\n "count",\n "decode",\n "encode",\n "endswith",\n "expandtabs",\n "find",\n "index",\n "isalnum",\n "isalpha",\n "isdigit",\n "islower",\n "isspace",\n "istitle",\n "isupper",\n "join",\n "ljust",\n "lower",\n "lstrip",\n "partition",\n "replace",\n "rfind",\n "rindex",\n "rjust",\n "rpartition",\n "rsplit",\n "rstrip",\n "split",\n "splitlines",\n "startswith",\n "strip",\n "swapcase",\n "title",\n "translate",\n "upper",\n "zfill",\n "isnumeric",\n "isdecimal",\n "array",\n "asarray",\n "compare_chararrays",\n "chararray",\n]\n\n_ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True)\n_CharacterT = TypeVar("_CharacterT", bound=np.character)\n_CharDTypeT_co = TypeVar("_CharDTypeT_co", bound=dtype[np.character], default=dtype, covariant=True)\n\n_CharArray: TypeAlias = chararray[_AnyShape, dtype[_CharacterT]]\n\n_StringDTypeArray: TypeAlias = np.ndarray[_AnyShape, np.dtypes.StringDType]\n_StringDTypeOrUnicodeArray: TypeAlias = _StringDTypeArray | NDArray[np.str_]\n_StringDTypeSupportsArray: TypeAlias = 
_SupportsArray[np.dtypes.StringDType]\n\nclass chararray(ndarray[_ShapeT_co, _CharDTypeT_co]):\n @overload\n def __new__(\n subtype,\n shape: _ShapeLike,\n itemsize: SupportsIndex | SupportsInt = ...,\n unicode: L[False] = ...,\n buffer: _SupportsBuffer = ...,\n offset: SupportsIndex = ...,\n strides: _ShapeLike = ...,\n order: _OrderKACF = ...,\n ) -> _CharArray[bytes_]: ...\n @overload\n def __new__(\n subtype,\n shape: _ShapeLike,\n itemsize: SupportsIndex | SupportsInt = ...,\n unicode: L[True] = ...,\n buffer: _SupportsBuffer = ...,\n offset: SupportsIndex = ...,\n strides: _ShapeLike = ...,\n order: _OrderKACF = ...,\n ) -> _CharArray[str_]: ...\n\n def __array_finalize__(self, obj: object) -> None: ...\n def __mul__(self, other: i_co) -> chararray[_AnyShape, _CharDTypeT_co]: ...\n def __rmul__(self, other: i_co) -> chararray[_AnyShape, _CharDTypeT_co]: ...\n def __mod__(self, i: Any) -> chararray[_AnyShape, _CharDTypeT_co]: ...\n\n @overload\n def __eq__(\n self: _CharArray[str_],\n other: U_co,\n ) -> NDArray[np.bool]: ...\n @overload\n def __eq__(\n self: _CharArray[bytes_],\n other: S_co,\n ) -> NDArray[np.bool]: ...\n\n @overload\n def __ne__(\n self: _CharArray[str_],\n other: U_co,\n ) -> NDArray[np.bool]: ...\n @overload\n def __ne__(\n self: _CharArray[bytes_],\n other: S_co,\n ) -> NDArray[np.bool]: ...\n\n @overload\n def __ge__(\n self: _CharArray[str_],\n other: U_co,\n ) -> NDArray[np.bool]: ...\n @overload\n def __ge__(\n self: _CharArray[bytes_],\n other: S_co,\n ) -> NDArray[np.bool]: ...\n\n @overload\n def __le__(\n self: _CharArray[str_],\n other: U_co,\n ) -> NDArray[np.bool]: ...\n @overload\n def __le__(\n self: _CharArray[bytes_],\n other: S_co,\n ) -> NDArray[np.bool]: ...\n\n @overload\n def __gt__(\n self: _CharArray[str_],\n other: U_co,\n ) -> NDArray[np.bool]: ...\n @overload\n def __gt__(\n self: _CharArray[bytes_],\n other: S_co,\n ) -> NDArray[np.bool]: ...\n\n @overload\n def __lt__(\n self: _CharArray[str_],\n other: U_co,\n 
) -> NDArray[np.bool]: ...\n @overload\n def __lt__(\n self: _CharArray[bytes_],\n other: S_co,\n ) -> NDArray[np.bool]: ...\n\n @overload\n def __add__(\n self: _CharArray[str_],\n other: U_co,\n ) -> _CharArray[str_]: ...\n @overload\n def __add__(\n self: _CharArray[bytes_],\n other: S_co,\n ) -> _CharArray[bytes_]: ...\n\n @overload\n def __radd__(\n self: _CharArray[str_],\n other: U_co,\n ) -> _CharArray[str_]: ...\n @overload\n def __radd__(\n self: _CharArray[bytes_],\n other: S_co,\n ) -> _CharArray[bytes_]: ...\n\n @overload\n def center(\n self: _CharArray[str_],\n width: i_co,\n fillchar: U_co = ...,\n ) -> _CharArray[str_]: ...\n @overload\n def center(\n self: _CharArray[bytes_],\n width: i_co,\n fillchar: S_co = ...,\n ) -> _CharArray[bytes_]: ...\n\n @overload\n def count(\n self: _CharArray[str_],\n sub: U_co,\n start: i_co = ...,\n end: i_co | None = ...,\n ) -> NDArray[int_]: ...\n @overload\n def count(\n self: _CharArray[bytes_],\n sub: S_co,\n start: i_co = ...,\n end: i_co | None = ...,\n ) -> NDArray[int_]: ...\n\n def decode(\n self: _CharArray[bytes_],\n encoding: str | None = ...,\n errors: str | None = ...,\n ) -> _CharArray[str_]: ...\n\n def encode(\n self: _CharArray[str_],\n encoding: str | None = ...,\n errors: str | None = ...,\n ) -> _CharArray[bytes_]: ...\n\n @overload\n def endswith(\n self: _CharArray[str_],\n suffix: U_co,\n start: i_co = ...,\n end: i_co | None = ...,\n ) -> NDArray[np.bool]: ...\n @overload\n def endswith(\n self: _CharArray[bytes_],\n suffix: S_co,\n start: i_co = ...,\n end: i_co | None = ...,\n ) -> NDArray[np.bool]: ...\n\n def expandtabs(\n self,\n tabsize: i_co = ...,\n ) -> Self: ...\n\n @overload\n def find(\n self: _CharArray[str_],\n sub: U_co,\n start: i_co = ...,\n end: i_co | None = ...,\n ) -> NDArray[int_]: ...\n @overload\n def find(\n self: _CharArray[bytes_],\n sub: S_co,\n start: i_co = ...,\n end: i_co | None = ...,\n ) -> NDArray[int_]: ...\n\n @overload\n def index(\n self: 
_CharArray[str_],\n sub: U_co,\n start: i_co = ...,\n end: i_co | None = ...,\n ) -> NDArray[int_]: ...\n @overload\n def index(\n self: _CharArray[bytes_],\n sub: S_co,\n start: i_co = ...,\n end: i_co | None = ...,\n ) -> NDArray[int_]: ...\n\n @overload\n def join(\n self: _CharArray[str_],\n seq: U_co,\n ) -> _CharArray[str_]: ...\n @overload\n def join(\n self: _CharArray[bytes_],\n seq: S_co,\n ) -> _CharArray[bytes_]: ...\n\n @overload\n def ljust(\n self: _CharArray[str_],\n width: i_co,\n fillchar: U_co = ...,\n ) -> _CharArray[str_]: ...\n @overload\n def ljust(\n self: _CharArray[bytes_],\n width: i_co,\n fillchar: S_co = ...,\n ) -> _CharArray[bytes_]: ...\n\n @overload\n def lstrip(\n self: _CharArray[str_],\n chars: U_co | None = ...,\n ) -> _CharArray[str_]: ...\n @overload\n def lstrip(\n self: _CharArray[bytes_],\n chars: S_co | None = ...,\n ) -> _CharArray[bytes_]: ...\n\n @overload\n def partition(\n self: _CharArray[str_],\n sep: U_co,\n ) -> _CharArray[str_]: ...\n @overload\n def partition(\n self: _CharArray[bytes_],\n sep: S_co,\n ) -> _CharArray[bytes_]: ...\n\n @overload\n def replace(\n self: _CharArray[str_],\n old: U_co,\n new: U_co,\n count: i_co | None = ...,\n ) -> _CharArray[str_]: ...\n @overload\n def replace(\n self: _CharArray[bytes_],\n old: S_co,\n new: S_co,\n count: i_co | None = ...,\n ) -> _CharArray[bytes_]: ...\n\n @overload\n def rfind(\n self: _CharArray[str_],\n sub: U_co,\n start: i_co = ...,\n end: i_co | None = ...,\n ) -> NDArray[int_]: ...\n @overload\n def rfind(\n self: _CharArray[bytes_],\n sub: S_co,\n start: i_co = ...,\n end: i_co | None = ...,\n ) -> NDArray[int_]: ...\n\n @overload\n def rindex(\n self: _CharArray[str_],\n sub: U_co,\n start: i_co = ...,\n end: i_co | None = ...,\n ) -> NDArray[int_]: ...\n @overload\n def rindex(\n self: _CharArray[bytes_],\n sub: S_co,\n start: i_co = ...,\n end: i_co | None = ...,\n ) -> NDArray[int_]: ...\n\n @overload\n def rjust(\n self: _CharArray[str_],\n width: 
i_co,\n fillchar: U_co = ...,\n ) -> _CharArray[str_]: ...\n @overload\n def rjust(\n self: _CharArray[bytes_],\n width: i_co,\n fillchar: S_co = ...,\n ) -> _CharArray[bytes_]: ...\n\n @overload\n def rpartition(\n self: _CharArray[str_],\n sep: U_co,\n ) -> _CharArray[str_]: ...\n @overload\n def rpartition(\n self: _CharArray[bytes_],\n sep: S_co,\n ) -> _CharArray[bytes_]: ...\n\n @overload\n def rsplit(\n self: _CharArray[str_],\n sep: U_co | None = ...,\n maxsplit: i_co | None = ...,\n ) -> NDArray[object_]: ...\n @overload\n def rsplit(\n self: _CharArray[bytes_],\n sep: S_co | None = ...,\n maxsplit: i_co | None = ...,\n ) -> NDArray[object_]: ...\n\n @overload\n def rstrip(\n self: _CharArray[str_],\n chars: U_co | None = ...,\n ) -> _CharArray[str_]: ...\n @overload\n def rstrip(\n self: _CharArray[bytes_],\n chars: S_co | None = ...,\n ) -> _CharArray[bytes_]: ...\n\n @overload\n def split(\n self: _CharArray[str_],\n sep: U_co | None = ...,\n maxsplit: i_co | None = ...,\n ) -> NDArray[object_]: ...\n @overload\n def split(\n self: _CharArray[bytes_],\n sep: S_co | None = ...,\n maxsplit: i_co | None = ...,\n ) -> NDArray[object_]: ...\n\n def splitlines(self, keepends: b_co | None = ...) 
-> NDArray[object_]: ...\n\n @overload\n def startswith(\n self: _CharArray[str_],\n prefix: U_co,\n start: i_co = ...,\n end: i_co | None = ...,\n ) -> NDArray[np.bool]: ...\n @overload\n def startswith(\n self: _CharArray[bytes_],\n prefix: S_co,\n start: i_co = ...,\n end: i_co | None = ...,\n ) -> NDArray[np.bool]: ...\n\n @overload\n def strip(\n self: _CharArray[str_],\n chars: U_co | None = ...,\n ) -> _CharArray[str_]: ...\n @overload\n def strip(\n self: _CharArray[bytes_],\n chars: S_co | None = ...,\n ) -> _CharArray[bytes_]: ...\n\n @overload\n def translate(\n self: _CharArray[str_],\n table: U_co,\n deletechars: U_co | None = ...,\n ) -> _CharArray[str_]: ...\n @overload\n def translate(\n self: _CharArray[bytes_],\n table: S_co,\n deletechars: S_co | None = ...,\n ) -> _CharArray[bytes_]: ...\n\n def zfill(self, width: i_co) -> Self: ...\n def capitalize(self) -> Self: ...\n def title(self) -> Self: ...\n def swapcase(self) -> Self: ...\n def lower(self) -> Self: ...\n def upper(self) -> Self: ...\n def isalnum(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ...\n def isalpha(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ...\n def isdigit(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ...\n def islower(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ...\n def isspace(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ...\n def istitle(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ...\n def isupper(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ...\n def isnumeric(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ...\n def isdecimal(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ...\n\n# Comparison\n@overload\ndef equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ...\n@overload\ndef equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ...\n@overload\ndef equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ...\n\n@overload\ndef not_equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ...\n@overload\ndef not_equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ...\n@overload\ndef not_equal(x1: 
T_co, x2: T_co) -> NDArray[np.bool]: ...\n\n@overload\ndef greater_equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ...\n@overload\ndef greater_equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ...\n@overload\ndef greater_equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ...\n\n@overload\ndef less_equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ...\n@overload\ndef less_equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ...\n@overload\ndef less_equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ...\n\n@overload\ndef greater(x1: U_co, x2: U_co) -> NDArray[np.bool]: ...\n@overload\ndef greater(x1: S_co, x2: S_co) -> NDArray[np.bool]: ...\n@overload\ndef greater(x1: T_co, x2: T_co) -> NDArray[np.bool]: ...\n\n@overload\ndef less(x1: U_co, x2: U_co) -> NDArray[np.bool]: ...\n@overload\ndef less(x1: S_co, x2: S_co) -> NDArray[np.bool]: ...\n@overload\ndef less(x1: T_co, x2: T_co) -> NDArray[np.bool]: ...\n\n@overload\ndef add(x1: U_co, x2: U_co) -> NDArray[np.str_]: ...\n@overload\ndef add(x1: S_co, x2: S_co) -> NDArray[np.bytes_]: ...\n@overload\ndef add(x1: _StringDTypeSupportsArray, x2: _StringDTypeSupportsArray) -> _StringDTypeArray: ...\n@overload\ndef add(x1: T_co, x2: T_co) -> _StringDTypeOrUnicodeArray: ...\n\n@overload\ndef multiply(a: U_co, i: i_co) -> NDArray[np.str_]: ...\n@overload\ndef multiply(a: S_co, i: i_co) -> NDArray[np.bytes_]: ...\n@overload\ndef multiply(a: _StringDTypeSupportsArray, i: i_co) -> _StringDTypeArray: ...\n@overload\ndef multiply(a: T_co, i: i_co) -> _StringDTypeOrUnicodeArray: ...\n\n@overload\ndef mod(a: U_co, value: Any) -> NDArray[np.str_]: ...\n@overload\ndef mod(a: S_co, value: Any) -> NDArray[np.bytes_]: ...\n@overload\ndef mod(a: _StringDTypeSupportsArray, value: Any) -> _StringDTypeArray: ...\n@overload\ndef mod(a: T_co, value: Any) -> _StringDTypeOrUnicodeArray: ...\n\n@overload\ndef capitalize(a: U_co) -> NDArray[str_]: ...\n@overload\ndef capitalize(a: S_co) -> NDArray[bytes_]: ...\n@overload\ndef capitalize(a: _StringDTypeSupportsArray) -> 
_StringDTypeArray: ...\n@overload\ndef capitalize(a: T_co) -> _StringDTypeOrUnicodeArray: ...\n\n@overload\ndef center(a: U_co, width: i_co, fillchar: U_co = ...) -> NDArray[str_]: ...\n@overload\ndef center(a: S_co, width: i_co, fillchar: S_co = ...) -> NDArray[bytes_]: ...\n@overload\ndef center(a: _StringDTypeSupportsArray, width: i_co, fillchar: _StringDTypeSupportsArray = ...) -> _StringDTypeArray: ...\n@overload\ndef center(a: T_co, width: i_co, fillchar: T_co = ...) -> _StringDTypeOrUnicodeArray: ...\n\ndef decode(\n a: S_co,\n encoding: str | None = ...,\n errors: str | None = ...,\n) -> NDArray[str_]: ...\ndef encode(\n a: U_co | T_co,\n encoding: str | None = ...,\n errors: str | None = ...,\n) -> NDArray[bytes_]: ...\n\n@overload\ndef expandtabs(a: U_co, tabsize: i_co = ...) -> NDArray[str_]: ...\n@overload\ndef expandtabs(a: S_co, tabsize: i_co = ...) -> NDArray[bytes_]: ...\n@overload\ndef expandtabs(a: _StringDTypeSupportsArray, tabsize: i_co = ...) -> _StringDTypeArray: ...\n@overload\ndef expandtabs(a: T_co, tabsize: i_co = ...) -> _StringDTypeOrUnicodeArray: ...\n\n@overload\ndef join(sep: U_co, seq: U_co) -> NDArray[str_]: ...\n@overload\ndef join(sep: S_co, seq: S_co) -> NDArray[bytes_]: ...\n@overload\ndef join(sep: _StringDTypeSupportsArray, seq: _StringDTypeSupportsArray) -> _StringDTypeArray: ...\n@overload\ndef join(sep: T_co, seq: T_co) -> _StringDTypeOrUnicodeArray: ...\n\n@overload\ndef ljust(a: U_co, width: i_co, fillchar: U_co = ...) -> NDArray[str_]: ...\n@overload\ndef ljust(a: S_co, width: i_co, fillchar: S_co = ...) -> NDArray[bytes_]: ...\n@overload\ndef ljust(a: _StringDTypeSupportsArray, width: i_co, fillchar: _StringDTypeSupportsArray = ...) -> _StringDTypeArray: ...\n@overload\ndef ljust(a: T_co, width: i_co, fillchar: T_co = ...) 
-> _StringDTypeOrUnicodeArray: ...\n\n@overload\ndef lower(a: U_co) -> NDArray[str_]: ...\n@overload\ndef lower(a: S_co) -> NDArray[bytes_]: ...\n@overload\ndef lower(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ...\n@overload\ndef lower(a: T_co) -> _StringDTypeOrUnicodeArray: ...\n\n@overload\ndef lstrip(a: U_co, chars: U_co | None = ...) -> NDArray[str_]: ...\n@overload\ndef lstrip(a: S_co, chars: S_co | None = ...) -> NDArray[bytes_]: ...\n@overload\ndef lstrip(a: _StringDTypeSupportsArray, chars: _StringDTypeSupportsArray | None = ...) -> _StringDTypeArray: ...\n@overload\ndef lstrip(a: T_co, chars: T_co | None = ...) -> _StringDTypeOrUnicodeArray: ...\n\n@overload\ndef partition(a: U_co, sep: U_co) -> NDArray[str_]: ...\n@overload\ndef partition(a: S_co, sep: S_co) -> NDArray[bytes_]: ...\n@overload\ndef partition(a: _StringDTypeSupportsArray, sep: _StringDTypeSupportsArray) -> _StringDTypeArray: ...\n@overload\ndef partition(a: T_co, sep: T_co) -> _StringDTypeOrUnicodeArray: ...\n\n@overload\ndef replace(\n a: U_co,\n old: U_co,\n new: U_co,\n count: i_co | None = ...,\n) -> NDArray[str_]: ...\n@overload\ndef replace(\n a: S_co,\n old: S_co,\n new: S_co,\n count: i_co | None = ...,\n) -> NDArray[bytes_]: ...\n@overload\ndef replace(\n a: _StringDTypeSupportsArray,\n old: _StringDTypeSupportsArray,\n new: _StringDTypeSupportsArray,\n count: i_co = ...,\n) -> _StringDTypeArray: ...\n@overload\ndef replace(\n a: T_co,\n old: T_co,\n new: T_co,\n count: i_co = ...,\n) -> _StringDTypeOrUnicodeArray: ...\n\n@overload\ndef rjust(\n a: U_co,\n width: i_co,\n fillchar: U_co = ...,\n) -> NDArray[str_]: ...\n@overload\ndef rjust(\n a: S_co,\n width: i_co,\n fillchar: S_co = ...,\n) -> NDArray[bytes_]: ...\n@overload\ndef rjust(\n a: _StringDTypeSupportsArray,\n width: i_co,\n fillchar: _StringDTypeSupportsArray = ...,\n) -> _StringDTypeArray: ...\n@overload\ndef rjust(\n a: T_co,\n width: i_co,\n fillchar: T_co = ...,\n) -> _StringDTypeOrUnicodeArray: 
...\n\n@overload\ndef rpartition(a: U_co, sep: U_co) -> NDArray[str_]: ...\n@overload\ndef rpartition(a: S_co, sep: S_co) -> NDArray[bytes_]: ...\n@overload\ndef rpartition(a: _StringDTypeSupportsArray, sep: _StringDTypeSupportsArray) -> _StringDTypeArray: ...\n@overload\ndef rpartition(a: T_co, sep: T_co) -> _StringDTypeOrUnicodeArray: ...\n\n@overload\ndef rsplit(\n a: U_co,\n sep: U_co | None = ...,\n maxsplit: i_co | None = ...,\n) -> NDArray[object_]: ...\n@overload\ndef rsplit(\n a: S_co,\n sep: S_co | None = ...,\n maxsplit: i_co | None = ...,\n) -> NDArray[object_]: ...\n@overload\ndef rsplit(\n a: _StringDTypeSupportsArray,\n sep: _StringDTypeSupportsArray | None = ...,\n maxsplit: i_co | None = ...,\n) -> NDArray[object_]: ...\n@overload\ndef rsplit(\n a: T_co,\n sep: T_co | None = ...,\n maxsplit: i_co | None = ...,\n) -> NDArray[object_]: ...\n\n@overload\ndef rstrip(a: U_co, chars: U_co | None = ...) -> NDArray[str_]: ...\n@overload\ndef rstrip(a: S_co, chars: S_co | None = ...) -> NDArray[bytes_]: ...\n@overload\ndef rstrip(a: _StringDTypeSupportsArray, chars: _StringDTypeSupportsArray | None = ...) -> _StringDTypeArray: ...\n@overload\ndef rstrip(a: T_co, chars: T_co | None = ...) -> _StringDTypeOrUnicodeArray: ...\n\n@overload\ndef split(\n a: U_co,\n sep: U_co | None = ...,\n maxsplit: i_co | None = ...,\n) -> NDArray[object_]: ...\n@overload\ndef split(\n a: S_co,\n sep: S_co | None = ...,\n maxsplit: i_co | None = ...,\n) -> NDArray[object_]: ...\n@overload\ndef split(\n a: _StringDTypeSupportsArray,\n sep: _StringDTypeSupportsArray | None = ...,\n maxsplit: i_co | None = ...,\n) -> NDArray[object_]: ...\n@overload\ndef split(\n a: T_co,\n sep: T_co | None = ...,\n maxsplit: i_co | None = ...,\n) -> NDArray[object_]: ...\n\ndef splitlines(a: UST_co, keepends: b_co | None = ...) -> NDArray[np.object_]: ...\n\n@overload\ndef strip(a: U_co, chars: U_co | None = ...) -> NDArray[str_]: ...\n@overload\ndef strip(a: S_co, chars: S_co | None = ...) 
-> NDArray[bytes_]: ...\n@overload\ndef strip(a: _StringDTypeSupportsArray, chars: _StringDTypeSupportsArray | None = ...) -> _StringDTypeArray: ...\n@overload\ndef strip(a: T_co, chars: T_co | None = ...) -> _StringDTypeOrUnicodeArray: ...\n\n@overload\ndef swapcase(a: U_co) -> NDArray[str_]: ...\n@overload\ndef swapcase(a: S_co) -> NDArray[bytes_]: ...\n@overload\ndef swapcase(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ...\n@overload\ndef swapcase(a: T_co) -> _StringDTypeOrUnicodeArray: ...\n\n@overload\ndef title(a: U_co) -> NDArray[str_]: ...\n@overload\ndef title(a: S_co) -> NDArray[bytes_]: ...\n@overload\ndef title(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ...\n@overload\ndef title(a: T_co) -> _StringDTypeOrUnicodeArray: ...\n\n@overload\ndef translate(\n a: U_co,\n table: str,\n deletechars: str | None = ...,\n) -> NDArray[str_]: ...\n@overload\ndef translate(\n a: S_co,\n table: str,\n deletechars: str | None = ...,\n) -> NDArray[bytes_]: ...\n@overload\ndef translate(\n a: _StringDTypeSupportsArray,\n table: str,\n deletechars: str | None = ...,\n) -> _StringDTypeArray: ...\n@overload\ndef translate(\n a: T_co,\n table: str,\n deletechars: str | None = ...,\n) -> _StringDTypeOrUnicodeArray: ...\n\n@overload\ndef upper(a: U_co) -> NDArray[str_]: ...\n@overload\ndef upper(a: S_co) -> NDArray[bytes_]: ...\n@overload\ndef upper(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ...\n@overload\ndef upper(a: T_co) -> _StringDTypeOrUnicodeArray: ...\n\n@overload\ndef zfill(a: U_co, width: i_co) -> NDArray[str_]: ...\n@overload\ndef zfill(a: S_co, width: i_co) -> NDArray[bytes_]: ...\n@overload\ndef zfill(a: _StringDTypeSupportsArray, width: i_co) -> _StringDTypeArray: ...\n@overload\ndef zfill(a: T_co, width: i_co) -> _StringDTypeOrUnicodeArray: ...\n\n# String information\n@overload\ndef count(\n a: U_co,\n sub: U_co,\n start: i_co = ...,\n end: i_co | None = ...,\n) -> NDArray[int_]: ...\n@overload\ndef count(\n a: S_co,\n sub: S_co,\n 
start: i_co = ...,\n end: i_co | None = ...,\n) -> NDArray[int_]: ...\n@overload\ndef count(\n a: T_co,\n sub: T_co,\n start: i_co = ...,\n end: i_co | None = ...,\n) -> NDArray[np.int_]: ...\n\n@overload\ndef endswith(\n a: U_co,\n suffix: U_co,\n start: i_co = ...,\n end: i_co | None = ...,\n) -> NDArray[np.bool]: ...\n@overload\ndef endswith(\n a: S_co,\n suffix: S_co,\n start: i_co = ...,\n end: i_co | None = ...,\n) -> NDArray[np.bool]: ...\n@overload\ndef endswith(\n a: T_co,\n suffix: T_co,\n start: i_co = ...,\n end: i_co | None = ...,\n) -> NDArray[np.bool]: ...\n\n@overload\ndef find(\n a: U_co,\n sub: U_co,\n start: i_co = ...,\n end: i_co | None = ...,\n) -> NDArray[int_]: ...\n@overload\ndef find(\n a: S_co,\n sub: S_co,\n start: i_co = ...,\n end: i_co | None = ...,\n) -> NDArray[int_]: ...\n@overload\ndef find(\n a: T_co,\n sub: T_co,\n start: i_co = ...,\n end: i_co | None = ...,\n) -> NDArray[np.int_]: ...\n\n@overload\ndef index(\n a: U_co,\n sub: U_co,\n start: i_co = ...,\n end: i_co | None = ...,\n) -> NDArray[int_]: ...\n@overload\ndef index(\n a: S_co,\n sub: S_co,\n start: i_co = ...,\n end: i_co | None = ...,\n) -> NDArray[int_]: ...\n@overload\ndef index(\n a: T_co,\n sub: T_co,\n start: i_co = ...,\n end: i_co | None = ...,\n) -> NDArray[np.int_]: ...\n\ndef isalpha(a: UST_co) -> NDArray[np.bool]: ...\ndef isalnum(a: UST_co) -> NDArray[np.bool]: ...\ndef isdecimal(a: U_co | T_co) -> NDArray[np.bool]: ...\ndef isdigit(a: UST_co) -> NDArray[np.bool]: ...\ndef islower(a: UST_co) -> NDArray[np.bool]: ...\ndef isnumeric(a: U_co | T_co) -> NDArray[np.bool]: ...\ndef isspace(a: UST_co) -> NDArray[np.bool]: ...\ndef istitle(a: UST_co) -> NDArray[np.bool]: ...\ndef isupper(a: UST_co) -> NDArray[np.bool]: ...\n\n@overload\ndef rfind(\n a: U_co,\n sub: U_co,\n start: i_co = ...,\n end: i_co | None = ...,\n) -> NDArray[int_]: ...\n@overload\ndef rfind(\n a: S_co,\n sub: S_co,\n start: i_co = ...,\n end: i_co | None = ...,\n) -> NDArray[int_]: 
...\n@overload\ndef rfind(\n a: T_co,\n sub: T_co,\n start: i_co = ...,\n end: i_co | None = ...,\n) -> NDArray[np.int_]: ...\n\n@overload\ndef rindex(\n a: U_co,\n sub: U_co,\n start: i_co = ...,\n end: i_co | None = ...,\n) -> NDArray[int_]: ...\n@overload\ndef rindex(\n a: S_co,\n sub: S_co,\n start: i_co = ...,\n end: i_co | None = ...,\n) -> NDArray[int_]: ...\n@overload\ndef rindex(\n a: T_co,\n sub: T_co,\n start: i_co = ...,\n end: i_co | None = ...,\n) -> NDArray[np.int_]: ...\n\n@overload\ndef startswith(\n a: U_co,\n prefix: U_co,\n start: i_co = ...,\n end: i_co | None = ...,\n) -> NDArray[np.bool]: ...\n@overload\ndef startswith(\n a: S_co,\n prefix: S_co,\n start: i_co = ...,\n end: i_co | None = ...,\n) -> NDArray[np.bool]: ...\n@overload\ndef startswith(\n a: T_co,\n suffix: T_co,\n start: i_co = ...,\n end: i_co | None = ...,\n) -> NDArray[np.bool]: ...\n\ndef str_len(A: UST_co) -> NDArray[int_]: ...\n\n# Overload 1 and 2: str- or bytes-based array-likes\n# overload 3: arbitrary object with unicode=False (-> bytes_)\n# overload 4: arbitrary object with unicode=True (-> str_)\n@overload\ndef array(\n obj: U_co,\n itemsize: int | None = ...,\n copy: bool = ...,\n unicode: L[False] = ...,\n order: _OrderKACF = ...,\n) -> _CharArray[str_]: ...\n@overload\ndef array(\n obj: S_co,\n itemsize: int | None = ...,\n copy: bool = ...,\n unicode: L[False] = ...,\n order: _OrderKACF = ...,\n) -> _CharArray[bytes_]: ...\n@overload\ndef array(\n obj: object,\n itemsize: int | None = ...,\n copy: bool = ...,\n unicode: L[False] = ...,\n order: _OrderKACF = ...,\n) -> _CharArray[bytes_]: ...\n@overload\ndef array(\n obj: object,\n itemsize: int | None = ...,\n copy: bool = ...,\n unicode: L[True] = ...,\n order: _OrderKACF = ...,\n) -> _CharArray[str_]: ...\n\n@overload\ndef asarray(\n obj: U_co,\n itemsize: int | None = ...,\n unicode: L[False] = ...,\n order: _OrderKACF = ...,\n) -> _CharArray[str_]: ...\n@overload\ndef asarray(\n obj: S_co,\n itemsize: int | 
None = ...,\n unicode: L[False] = ...,\n order: _OrderKACF = ...,\n) -> _CharArray[bytes_]: ...\n@overload\ndef asarray(\n obj: object,\n itemsize: int | None = ...,\n unicode: L[False] = ...,\n order: _OrderKACF = ...,\n) -> _CharArray[bytes_]: ...\n@overload\ndef asarray(\n obj: object,\n itemsize: int | None = ...,\n unicode: L[True] = ...,\n order: _OrderKACF = ...,\n) -> _CharArray[str_]: ...\n
.venv\Lib\site-packages\numpy\_core\defchararray.pyi
defchararray.pyi
Other
27,957
0.95
0.215668
0.00499
node-utils
888
2024-12-22T19:32:53.185599
GPL-3.0
false
3c39894a2696c19a1140cd197fc9dfc4
"""\nImplementation of optimized einsum.\n\n"""\nimport itertools\nimport operator\n\nfrom numpy._core.multiarray import c_einsum\nfrom numpy._core.numeric import asanyarray, tensordot\nfrom numpy._core.overrides import array_function_dispatch\n\n__all__ = ['einsum', 'einsum_path']\n\n# importing string for string.ascii_letters would be too slow\n# the first import before caching has been measured to take 800 µs (#23777)\n# imports begin with uppercase to mimic ASCII values to avoid sorting issues\neinsum_symbols = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'\neinsum_symbols_set = set(einsum_symbols)\n\n\ndef _flop_count(idx_contraction, inner, num_terms, size_dictionary):\n """\n Computes the number of FLOPS in the contraction.\n\n Parameters\n ----------\n idx_contraction : iterable\n The indices involved in the contraction\n inner : bool\n Does this contraction require an inner product?\n num_terms : int\n The number of terms in a contraction\n size_dictionary : dict\n The size of each of the indices in idx_contraction\n\n Returns\n -------\n flop_count : int\n The total number of FLOPS required for the contraction.\n\n Examples\n --------\n\n >>> _flop_count('abc', False, 1, {'a': 2, 'b':3, 'c':5})\n 30\n\n >>> _flop_count('abc', True, 2, {'a': 2, 'b':3, 'c':5})\n 60\n\n """\n\n overall_size = _compute_size_by_dict(idx_contraction, size_dictionary)\n op_factor = max(1, num_terms - 1)\n if inner:\n op_factor += 1\n\n return overall_size * op_factor\n\ndef _compute_size_by_dict(indices, idx_dict):\n """\n Computes the product of the elements in indices based on the dictionary\n idx_dict.\n\n Parameters\n ----------\n indices : iterable\n Indices to base the product on.\n idx_dict : dictionary\n Dictionary of index sizes\n\n Returns\n -------\n ret : int\n The resulting product.\n\n Examples\n --------\n >>> _compute_size_by_dict('abbc', {'a': 2, 'b':3, 'c':5})\n 90\n\n """\n ret = 1\n for i in indices:\n ret *= idx_dict[i]\n return ret\n\n\ndef 
_find_contraction(positions, input_sets, output_set):\n """\n Finds the contraction for a given set of input and output sets.\n\n Parameters\n ----------\n positions : iterable\n Integer positions of terms used in the contraction.\n input_sets : list\n List of sets that represent the lhs side of the einsum subscript\n output_set : set\n Set that represents the rhs side of the overall einsum subscript\n\n Returns\n -------\n new_result : set\n The indices of the resulting contraction\n remaining : list\n List of sets that have not been contracted, the new set is appended to\n the end of this list\n idx_removed : set\n Indices removed from the entire contraction\n idx_contraction : set\n The indices used in the current contraction\n\n Examples\n --------\n\n # A simple dot product test case\n >>> pos = (0, 1)\n >>> isets = [set('ab'), set('bc')]\n >>> oset = set('ac')\n >>> _find_contraction(pos, isets, oset)\n ({'a', 'c'}, [{'a', 'c'}], {'b'}, {'a', 'b', 'c'})\n\n # A more complex case with additional terms in the contraction\n >>> pos = (0, 2)\n >>> isets = [set('abd'), set('ac'), set('bdc')]\n >>> oset = set('ac')\n >>> _find_contraction(pos, isets, oset)\n ({'a', 'c'}, [{'a', 'c'}, {'a', 'c'}], {'b', 'd'}, {'a', 'b', 'c', 'd'})\n """\n\n idx_contract = set()\n idx_remain = output_set.copy()\n remaining = []\n for ind, value in enumerate(input_sets):\n if ind in positions:\n idx_contract |= value\n else:\n remaining.append(value)\n idx_remain |= value\n\n new_result = idx_remain & idx_contract\n idx_removed = (idx_contract - new_result)\n remaining.append(new_result)\n\n return (new_result, remaining, idx_removed, idx_contract)\n\n\ndef _optimal_path(input_sets, output_set, idx_dict, memory_limit):\n """\n Computes all possible pair contractions, sieves the results based\n on ``memory_limit`` and returns the lowest cost path. 
This algorithm\n scales factorial with respect to the elements in the list ``input_sets``.\n\n Parameters\n ----------\n input_sets : list\n List of sets that represent the lhs side of the einsum subscript\n output_set : set\n Set that represents the rhs side of the overall einsum subscript\n idx_dict : dictionary\n Dictionary of index sizes\n memory_limit : int\n The maximum number of elements in a temporary array\n\n Returns\n -------\n path : list\n The optimal contraction order within the memory limit constraint.\n\n Examples\n --------\n >>> isets = [set('abd'), set('ac'), set('bdc')]\n >>> oset = set()\n >>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4}\n >>> _optimal_path(isets, oset, idx_sizes, 5000)\n [(0, 2), (0, 1)]\n """\n\n full_results = [(0, [], input_sets)]\n for iteration in range(len(input_sets) - 1):\n iter_results = []\n\n # Compute all unique pairs\n for curr in full_results:\n cost, positions, remaining = curr\n for con in itertools.combinations(\n range(len(input_sets) - iteration), 2\n ):\n\n # Find the contraction\n cont = _find_contraction(con, remaining, output_set)\n new_result, new_input_sets, idx_removed, idx_contract = cont\n\n # Sieve the results based on memory_limit\n new_size = _compute_size_by_dict(new_result, idx_dict)\n if new_size > memory_limit:\n continue\n\n # Build (total_cost, positions, indices_remaining)\n total_cost = cost + _flop_count(\n idx_contract, idx_removed, len(con), idx_dict\n )\n new_pos = positions + [con]\n iter_results.append((total_cost, new_pos, new_input_sets))\n\n # Update combinatorial list, if we did not find anything return best\n # path + remaining contractions\n if iter_results:\n full_results = iter_results\n else:\n path = min(full_results, key=lambda x: x[0])[1]\n path += [tuple(range(len(input_sets) - iteration))]\n return path\n\n # If we have not found anything return single einsum contraction\n if len(full_results) == 0:\n return [tuple(range(len(input_sets)))]\n\n path = min(full_results, 
key=lambda x: x[0])[1]
    return path

def _parse_possible_contraction(
        positions, input_sets, output_set, idx_dict,
        memory_limit, path_cost, naive_cost
    ):
    """Compute the cost (removed size + flops) and resultant indices for
    performing the contraction specified by ``positions``.

    Parameters
    ----------
    positions : tuple of int
        The locations of the proposed tensors to contract.
    input_sets : list of sets
        The indices found on each tensors.
    output_set : set
        The output indices of the expression.
    idx_dict : dict
        Mapping of each index to its size.
    memory_limit : int
        The total allowed size for an intermediary tensor.
    path_cost : int
        The contraction cost so far.
    naive_cost : int
        The cost of the unoptimized expression.

    Returns
    -------
    cost : (int, int)
        A tuple containing the size of any indices removed, and the flop cost.
    positions : tuple of int
        The locations of the proposed tensors to contract.
    new_input_sets : list of sets
        The resulting new list of indices if this proposed contraction
        is performed.

    """

    # Find the contraction
    contract = _find_contraction(positions, input_sets, output_set)
    idx_result, new_input_sets, idx_removed, idx_contract = contract

    # Sieve the results based on memory_limit
    # (None signals to the caller that this candidate was discarded)
    new_size = _compute_size_by_dict(idx_result, idx_dict)
    if new_size > memory_limit:
        return None

    # Build sort tuple: candidates are later ranked by
    # (-removed_size, cost), i.e. prefer contractions that shed the most
    # intermediate size, breaking ties by flop count.
    old_sizes = (
        _compute_size_by_dict(input_sets[p], idx_dict) for p in positions
    )
    removed_size = sum(old_sizes) - new_size

    # NB: removed_size used to be just the size of any removed indices i.e.:
    # helpers.compute_size_by_dict(idx_removed, idx_dict)
    cost = _flop_count(idx_contract, idx_removed, len(positions), idx_dict)
    sort = (-removed_size, cost)

    # Sieve based on total cost as well: never exceed the unoptimized cost
    if (path_cost + cost) > naive_cost:
        return None

    # Add contraction to possible choices
    return [sort, positions, new_input_sets]


def _update_other_results(results, best):
    """Update the positions and provisional input_sets of ``results``
    based on performing the contraction result ``best``. Remove any
    involving the tensors contracted.

    Parameters
    ----------
    results : list
        List of contraction results produced by
        ``_parse_possible_contraction``.
    best : list
        The best contraction of ``results`` i.e. the one that
        will be performed.

    Returns
    -------
    mod_results : list
        The list of modified results, updated with outcome of
        ``best`` contraction.
    """

    best_con = best[1]
    bx, by = best_con
    mod_results = []

    for cost, (x, y), con_sets in results:

        # Ignore results involving tensors just contracted
        if x in best_con or y in best_con:
            continue

        # Update the input_sets: drop best's two operands from this
        # candidate's provisional list. ``con_sets`` was built with x and y
        # already removed, so bx/by must be shifted down by one for each of
        # x, y that sat below them. Delete by first, then bx (by > bx is
        # not guaranteed here, the int() corrections handle the shifts).
        del con_sets[by - int(by > x) - int(by > y)]
        del con_sets[bx - int(bx > x) - int(bx > y)]
        # Insert best's freshly formed tensor just before this candidate's
        # own new tensor, which stays in the final slot.
        con_sets.insert(-1, best[2][-1])

        # Update the position indices: x and y shift down for the removal
        # of bx and by from the shared base list.
        mod_con = x - int(x > bx) - int(x > by), y - int(y > bx) - int(y > by)
        mod_results.append((cost, mod_con, con_sets))

    return mod_results

def _greedy_path(input_sets, output_set, idx_dict, memory_limit):
    """
    Finds the path by contracting the best pair until the input list is
    exhausted. The best pair is found by minimizing the tuple
    ``(-prod(indices_removed), cost)``. What this amounts to is prioritizing
    matrix multiplication or inner product operations, then Hadamard like
    operations, and finally outer operations. Outer products are limited by
    ``memory_limit``. 
This algorithm scales cubically with respect to the
    number of elements in the list ``input_sets``.

    Parameters
    ----------
    input_sets : list
        List of sets that represent the lhs side of the einsum subscript
    output_set : set
        Set that represents the rhs side of the overall einsum subscript
    idx_dict : dictionary
        Dictionary of index sizes
    memory_limit : int
        The maximum number of elements in a temporary array

    Returns
    -------
    path : list
        The greedy contraction order within the memory limit constraint.

    Examples
    --------
    >>> isets = [set('abd'), set('ac'), set('bdc')]
    >>> oset = set()
    >>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4}
    >>> _greedy_path(isets, oset, idx_sizes, 5000)
    [(0, 2), (0, 1)]
    """

    # Handle trivial cases that leaked through
    if len(input_sets) == 1:
        return [(0,)]
    elif len(input_sets) == 2:
        return [(0, 1)]

    # Build up a naive cost: the flop count of contracting everything in a
    # single einsum call, used as an upper bound when sieving candidates.
    contract = _find_contraction(
        range(len(input_sets)), input_sets, output_set
    )
    idx_result, new_input_sets, idx_removed, idx_contract = contract
    naive_cost = _flop_count(
        idx_contract, idx_removed, len(input_sets), idx_dict
    )

    # Initially iterate over all pairs
    comb_iter = itertools.combinations(range(len(input_sets)), 2)
    known_contractions = []

    path_cost = 0
    path = []

    for iteration in range(len(input_sets) - 1):

        # Iterate over all pairs on the first step, only previously
        # found pairs on subsequent steps
        for positions in comb_iter:

            # Always initially ignore outer products
            if input_sets[positions[0]].isdisjoint(input_sets[positions[1]]):
                continue

            # None means the candidate was sieved out (memory or cost)
            result = _parse_possible_contraction(
                positions, input_sets, output_set, idx_dict,
                memory_limit, path_cost, naive_cost
            )
            if result is not None:
                known_contractions.append(result)

        # If we do not have a inner contraction, rescan pairs
        # including outer products
        if len(known_contractions) == 0:

            # Then check the outer products
            for positions in itertools.combinations(
                range(len(input_sets)), 2
            ):
                result = _parse_possible_contraction(
                    positions, input_sets, output_set, idx_dict,
                    memory_limit, path_cost, naive_cost
                )
                if result is not None:
                    known_contractions.append(result)

            # If we still did not find any remaining contractions,
            # default back to einsum like behavior
            if len(known_contractions) == 0:
                path.append(tuple(range(len(input_sets))))
                break

        # Sort based on first index, i.e. the (-removed_size, cost) key
        best = min(known_contractions, key=lambda x: x[0])

        # Now propagate as many unused contractions as possible
        # to the next iteration
        known_contractions = _update_other_results(known_contractions, best)

        # Next iteration only compute contractions with the new tensor
        # (appended last by _find_contraction); all other pairwise
        # contractions have been accounted for.
        input_sets = best[2]
        new_tensor_pos = len(input_sets) - 1
        comb_iter = ((i, new_tensor_pos) for i in range(new_tensor_pos))

        # Update path and total cost (best[0] is the (-removed_size, cost)
        # sort key, so best[0][1] is the flop cost of this contraction).
        path.append(best[1])
        path_cost += best[0][1]

    return path


def _can_dot(inputs, result, idx_removed):
    """
    Checks if we can use BLAS (np.tensordot) call and its beneficial to do so.

    Parameters
    ----------
    inputs : list of str
        Specifies the subscripts for summation.
    result : str
        Resulting summation.
    idx_removed : set
        Indices that are removed in the summation


    Returns
    -------
    type : bool
        Returns true if BLAS should and can be used, else False

    Notes
    -----
    If the operations is BLAS level 1 or 2 and is not already aligned
    we default back to einsum as the memory movement to copy is more
    costly than the operation itself.


    Examples
    --------

    # Standard GEMM operation
    >>> _can_dot(['ij', 'jk'], 'ik', set('j'))
    True

    # Can use the standard BLAS, but requires odd data movement
    >>> _can_dot(['ijj', 'jk'], 'ik', set('j'))
    False

    # DDOT where the memory is not aligned
    >>> _can_dot(['ijk', 'ikj'], '', set('ijk'))
    False

    """

    # All `dot` calls remove 
indices
    if len(idx_removed) == 0:
        return False

    # BLAS can only handle two operands
    if len(inputs) != 2:
        return False

    input_left, input_right = inputs

    for c in set(input_left + input_right):
        # can't deal with repeated indices on same input or more than 2 total
        nl, nr = input_left.count(c), input_right.count(c)
        if (nl > 1) or (nr > 1) or (nl + nr > 2):
            return False

        # can't do implicit summation or dimension collapse e.g.
        #     "ab,bc->c" (implicitly sum over 'a')
        #     "ab,ca->ca" (take diagonal of 'a')
        if nl + nr - 1 == int(c in result):
            return False

    # Build a few temporaries
    set_left = set(input_left)
    set_right = set(input_right)
    keep_left = set_left - idx_removed
    keep_right = set_right - idx_removed
    rs = len(idx_removed)

    # At this point we are a DOT, GEMV, or GEMM operation

    # Handle inner products

    # DDOT with aligned data
    if input_left == input_right:
        return True

    # DDOT without aligned data (better to use einsum)
    if set_left == set_right:
        return False

    # Handle the 4 possible (aligned) GEMV or GEMM cases.
    # The summed indices map cleanly onto tensordot only when they form a
    # contiguous run at the front or back of each operand; the four slice
    # comparisons below check each front/back combination.

    # GEMM or GEMV no transpose
    if input_left[-rs:] == input_right[:rs]:
        return True

    # GEMM or GEMV transpose both
    if input_left[:rs] == input_right[-rs:]:
        return True

    # GEMM or GEMV transpose right
    if input_left[-rs:] == input_right[-rs:]:
        return True

    # GEMM or GEMV transpose left
    if input_left[:rs] == input_right[:rs]:
        return True

    # Einsum is faster than GEMV if we have to copy data
    if not keep_left or not keep_right:
        return False

    # We are a matrix-matrix product, but we need to copy data
    return True


def _parse_einsum_input(operands):
    """
    A reproduction of einsum c side einsum parsing in python.

    Returns
    -------
    input_strings : str
        Parsed input strings
    output_string : str
        Parsed output string
    operands : list of array_like
        The operands to use in the numpy contraction

    Examples
    --------
    The operand list is simplified 
to reduce printing:\n\n >>> np.random.seed(123)\n >>> a = np.random.rand(4, 4)\n >>> b = np.random.rand(4, 4, 4)\n >>> _parse_einsum_input(('...a,...a->...', a, b))\n ('za,xza', 'xz', [a, b]) # may vary\n\n >>> _parse_einsum_input((a, [Ellipsis, 0], b, [Ellipsis, 0]))\n ('za,xza', 'xz', [a, b]) # may vary\n """\n\n if len(operands) == 0:\n raise ValueError("No input operands")\n\n if isinstance(operands[0], str):\n subscripts = operands[0].replace(" ", "")\n operands = [asanyarray(v) for v in operands[1:]]\n\n # Ensure all characters are valid\n for s in subscripts:\n if s in '.,->':\n continue\n if s not in einsum_symbols:\n raise ValueError(f"Character {s} is not a valid symbol.")\n\n else:\n tmp_operands = list(operands)\n operand_list = []\n subscript_list = []\n for p in range(len(operands) // 2):\n operand_list.append(tmp_operands.pop(0))\n subscript_list.append(tmp_operands.pop(0))\n\n output_list = tmp_operands[-1] if len(tmp_operands) else None\n operands = [asanyarray(v) for v in operand_list]\n subscripts = ""\n last = len(subscript_list) - 1\n for num, sub in enumerate(subscript_list):\n for s in sub:\n if s is Ellipsis:\n subscripts += "..."\n else:\n try:\n s = operator.index(s)\n except TypeError as e:\n raise TypeError(\n "For this input type lists must contain "\n "either int or Ellipsis"\n ) from e\n subscripts += einsum_symbols[s]\n if num != last:\n subscripts += ","\n\n if output_list is not None:\n subscripts += "->"\n for s in output_list:\n if s is Ellipsis:\n subscripts += "..."\n else:\n try:\n s = operator.index(s)\n except TypeError as e:\n raise TypeError(\n "For this input type lists must contain "\n "either int or Ellipsis"\n ) from e\n subscripts += einsum_symbols[s]\n # Check for proper "->"\n if ("-" in subscripts) or (">" in subscripts):\n invalid = (subscripts.count("-") > 1) or (subscripts.count(">") > 1)\n if invalid or (subscripts.count("->") != 1):\n raise ValueError("Subscripts can only contain one '->'.")\n\n # Parse 
ellipses\n if "." in subscripts:\n used = subscripts.replace(".", "").replace(",", "").replace("->", "")\n unused = list(einsum_symbols_set - set(used))\n ellipse_inds = "".join(unused)\n longest = 0\n\n if "->" in subscripts:\n input_tmp, output_sub = subscripts.split("->")\n split_subscripts = input_tmp.split(",")\n out_sub = True\n else:\n split_subscripts = subscripts.split(',')\n out_sub = False\n\n for num, sub in enumerate(split_subscripts):\n if "." in sub:\n if (sub.count(".") != 3) or (sub.count("...") != 1):\n raise ValueError("Invalid Ellipses.")\n\n # Take into account numerical values\n if operands[num].shape == ():\n ellipse_count = 0\n else:\n ellipse_count = max(operands[num].ndim, 1)\n ellipse_count -= (len(sub) - 3)\n\n if ellipse_count > longest:\n longest = ellipse_count\n\n if ellipse_count < 0:\n raise ValueError("Ellipses lengths do not match.")\n elif ellipse_count == 0:\n split_subscripts[num] = sub.replace('...', '')\n else:\n rep_inds = ellipse_inds[-ellipse_count:]\n split_subscripts[num] = sub.replace('...', rep_inds)\n\n subscripts = ",".join(split_subscripts)\n if longest == 0:\n out_ellipse = ""\n else:\n out_ellipse = ellipse_inds[-longest:]\n\n if out_sub:\n subscripts += "->" + output_sub.replace("...", out_ellipse)\n else:\n # Special care for outputless ellipses\n output_subscript = ""\n tmp_subscripts = subscripts.replace(",", "")\n for s in sorted(set(tmp_subscripts)):\n if s not in (einsum_symbols):\n raise ValueError(f"Character {s} is not a valid symbol.")\n if tmp_subscripts.count(s) == 1:\n output_subscript += s\n normal_inds = ''.join(sorted(set(output_subscript) -\n set(out_ellipse)))\n\n subscripts += "->" + out_ellipse + normal_inds\n\n # Build output string if does not exist\n if "->" in subscripts:\n input_subscripts, output_subscript = subscripts.split("->")\n else:\n input_subscripts = subscripts\n # Build output subscripts\n tmp_subscripts = subscripts.replace(",", "")\n output_subscript = ""\n for s in 
sorted(set(tmp_subscripts)):\n if s not in einsum_symbols:\n raise ValueError(f"Character {s} is not a valid symbol.")\n if tmp_subscripts.count(s) == 1:\n output_subscript += s\n\n # Make sure output subscripts are in the input\n for char in output_subscript:\n if output_subscript.count(char) != 1:\n raise ValueError("Output character %s appeared more than once in "\n "the output." % char)\n if char not in input_subscripts:\n raise ValueError(f"Output character {char} did not appear in the input")\n\n # Make sure number operands is equivalent to the number of terms\n if len(input_subscripts.split(',')) != len(operands):\n raise ValueError("Number of einsum subscripts must be equal to the "\n "number of operands.")\n\n return (input_subscripts, output_subscript, operands)\n\n\ndef _einsum_path_dispatcher(*operands, optimize=None, einsum_call=None):\n # NOTE: technically, we should only dispatch on array-like arguments, not\n # subscripts (given as strings). But separating operands into\n # arrays/subscripts is a little tricky/slow (given einsum's two supported\n # signatures), so as a practical shortcut we dispatch on everything.\n # Strings will be ignored for dispatching since they don't define\n # __array_function__.\n return operands\n\n\n@array_function_dispatch(_einsum_path_dispatcher, module='numpy')\ndef einsum_path(*operands, optimize='greedy', einsum_call=False):\n """\n einsum_path(subscripts, *operands, optimize='greedy')\n\n Evaluates the lowest cost contraction order for an einsum expression by\n considering the creation of intermediate arrays.\n\n Parameters\n ----------\n subscripts : str\n Specifies the subscripts for summation.\n *operands : list of array_like\n These are the arrays for the operation.\n optimize : {bool, list, tuple, 'greedy', 'optimal'}\n Choose the type of path. If a tuple is provided, the second argument is\n assumed to be the maximum intermediate size created. 
If only a single\n argument is provided the largest input or output array size is used\n as a maximum intermediate size.\n\n * if a list is given that starts with ``einsum_path``, uses this as the\n contraction path\n * if False no optimization is taken\n * if True defaults to the 'greedy' algorithm\n * 'optimal' An algorithm that combinatorially explores all possible\n ways of contracting the listed tensors and chooses the least costly\n path. Scales exponentially with the number of terms in the\n contraction.\n * 'greedy' An algorithm that chooses the best pair contraction\n at each step. Effectively, this algorithm searches the largest inner,\n Hadamard, and then outer products at each step. Scales cubically with\n the number of terms in the contraction. Equivalent to the 'optimal'\n path for most contractions.\n\n Default is 'greedy'.\n\n Returns\n -------\n path : list of tuples\n A list representation of the einsum path.\n string_repr : str\n A printable representation of the einsum path.\n\n Notes\n -----\n The resulting path indicates which terms of the input contraction should be\n contracted first, the result of this contraction is then appended to the\n end of the contraction list. This list can then be iterated over until all\n intermediate contractions are complete.\n\n See Also\n --------\n einsum, linalg.multi_dot\n\n Examples\n --------\n\n We can begin with a chain dot example. In this case, it is optimal to\n contract the ``b`` and ``c`` tensors first as represented by the first\n element of the path ``(1, 2)``. 
The resulting tensor is added to the end\n of the contraction and the remaining contraction ``(0, 1)`` is then\n completed.\n\n >>> np.random.seed(123)\n >>> a = np.random.rand(2, 2)\n >>> b = np.random.rand(2, 5)\n >>> c = np.random.rand(5, 2)\n >>> path_info = np.einsum_path('ij,jk,kl->il', a, b, c, optimize='greedy')\n >>> print(path_info[0])\n ['einsum_path', (1, 2), (0, 1)]\n >>> print(path_info[1])\n Complete contraction: ij,jk,kl->il # may vary\n Naive scaling: 4\n Optimized scaling: 3\n Naive FLOP count: 1.600e+02\n Optimized FLOP count: 5.600e+01\n Theoretical speedup: 2.857\n Largest intermediate: 4.000e+00 elements\n -------------------------------------------------------------------------\n scaling current remaining\n -------------------------------------------------------------------------\n 3 kl,jk->jl ij,jl->il\n 3 jl,ij->il il->il\n\n\n A more complex index transformation example.\n\n >>> I = np.random.rand(10, 10, 10, 10)\n >>> C = np.random.rand(10, 10)\n >>> path_info = np.einsum_path('ea,fb,abcd,gc,hd->efgh', C, C, I, C, C,\n ... 
optimize='greedy')\n\n >>> print(path_info[0])\n ['einsum_path', (0, 2), (0, 3), (0, 2), (0, 1)]\n >>> print(path_info[1])\n Complete contraction: ea,fb,abcd,gc,hd->efgh # may vary\n Naive scaling: 8\n Optimized scaling: 5\n Naive FLOP count: 8.000e+08\n Optimized FLOP count: 8.000e+05\n Theoretical speedup: 1000.000\n Largest intermediate: 1.000e+04 elements\n --------------------------------------------------------------------------\n scaling current remaining\n --------------------------------------------------------------------------\n 5 abcd,ea->bcde fb,gc,hd,bcde->efgh\n 5 bcde,fb->cdef gc,hd,cdef->efgh\n 5 cdef,gc->defg hd,defg->efgh\n 5 defg,hd->efgh efgh->efgh\n """\n\n # Figure out what the path really is\n path_type = optimize\n if path_type is True:\n path_type = 'greedy'\n if path_type is None:\n path_type = False\n\n explicit_einsum_path = False\n memory_limit = None\n\n # No optimization or a named path algorithm\n if (path_type is False) or isinstance(path_type, str):\n pass\n\n # Given an explicit path\n elif len(path_type) and (path_type[0] == 'einsum_path'):\n explicit_einsum_path = True\n\n # Path tuple with memory limit\n elif ((len(path_type) == 2) and isinstance(path_type[0], str) and\n isinstance(path_type[1], (int, float))):\n memory_limit = int(path_type[1])\n path_type = path_type[0]\n\n else:\n raise TypeError(f"Did not understand the path: {str(path_type)}")\n\n # Hidden option, only einsum should call this\n einsum_call_arg = einsum_call\n\n # Python side parsing\n input_subscripts, output_subscript, operands = (\n _parse_einsum_input(operands)\n )\n\n # Build a few useful list and sets\n input_list = input_subscripts.split(',')\n input_sets = [set(x) for x in input_list]\n output_set = set(output_subscript)\n indices = set(input_subscripts.replace(',', ''))\n\n # Get length of each unique dimension and ensure all dimensions are correct\n dimension_dict = {}\n broadcast_indices = [[] for x in range(len(input_list))]\n for tnum, term in 
enumerate(input_list):\n sh = operands[tnum].shape\n if len(sh) != len(term):\n raise ValueError("Einstein sum subscript %s does not contain the "\n "correct number of indices for operand %d."\n % (input_subscripts[tnum], tnum))\n for cnum, char in enumerate(term):\n dim = sh[cnum]\n\n # Build out broadcast indices\n if dim == 1:\n broadcast_indices[tnum].append(char)\n\n if char in dimension_dict.keys():\n # For broadcasting cases we always want the largest dim size\n if dimension_dict[char] == 1:\n dimension_dict[char] = dim\n elif dim not in (1, dimension_dict[char]):\n raise ValueError("Size of label '%s' for operand %d (%d) "\n "does not match previous terms (%d)."\n % (char, tnum, dimension_dict[char], dim))\n else:\n dimension_dict[char] = dim\n\n # Convert broadcast inds to sets\n broadcast_indices = [set(x) for x in broadcast_indices]\n\n # Compute size of each input array plus the output array\n size_list = [_compute_size_by_dict(term, dimension_dict)\n for term in input_list + [output_subscript]]\n max_size = max(size_list)\n\n if memory_limit is None:\n memory_arg = max_size\n else:\n memory_arg = memory_limit\n\n # Compute naive cost\n # This isn't quite right, need to look into exactly how einsum does this\n inner_product = (sum(len(x) for x in input_sets) - len(indices)) > 0\n naive_cost = _flop_count(\n indices, inner_product, len(input_list), dimension_dict\n )\n\n # Compute the path\n if explicit_einsum_path:\n path = path_type[1:]\n elif (\n (path_type is False)\n or (len(input_list) in [1, 2])\n or (indices == output_set)\n ):\n # Nothing to be optimized, leave it to einsum\n path = [tuple(range(len(input_list)))]\n elif path_type == "greedy":\n path = _greedy_path(\n input_sets, output_set, dimension_dict, memory_arg\n )\n elif path_type == "optimal":\n path = _optimal_path(\n input_sets, output_set, dimension_dict, memory_arg\n )\n else:\n raise KeyError("Path name %s not found", path_type)\n\n cost_list, scale_list, size_list, 
contraction_list = [], [], [], []\n\n # Build contraction tuple (positions, gemm, einsum_str, remaining)\n for cnum, contract_inds in enumerate(path):\n # Make sure we remove inds from right to left\n contract_inds = tuple(sorted(contract_inds, reverse=True))\n\n contract = _find_contraction(contract_inds, input_sets, output_set)\n out_inds, input_sets, idx_removed, idx_contract = contract\n\n cost = _flop_count(\n idx_contract, idx_removed, len(contract_inds), dimension_dict\n )\n cost_list.append(cost)\n scale_list.append(len(idx_contract))\n size_list.append(_compute_size_by_dict(out_inds, dimension_dict))\n\n bcast = set()\n tmp_inputs = []\n for x in contract_inds:\n tmp_inputs.append(input_list.pop(x))\n bcast |= broadcast_indices.pop(x)\n\n new_bcast_inds = bcast - idx_removed\n\n # If we're broadcasting, nix blas\n if not len(idx_removed & bcast):\n do_blas = _can_dot(tmp_inputs, out_inds, idx_removed)\n else:\n do_blas = False\n\n # Last contraction\n if (cnum - len(path)) == -1:\n idx_result = output_subscript\n else:\n sort_result = [(dimension_dict[ind], ind) for ind in out_inds]\n idx_result = "".join([x[1] for x in sorted(sort_result)])\n\n input_list.append(idx_result)\n broadcast_indices.append(new_bcast_inds)\n einsum_str = ",".join(tmp_inputs) + "->" + idx_result\n\n contraction = (\n contract_inds, idx_removed, einsum_str, input_list[:], do_blas\n )\n contraction_list.append(contraction)\n\n opt_cost = sum(cost_list) + 1\n\n if len(input_list) != 1:\n # Explicit "einsum_path" is usually trusted, but we detect this kind of\n # mistake in order to prevent from returning an intermediate value.\n raise RuntimeError(\n f"Invalid einsum_path is specified: {len(input_list) - 1} more "\n "operands has to be contracted.")\n\n if einsum_call_arg:\n return (operands, contraction_list)\n\n # Return the path along with a nice string representation\n overall_contraction = input_subscripts + "->" + output_subscript\n header = ("scaling", "current", 
"remaining")\n\n speedup = naive_cost / opt_cost\n max_i = max(size_list)\n\n path_print = f" Complete contraction: {overall_contraction}\n"\n path_print += f" Naive scaling: {len(indices)}\n"\n path_print += " Optimized scaling: %d\n" % max(scale_list)\n path_print += f" Naive FLOP count: {naive_cost:.3e}\n"\n path_print += f" Optimized FLOP count: {opt_cost:.3e}\n"\n path_print += f" Theoretical speedup: {speedup:3.3f}\n"\n path_print += f" Largest intermediate: {max_i:.3e} elements\n"\n path_print += "-" * 74 + "\n"\n path_print += "%6s %24s %40s\n" % header\n path_print += "-" * 74\n\n for n, contraction in enumerate(contraction_list):\n inds, idx_rm, einsum_str, remaining, blas = contraction\n remaining_str = ",".join(remaining) + "->" + output_subscript\n path_run = (scale_list[n], einsum_str, remaining_str)\n path_print += "\n%4d %24s %40s" % path_run\n\n path = ['einsum_path'] + path\n return (path, path_print)\n\n\ndef _einsum_dispatcher(*operands, out=None, optimize=None, **kwargs):\n # Arguably we dispatch on more arguments than we really should; see note in\n # _einsum_path_dispatcher for why.\n yield from operands\n yield out\n\n\n# Rewrite einsum to handle different cases\n@array_function_dispatch(_einsum_dispatcher, module='numpy')\ndef einsum(*operands, out=None, optimize=False, **kwargs):\n """\n einsum(subscripts, *operands, out=None, dtype=None, order='K',\n casting='safe', optimize=False)\n\n Evaluates the Einstein summation convention on the operands.\n\n Using the Einstein summation convention, many common multi-dimensional,\n linear algebraic array operations can be represented in a simple fashion.\n In *implicit* mode `einsum` computes these values.\n\n In *explicit* mode, `einsum` provides further flexibility to compute\n other array operations that might not be considered classical Einstein\n summation operations, by disabling, or forcing summation over specified\n subscript labels.\n\n See the notes and examples for clarification.\n\n 
Parameters\n ----------\n subscripts : str\n Specifies the subscripts for summation as comma separated list of\n subscript labels. An implicit (classical Einstein summation)\n calculation is performed unless the explicit indicator '->' is\n included as well as subscript labels of the precise output form.\n operands : list of array_like\n These are the arrays for the operation.\n out : ndarray, optional\n If provided, the calculation is done into this array.\n dtype : {data-type, None}, optional\n If provided, forces the calculation to use the data type specified.\n Note that you may have to also give a more liberal `casting`\n parameter to allow the conversions. Default is None.\n order : {'C', 'F', 'A', 'K'}, optional\n Controls the memory layout of the output. 'C' means it should\n be C contiguous. 'F' means it should be Fortran contiguous,\n 'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise.\n 'K' means it should be as close to the layout as the inputs as\n is possible, including arbitrarily permuted axes.\n Default is 'K'.\n casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional\n Controls what kind of data casting may occur. Setting this to\n 'unsafe' is not recommended, as it can adversely affect accumulations.\n\n * 'no' means the data types should not be cast at all.\n * 'equiv' means only byte-order changes are allowed.\n * 'safe' means only casts which can preserve values are allowed.\n * 'same_kind' means only safe casts or casts within a kind,\n like float64 to float32, are allowed.\n * 'unsafe' means any data conversions may be done.\n\n Default is 'safe'.\n optimize : {False, True, 'greedy', 'optimal'}, optional\n Controls if intermediate optimization should occur. No optimization\n will occur if False and True will default to the 'greedy' algorithm.\n Also accepts an explicit contraction list from the ``np.einsum_path``\n function. See ``np.einsum_path`` for more details. 
Defaults to False.\n\n Returns\n -------\n output : ndarray\n The calculation based on the Einstein summation convention.\n\n See Also\n --------\n einsum_path, dot, inner, outer, tensordot, linalg.multi_dot\n einsum:\n Similar verbose interface is provided by the\n `einops <https://github.com/arogozhnikov/einops>`_ package to cover\n additional operations: transpose, reshape/flatten, repeat/tile,\n squeeze/unsqueeze and reductions.\n The `opt_einsum <https://optimized-einsum.readthedocs.io/en/stable/>`_\n optimizes contraction order for einsum-like expressions\n in backend-agnostic manner.\n\n Notes\n -----\n The Einstein summation convention can be used to compute\n many multi-dimensional, linear algebraic array operations. `einsum`\n provides a succinct way of representing these.\n\n A non-exhaustive list of these operations,\n which can be computed by `einsum`, is shown below along with examples:\n\n * Trace of an array, :py:func:`numpy.trace`.\n * Return a diagonal, :py:func:`numpy.diag`.\n * Array axis summations, :py:func:`numpy.sum`.\n * Transpositions and permutations, :py:func:`numpy.transpose`.\n * Matrix multiplication and dot product, :py:func:`numpy.matmul`\n :py:func:`numpy.dot`.\n * Vector inner and outer products, :py:func:`numpy.inner`\n :py:func:`numpy.outer`.\n * Broadcasting, element-wise and scalar multiplication,\n :py:func:`numpy.multiply`.\n * Tensor contractions, :py:func:`numpy.tensordot`.\n * Chained array operations, in efficient calculation order,\n :py:func:`numpy.einsum_path`.\n\n The subscripts string is a comma-separated list of subscript labels,\n where each label refers to a dimension of the corresponding operand.\n Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)``\n is equivalent to :py:func:`np.inner(a,b) <numpy.inner>`. If a label\n appears only once, it is not summed, so ``np.einsum('i', a)``\n produces a view of ``a`` with no changes. 
A further example\n ``np.einsum('ij,jk', a, b)`` describes traditional matrix multiplication\n and is equivalent to :py:func:`np.matmul(a,b) <numpy.matmul>`.\n Repeated subscript labels in one operand take the diagonal.\n For example, ``np.einsum('ii', a)`` is equivalent to\n :py:func:`np.trace(a) <numpy.trace>`.\n\n In *implicit mode*, the chosen subscripts are important\n since the axes of the output are reordered alphabetically. This\n means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while\n ``np.einsum('ji', a)`` takes its transpose. Additionally,\n ``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while,\n ``np.einsum('ij,jh', a, b)`` returns the transpose of the\n multiplication since subscript 'h' precedes subscript 'i'.\n\n In *explicit mode* the output can be directly controlled by\n specifying output subscript labels. This requires the\n identifier '->' as well as the list of output subscript labels.\n This feature increases the flexibility of the function since\n summing can be disabled or forced when required. The call\n ``np.einsum('i->', a)`` is like :py:func:`np.sum(a) <numpy.sum>`\n if ``a`` is a 1-D array, and ``np.einsum('ii->i', a)``\n is like :py:func:`np.diag(a) <numpy.diag>` if ``a`` is a square 2-D array.\n The difference is that `einsum` does not allow broadcasting by default.\n Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the\n order of the output subscript labels and therefore returns matrix\n multiplication, unlike the example above in implicit mode.\n\n To enable and control broadcasting, use an ellipsis. 
Default\n NumPy-style broadcasting is done by adding an ellipsis\n to the left of each term, like ``np.einsum('...ii->...i', a)``.\n ``np.einsum('...i->...', a)`` is like\n :py:func:`np.sum(a, axis=-1) <numpy.sum>` for array ``a`` of any shape.\n To take the trace along the first and last axes,\n you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix\n product with the left-most indices instead of rightmost, one can do\n ``np.einsum('ij...,jk...->ik...', a, b)``.\n\n When there is only one operand, no axes are summed, and no output\n parameter is provided, a view into the operand is returned instead\n of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)``\n produces a view (changed in version 1.10.0).\n\n `einsum` also provides an alternative way to provide the subscripts and\n operands as ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``.\n If the output shape is not provided in this format `einsum` will be\n calculated in implicit mode, otherwise it will be performed explicitly.\n The examples below have corresponding `einsum` calls with the two\n parameter methods.\n\n Views returned from einsum are now writeable whenever the input array\n is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now\n have the same effect as :py:func:`np.swapaxes(a, 0, 2) <numpy.swapaxes>`\n and ``np.einsum('ii->i', a)`` will return a writeable view of the diagonal\n of a 2D array.\n\n Added the ``optimize`` argument which will optimize the contraction order\n of an einsum expression. For a contraction with three or more operands\n this can greatly increase the computational efficiency at the cost of\n a larger memory footprint during computation.\n\n Typically a 'greedy' algorithm is applied which empirical tests have shown\n returns the optimal path in the majority of cases. In some cases 'optimal'\n will return the superlative path through a more expensive, exhaustive\n search. 
For iterative calculations it may be advisable to calculate\n the optimal path once and reuse that path by supplying it as an argument.\n An example is given below.\n\n See :py:func:`numpy.einsum_path` for more details.\n\n Examples\n --------\n >>> a = np.arange(25).reshape(5,5)\n >>> b = np.arange(5)\n >>> c = np.arange(6).reshape(2,3)\n\n Trace of a matrix:\n\n >>> np.einsum('ii', a)\n 60\n >>> np.einsum(a, [0,0])\n 60\n >>> np.trace(a)\n 60\n\n Extract the diagonal (requires explicit form):\n\n >>> np.einsum('ii->i', a)\n array([ 0, 6, 12, 18, 24])\n >>> np.einsum(a, [0,0], [0])\n array([ 0, 6, 12, 18, 24])\n >>> np.diag(a)\n array([ 0, 6, 12, 18, 24])\n\n Sum over an axis (requires explicit form):\n\n >>> np.einsum('ij->i', a)\n array([ 10, 35, 60, 85, 110])\n >>> np.einsum(a, [0,1], [0])\n array([ 10, 35, 60, 85, 110])\n >>> np.sum(a, axis=1)\n array([ 10, 35, 60, 85, 110])\n\n For higher dimensional arrays summing a single axis can be done\n with ellipsis:\n\n >>> np.einsum('...j->...', a)\n array([ 10, 35, 60, 85, 110])\n >>> np.einsum(a, [Ellipsis,1], [Ellipsis])\n array([ 10, 35, 60, 85, 110])\n\n Compute a matrix transpose, or reorder any number of axes:\n\n >>> np.einsum('ji', c)\n array([[0, 3],\n [1, 4],\n [2, 5]])\n >>> np.einsum('ij->ji', c)\n array([[0, 3],\n [1, 4],\n [2, 5]])\n >>> np.einsum(c, [1,0])\n array([[0, 3],\n [1, 4],\n [2, 5]])\n >>> np.transpose(c)\n array([[0, 3],\n [1, 4],\n [2, 5]])\n\n Vector inner products:\n\n >>> np.einsum('i,i', b, b)\n 30\n >>> np.einsum(b, [0], b, [0])\n 30\n >>> np.inner(b,b)\n 30\n\n Matrix vector multiplication:\n\n >>> np.einsum('ij,j', a, b)\n array([ 30, 80, 130, 180, 230])\n >>> np.einsum(a, [0,1], b, [1])\n array([ 30, 80, 130, 180, 230])\n >>> np.dot(a, b)\n array([ 30, 80, 130, 180, 230])\n >>> np.einsum('...j,j', a, b)\n array([ 30, 80, 130, 180, 230])\n\n Broadcasting and scalar multiplication:\n\n >>> np.einsum('..., ...', 3, c)\n array([[ 0, 3, 6],\n [ 9, 12, 15]])\n >>> np.einsum(',ij', 3, 
c)\n array([[ 0, 3, 6],\n [ 9, 12, 15]])\n >>> np.einsum(3, [Ellipsis], c, [Ellipsis])\n array([[ 0, 3, 6],\n [ 9, 12, 15]])\n >>> np.multiply(3, c)\n array([[ 0, 3, 6],\n [ 9, 12, 15]])\n\n Vector outer product:\n\n >>> np.einsum('i,j', np.arange(2)+1, b)\n array([[0, 1, 2, 3, 4],\n [0, 2, 4, 6, 8]])\n >>> np.einsum(np.arange(2)+1, [0], b, [1])\n array([[0, 1, 2, 3, 4],\n [0, 2, 4, 6, 8]])\n >>> np.outer(np.arange(2)+1, b)\n array([[0, 1, 2, 3, 4],\n [0, 2, 4, 6, 8]])\n\n Tensor contraction:\n\n >>> a = np.arange(60.).reshape(3,4,5)\n >>> b = np.arange(24.).reshape(4,3,2)\n >>> np.einsum('ijk,jil->kl', a, b)\n array([[4400., 4730.],\n [4532., 4874.],\n [4664., 5018.],\n [4796., 5162.],\n [4928., 5306.]])\n >>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3])\n array([[4400., 4730.],\n [4532., 4874.],\n [4664., 5018.],\n [4796., 5162.],\n [4928., 5306.]])\n >>> np.tensordot(a,b, axes=([1,0],[0,1]))\n array([[4400., 4730.],\n [4532., 4874.],\n [4664., 5018.],\n [4796., 5162.],\n [4928., 5306.]])\n\n Writeable returned arrays (since version 1.10.0):\n\n >>> a = np.zeros((3, 3))\n >>> np.einsum('ii->i', a)[:] = 1\n >>> a\n array([[1., 0., 0.],\n [0., 1., 0.],\n [0., 0., 1.]])\n\n Example of ellipsis use:\n\n >>> a = np.arange(6).reshape((3,2))\n >>> b = np.arange(12).reshape((4,3))\n >>> np.einsum('ki,jk->ij', a, b)\n array([[10, 28, 46, 64],\n [13, 40, 67, 94]])\n >>> np.einsum('ki,...k->i...', a, b)\n array([[10, 28, 46, 64],\n [13, 40, 67, 94]])\n >>> np.einsum('k...,jk', a, b)\n array([[10, 28, 46, 64],\n [13, 40, 67, 94]])\n\n Chained array operations. For more complicated contractions, speed ups\n might be achieved by repeatedly computing a 'greedy' path or pre-computing\n the 'optimal' path and repeatedly applying it, using an `einsum_path`\n insertion (since version 1.12.0). 
Performance improvements can be\n particularly significant with larger arrays:\n\n >>> a = np.ones(64).reshape(2,4,8)\n\n Basic `einsum`: ~1520ms (benchmarked on 3.1GHz Intel i5.)\n\n >>> for iteration in range(500):\n ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a)\n\n Sub-optimal `einsum` (due to repeated path calculation time): ~330ms\n\n >>> for iteration in range(500):\n ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a,\n ... optimize='optimal')\n\n Greedy `einsum` (faster optimal path approximation): ~160ms\n\n >>> for iteration in range(500):\n ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='greedy')\n\n Optimal `einsum` (best usage pattern in some use cases): ~110ms\n\n >>> path = np.einsum_path('ijk,ilm,njm,nlk,abc->',a,a,a,a,a,\n ... optimize='optimal')[0]\n >>> for iteration in range(500):\n ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize=path)\n\n """\n # Special handling if out is specified\n specified_out = out is not None\n\n # If no optimization, run pure einsum\n if optimize is False:\n if specified_out:\n kwargs['out'] = out\n return c_einsum(*operands, **kwargs)\n\n # Check the kwargs to avoid a more cryptic error later, without having to\n # repeat default values here\n valid_einsum_kwargs = ['dtype', 'order', 'casting']\n unknown_kwargs = [k for (k, v) in kwargs.items() if\n k not in valid_einsum_kwargs]\n if len(unknown_kwargs):\n raise TypeError(f"Did not understand the following kwargs: {unknown_kwargs}")\n\n # Build the contraction list and operand\n operands, contraction_list = einsum_path(*operands, optimize=optimize,\n einsum_call=True)\n\n # Handle order kwarg for output array, c_einsum allows mixed case\n output_order = kwargs.pop('order', 'K')\n if output_order.upper() == 'A':\n if all(arr.flags.f_contiguous for arr in operands):\n output_order = 'F'\n else:\n output_order = 'C'\n\n # Start contraction loop\n for num, contraction in enumerate(contraction_list):\n inds, idx_rm, einsum_str, 
remaining, blas = contraction\n tmp_operands = [operands.pop(x) for x in inds]\n\n # Do we need to deal with the output?\n handle_out = specified_out and ((num + 1) == len(contraction_list))\n\n # Call tensordot if still possible\n if blas:\n # Checks have already been handled\n input_str, results_index = einsum_str.split('->')\n input_left, input_right = input_str.split(',')\n\n tensor_result = input_left + input_right\n for s in idx_rm:\n tensor_result = tensor_result.replace(s, "")\n\n # Find indices to contract over\n left_pos, right_pos = [], []\n for s in sorted(idx_rm):\n left_pos.append(input_left.find(s))\n right_pos.append(input_right.find(s))\n\n # Contract!\n new_view = tensordot(\n *tmp_operands, axes=(tuple(left_pos), tuple(right_pos))\n )\n\n # Build a new view if needed\n if (tensor_result != results_index) or handle_out:\n if handle_out:\n kwargs["out"] = out\n new_view = c_einsum(\n tensor_result + '->' + results_index, new_view, **kwargs\n )\n\n # Call einsum\n else:\n # If out was specified\n if handle_out:\n kwargs["out"] = out\n\n # Do the contraction\n new_view = c_einsum(einsum_str, *tmp_operands, **kwargs)\n\n # Append new items and dereference what we can\n operands.append(new_view)\n del tmp_operands, new_view\n\n if specified_out:\n return out\n else:\n return asanyarray(operands[0], order=output_order)\n
.venv\Lib\site-packages\numpy\_core\einsumfunc.py
einsumfunc.py
Python
54,318
0.75
0.122163
0.112825
python-kit
597
2024-07-10T18:52:52.066161
MIT
false
e34f08e11bc09daa72e1c7e1d28ab099
from collections.abc import Sequence\nfrom typing import Any, Literal, TypeAlias, TypeVar, overload\n\nimport numpy as np\nfrom numpy import _OrderKACF, number\nfrom numpy._typing import (\n NDArray,\n _ArrayLikeBool_co,\n _ArrayLikeComplex_co,\n _ArrayLikeFloat_co,\n _ArrayLikeInt_co,\n _ArrayLikeObject_co,\n _ArrayLikeUInt_co,\n _DTypeLikeBool,\n _DTypeLikeComplex,\n _DTypeLikeComplex_co,\n _DTypeLikeFloat,\n _DTypeLikeInt,\n _DTypeLikeObject,\n _DTypeLikeUInt,\n)\n\n__all__ = ["einsum", "einsum_path"]\n\n_ArrayT = TypeVar(\n "_ArrayT",\n bound=NDArray[np.bool | number],\n)\n\n_OptimizeKind: TypeAlias = bool | Literal["greedy", "optimal"] | Sequence[Any] | None\n_CastingSafe: TypeAlias = Literal["no", "equiv", "safe", "same_kind"]\n_CastingUnsafe: TypeAlias = Literal["unsafe"]\n\n# TODO: Properly handle the `casting`-based combinatorics\n# TODO: We need to evaluate the content `__subscripts` in order\n# to identify whether or an array or scalar is returned. At a cursory\n# glance this seems like something that can quite easily be done with\n# a mypy plugin.\n# Something like `is_scalar = bool(__subscripts.partition("->")[-1])`\n@overload\ndef einsum(\n subscripts: str | _ArrayLikeInt_co,\n /,\n *operands: _ArrayLikeBool_co,\n out: None = ...,\n dtype: _DTypeLikeBool | None = ...,\n order: _OrderKACF = ...,\n casting: _CastingSafe = ...,\n optimize: _OptimizeKind = ...,\n) -> Any: ...\n@overload\ndef einsum(\n subscripts: str | _ArrayLikeInt_co,\n /,\n *operands: _ArrayLikeUInt_co,\n out: None = ...,\n dtype: _DTypeLikeUInt | None = ...,\n order: _OrderKACF = ...,\n casting: _CastingSafe = ...,\n optimize: _OptimizeKind = ...,\n) -> Any: ...\n@overload\ndef einsum(\n subscripts: str | _ArrayLikeInt_co,\n /,\n *operands: _ArrayLikeInt_co,\n out: None = ...,\n dtype: _DTypeLikeInt | None = ...,\n order: _OrderKACF = ...,\n casting: _CastingSafe = ...,\n optimize: _OptimizeKind = ...,\n) -> Any: ...\n@overload\ndef einsum(\n subscripts: str | _ArrayLikeInt_co,\n /,\n 
*operands: _ArrayLikeFloat_co,\n out: None = ...,\n dtype: _DTypeLikeFloat | None = ...,\n order: _OrderKACF = ...,\n casting: _CastingSafe = ...,\n optimize: _OptimizeKind = ...,\n) -> Any: ...\n@overload\ndef einsum(\n subscripts: str | _ArrayLikeInt_co,\n /,\n *operands: _ArrayLikeComplex_co,\n out: None = ...,\n dtype: _DTypeLikeComplex | None = ...,\n order: _OrderKACF = ...,\n casting: _CastingSafe = ...,\n optimize: _OptimizeKind = ...,\n) -> Any: ...\n@overload\ndef einsum(\n subscripts: str | _ArrayLikeInt_co,\n /,\n *operands: Any,\n casting: _CastingUnsafe,\n dtype: _DTypeLikeComplex_co | None = ...,\n out: None = ...,\n order: _OrderKACF = ...,\n optimize: _OptimizeKind = ...,\n) -> Any: ...\n@overload\ndef einsum(\n subscripts: str | _ArrayLikeInt_co,\n /,\n *operands: _ArrayLikeComplex_co,\n out: _ArrayT,\n dtype: _DTypeLikeComplex_co | None = ...,\n order: _OrderKACF = ...,\n casting: _CastingSafe = ...,\n optimize: _OptimizeKind = ...,\n) -> _ArrayT: ...\n@overload\ndef einsum(\n subscripts: str | _ArrayLikeInt_co,\n /,\n *operands: Any,\n out: _ArrayT,\n casting: _CastingUnsafe,\n dtype: _DTypeLikeComplex_co | None = ...,\n order: _OrderKACF = ...,\n optimize: _OptimizeKind = ...,\n) -> _ArrayT: ...\n\n@overload\ndef einsum(\n subscripts: str | _ArrayLikeInt_co,\n /,\n *operands: _ArrayLikeObject_co,\n out: None = ...,\n dtype: _DTypeLikeObject | None = ...,\n order: _OrderKACF = ...,\n casting: _CastingSafe = ...,\n optimize: _OptimizeKind = ...,\n) -> Any: ...\n@overload\ndef einsum(\n subscripts: str | _ArrayLikeInt_co,\n /,\n *operands: Any,\n casting: _CastingUnsafe,\n dtype: _DTypeLikeObject | None = ...,\n out: None = ...,\n order: _OrderKACF = ...,\n optimize: _OptimizeKind = ...,\n) -> Any: ...\n@overload\ndef einsum(\n subscripts: str | _ArrayLikeInt_co,\n /,\n *operands: _ArrayLikeObject_co,\n out: _ArrayT,\n dtype: _DTypeLikeObject | None = ...,\n order: _OrderKACF = ...,\n casting: _CastingSafe = ...,\n optimize: _OptimizeKind = 
...,\n) -> _ArrayT: ...\n@overload\ndef einsum(\n subscripts: str | _ArrayLikeInt_co,\n /,\n *operands: Any,\n out: _ArrayT,\n casting: _CastingUnsafe,\n dtype: _DTypeLikeObject | None = ...,\n order: _OrderKACF = ...,\n optimize: _OptimizeKind = ...,\n) -> _ArrayT: ...\n\n# NOTE: `einsum_call` is a hidden kwarg unavailable for public use.\n# It is therefore excluded from the signatures below.\n# NOTE: In practice the list consists of a `str` (first element)\n# and a variable number of integer tuples.\ndef einsum_path(\n subscripts: str | _ArrayLikeInt_co,\n /,\n *operands: _ArrayLikeComplex_co | _DTypeLikeObject,\n optimize: _OptimizeKind = "greedy",\n einsum_call: Literal[False] = False,\n) -> tuple[list[Any], str]: ...\n
.venv\Lib\site-packages\numpy\_core\einsumfunc.pyi
einsumfunc.pyi
Other
5,077
0.95
0.076087
0.129944
react-lib
390
2024-10-03T06:59:35.618328
MIT
false
e4b12fe4445a87121e15461caab623e8
# ruff: noqa: ANN401\nfrom collections.abc import Sequence\nfrom typing import (\n Any,\n Literal,\n Never,\n Protocol,\n SupportsIndex,\n TypeAlias,\n TypeVar,\n overload,\n type_check_only,\n)\n\nfrom _typeshed import Incomplete\nfrom typing_extensions import deprecated\n\nimport numpy as np\nfrom numpy import (\n _AnyShapeT,\n _CastingKind,\n _ModeKind,\n _OrderACF,\n _OrderKACF,\n _PartitionKind,\n _SortKind,\n _SortSide,\n complexfloating,\n float16,\n floating,\n generic,\n int64,\n int_,\n intp,\n object_,\n timedelta64,\n uint64,\n)\nfrom numpy._globals import _NoValueType\nfrom numpy._typing import (\n ArrayLike,\n DTypeLike,\n NDArray,\n _AnyShape,\n _ArrayLike,\n _ArrayLikeBool_co,\n _ArrayLikeComplex_co,\n _ArrayLikeFloat_co,\n _ArrayLikeInt,\n _ArrayLikeInt_co,\n _ArrayLikeObject_co,\n _ArrayLikeUInt_co,\n _BoolLike_co,\n _ComplexLike_co,\n _DTypeLike,\n _IntLike_co,\n _NestedSequence,\n _NumberLike_co,\n _ScalarLike_co,\n _ShapeLike,\n)\n\n__all__ = [\n "all",\n "amax",\n "amin",\n "any",\n "argmax",\n "argmin",\n "argpartition",\n "argsort",\n "around",\n "choose",\n "clip",\n "compress",\n "cumprod",\n "cumsum",\n "cumulative_prod",\n "cumulative_sum",\n "diagonal",\n "mean",\n "max",\n "min",\n "matrix_transpose",\n "ndim",\n "nonzero",\n "partition",\n "prod",\n "ptp",\n "put",\n "ravel",\n "repeat",\n "reshape",\n "resize",\n "round",\n "searchsorted",\n "shape",\n "size",\n "sort",\n "squeeze",\n "std",\n "sum",\n "swapaxes",\n "take",\n "trace",\n "transpose",\n "var",\n]\n\n_ScalarT = TypeVar("_ScalarT", bound=generic)\n_NumberOrObjectT = TypeVar("_NumberOrObjectT", bound=np.number | np.object_)\n_ArrayT = TypeVar("_ArrayT", bound=np.ndarray[Any, Any])\n_ShapeT = TypeVar("_ShapeT", bound=tuple[int, ...])\n_ShapeT_co = TypeVar("_ShapeT_co", bound=tuple[int, ...], covariant=True)\n_BoolOrIntArrayT = TypeVar("_BoolOrIntArrayT", bound=NDArray[np.integer | np.bool])\n\n@type_check_only\nclass _SupportsShape(Protocol[_ShapeT_co]):\n # NOTE: it 
matters that `self` is positional only\n @property\n def shape(self, /) -> _ShapeT_co: ...\n\n# a "sequence" that isn't a string, bytes, bytearray, or memoryview\n_T = TypeVar("_T")\n_PyArray: TypeAlias = list[_T] | tuple[_T, ...]\n# `int` also covers `bool`\n_PyScalar: TypeAlias = complex | bytes | str\n\n@overload\ndef take(\n a: _ArrayLike[_ScalarT],\n indices: _IntLike_co,\n axis: None = ...,\n out: None = ...,\n mode: _ModeKind = ...,\n) -> _ScalarT: ...\n@overload\ndef take(\n a: ArrayLike,\n indices: _IntLike_co,\n axis: SupportsIndex | None = ...,\n out: None = ...,\n mode: _ModeKind = ...,\n) -> Any: ...\n@overload\ndef take(\n a: _ArrayLike[_ScalarT],\n indices: _ArrayLikeInt_co,\n axis: SupportsIndex | None = ...,\n out: None = ...,\n mode: _ModeKind = ...,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef take(\n a: ArrayLike,\n indices: _ArrayLikeInt_co,\n axis: SupportsIndex | None = ...,\n out: None = ...,\n mode: _ModeKind = ...,\n) -> NDArray[Any]: ...\n@overload\ndef take(\n a: ArrayLike,\n indices: _ArrayLikeInt_co,\n axis: SupportsIndex | None,\n out: _ArrayT,\n mode: _ModeKind = ...,\n) -> _ArrayT: ...\n@overload\ndef take(\n a: ArrayLike,\n indices: _ArrayLikeInt_co,\n axis: SupportsIndex | None = ...,\n *,\n out: _ArrayT,\n mode: _ModeKind = ...,\n) -> _ArrayT: ...\n\n@overload\ndef reshape( # shape: index\n a: _ArrayLike[_ScalarT],\n /,\n shape: SupportsIndex,\n order: _OrderACF = "C",\n *,\n copy: bool | None = None,\n) -> np.ndarray[tuple[int], np.dtype[_ScalarT]]: ...\n@overload\ndef reshape( # shape: (int, ...) 
@ _AnyShapeT\n a: _ArrayLike[_ScalarT],\n /,\n shape: _AnyShapeT,\n order: _OrderACF = "C",\n *,\n copy: bool | None = None,\n) -> np.ndarray[_AnyShapeT, np.dtype[_ScalarT]]: ...\n@overload # shape: Sequence[index]\ndef reshape(\n a: _ArrayLike[_ScalarT],\n /,\n shape: Sequence[SupportsIndex],\n order: _OrderACF = "C",\n *,\n copy: bool | None = None,\n) -> NDArray[_ScalarT]: ...\n@overload # shape: index\ndef reshape(\n a: ArrayLike,\n /,\n shape: SupportsIndex,\n order: _OrderACF = "C",\n *,\n copy: bool | None = None,\n) -> np.ndarray[tuple[int], np.dtype]: ...\n@overload\ndef reshape( # shape: (int, ...) @ _AnyShapeT\n a: ArrayLike,\n /,\n shape: _AnyShapeT,\n order: _OrderACF = "C",\n *,\n copy: bool | None = None,\n) -> np.ndarray[_AnyShapeT, np.dtype]: ...\n@overload # shape: Sequence[index]\ndef reshape(\n a: ArrayLike,\n /,\n shape: Sequence[SupportsIndex],\n order: _OrderACF = "C",\n *,\n copy: bool | None = None,\n) -> NDArray[Any]: ...\n@overload\n@deprecated(\n "`newshape` keyword argument is deprecated, "\n "use `shape=...` or pass shape positionally instead. 
"\n "(deprecated in NumPy 2.1)",\n)\ndef reshape(\n a: ArrayLike,\n /,\n shape: None = None,\n order: _OrderACF = "C",\n *,\n newshape: _ShapeLike,\n copy: bool | None = None,\n) -> NDArray[Any]: ...\n\n@overload\ndef choose(\n a: _IntLike_co,\n choices: ArrayLike,\n out: None = ...,\n mode: _ModeKind = ...,\n) -> Any: ...\n@overload\ndef choose(\n a: _ArrayLikeInt_co,\n choices: _ArrayLike[_ScalarT],\n out: None = ...,\n mode: _ModeKind = ...,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef choose(\n a: _ArrayLikeInt_co,\n choices: ArrayLike,\n out: None = ...,\n mode: _ModeKind = ...,\n) -> NDArray[Any]: ...\n@overload\ndef choose(\n a: _ArrayLikeInt_co,\n choices: ArrayLike,\n out: _ArrayT,\n mode: _ModeKind = ...,\n) -> _ArrayT: ...\n\n@overload\ndef repeat(\n a: _ArrayLike[_ScalarT],\n repeats: _ArrayLikeInt_co,\n axis: None = None,\n) -> np.ndarray[tuple[int], np.dtype[_ScalarT]]: ...\n@overload\ndef repeat(\n a: _ArrayLike[_ScalarT],\n repeats: _ArrayLikeInt_co,\n axis: SupportsIndex,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef repeat(\n a: ArrayLike,\n repeats: _ArrayLikeInt_co,\n axis: None = None,\n) -> np.ndarray[tuple[int], np.dtype[Any]]: ...\n@overload\ndef repeat(\n a: ArrayLike,\n repeats: _ArrayLikeInt_co,\n axis: SupportsIndex,\n) -> NDArray[Any]: ...\n\ndef put(\n a: NDArray[Any],\n ind: _ArrayLikeInt_co,\n v: ArrayLike,\n mode: _ModeKind = ...,\n) -> None: ...\n\n@overload\ndef swapaxes(\n a: _ArrayLike[_ScalarT],\n axis1: SupportsIndex,\n axis2: SupportsIndex,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef swapaxes(\n a: ArrayLike,\n axis1: SupportsIndex,\n axis2: SupportsIndex,\n) -> NDArray[Any]: ...\n\n@overload\ndef transpose(\n a: _ArrayLike[_ScalarT],\n axes: _ShapeLike | None = ...\n) -> NDArray[_ScalarT]: ...\n@overload\ndef transpose(\n a: ArrayLike,\n axes: _ShapeLike | None = ...\n) -> NDArray[Any]: ...\n\n@overload\ndef matrix_transpose(x: _ArrayLike[_ScalarT], /) -> NDArray[_ScalarT]: ...\n@overload\ndef matrix_transpose(x: ArrayLike, 
/) -> NDArray[Any]: ...\n\n#\n@overload\ndef partition(\n a: _ArrayLike[_ScalarT],\n kth: _ArrayLikeInt,\n axis: SupportsIndex | None = -1,\n kind: _PartitionKind = "introselect",\n order: None = None,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef partition(\n a: _ArrayLike[np.void],\n kth: _ArrayLikeInt,\n axis: SupportsIndex | None = -1,\n kind: _PartitionKind = "introselect",\n order: str | Sequence[str] | None = None,\n) -> NDArray[np.void]: ...\n@overload\ndef partition(\n a: ArrayLike,\n kth: _ArrayLikeInt,\n axis: SupportsIndex | None = -1,\n kind: _PartitionKind = "introselect",\n order: str | Sequence[str] | None = None,\n) -> NDArray[Any]: ...\n\n#\ndef argpartition(\n a: ArrayLike,\n kth: _ArrayLikeInt,\n axis: SupportsIndex | None = -1,\n kind: _PartitionKind = "introselect",\n order: str | Sequence[str] | None = None,\n) -> NDArray[intp]: ...\n\n#\n@overload\ndef sort(\n a: _ArrayLike[_ScalarT],\n axis: SupportsIndex | None = ...,\n kind: _SortKind | None = ...,\n order: str | Sequence[str] | None = ...,\n *,\n stable: bool | None = ...,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef sort(\n a: ArrayLike,\n axis: SupportsIndex | None = ...,\n kind: _SortKind | None = ...,\n order: str | Sequence[str] | None = ...,\n *,\n stable: bool | None = ...,\n) -> NDArray[Any]: ...\n\ndef argsort(\n a: ArrayLike,\n axis: SupportsIndex | None = ...,\n kind: _SortKind | None = ...,\n order: str | Sequence[str] | None = ...,\n *,\n stable: bool | None = ...,\n) -> NDArray[intp]: ...\n\n@overload\ndef argmax(\n a: ArrayLike,\n axis: None = ...,\n out: None = ...,\n *,\n keepdims: Literal[False] = ...,\n) -> intp: ...\n@overload\ndef argmax(\n a: ArrayLike,\n axis: SupportsIndex | None = ...,\n out: None = ...,\n *,\n keepdims: bool = ...,\n) -> Any: ...\n@overload\ndef argmax(\n a: ArrayLike,\n axis: SupportsIndex | None,\n out: _BoolOrIntArrayT,\n *,\n keepdims: bool = ...,\n) -> _BoolOrIntArrayT: ...\n@overload\ndef argmax(\n a: ArrayLike,\n axis: SupportsIndex | None 
= ...,\n *,\n out: _BoolOrIntArrayT,\n keepdims: bool = ...,\n) -> _BoolOrIntArrayT: ...\n\n@overload\ndef argmin(\n a: ArrayLike,\n axis: None = ...,\n out: None = ...,\n *,\n keepdims: Literal[False] = ...,\n) -> intp: ...\n@overload\ndef argmin(\n a: ArrayLike,\n axis: SupportsIndex | None = ...,\n out: None = ...,\n *,\n keepdims: bool = ...,\n) -> Any: ...\n@overload\ndef argmin(\n a: ArrayLike,\n axis: SupportsIndex | None,\n out: _BoolOrIntArrayT,\n *,\n keepdims: bool = ...,\n) -> _BoolOrIntArrayT: ...\n@overload\ndef argmin(\n a: ArrayLike,\n axis: SupportsIndex | None = ...,\n *,\n out: _BoolOrIntArrayT,\n keepdims: bool = ...,\n) -> _BoolOrIntArrayT: ...\n\n@overload\ndef searchsorted(\n a: ArrayLike,\n v: _ScalarLike_co,\n side: _SortSide = ...,\n sorter: _ArrayLikeInt_co | None = ..., # 1D int array\n) -> intp: ...\n@overload\ndef searchsorted(\n a: ArrayLike,\n v: ArrayLike,\n side: _SortSide = ...,\n sorter: _ArrayLikeInt_co | None = ..., # 1D int array\n) -> NDArray[intp]: ...\n\n#\n@overload\ndef resize(a: _ArrayLike[_ScalarT], new_shape: SupportsIndex | tuple[SupportsIndex]) -> np.ndarray[tuple[int], np.dtype[_ScalarT]]: ...\n@overload\ndef resize(a: _ArrayLike[_ScalarT], new_shape: _AnyShapeT) -> np.ndarray[_AnyShapeT, np.dtype[_ScalarT]]: ...\n@overload\ndef resize(a: _ArrayLike[_ScalarT], new_shape: _ShapeLike) -> NDArray[_ScalarT]: ...\n@overload\ndef resize(a: ArrayLike, new_shape: SupportsIndex | tuple[SupportsIndex]) -> np.ndarray[tuple[int], np.dtype]: ...\n@overload\ndef resize(a: ArrayLike, new_shape: _AnyShapeT) -> np.ndarray[_AnyShapeT, np.dtype]: ...\n@overload\ndef resize(a: ArrayLike, new_shape: _ShapeLike) -> NDArray[Any]: ...\n\n@overload\ndef squeeze(\n a: _ScalarT,\n axis: _ShapeLike | None = ...,\n) -> _ScalarT: ...\n@overload\ndef squeeze(\n a: _ArrayLike[_ScalarT],\n axis: _ShapeLike | None = ...,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef squeeze(\n a: ArrayLike,\n axis: _ShapeLike | None = ...,\n) -> NDArray[Any]: 
...\n\n@overload\ndef diagonal(\n a: _ArrayLike[_ScalarT],\n offset: SupportsIndex = ...,\n axis1: SupportsIndex = ...,\n axis2: SupportsIndex = ..., # >= 2D array\n) -> NDArray[_ScalarT]: ...\n@overload\ndef diagonal(\n a: ArrayLike,\n offset: SupportsIndex = ...,\n axis1: SupportsIndex = ...,\n axis2: SupportsIndex = ..., # >= 2D array\n) -> NDArray[Any]: ...\n\n@overload\ndef trace(\n a: ArrayLike, # >= 2D array\n offset: SupportsIndex = ...,\n axis1: SupportsIndex = ...,\n axis2: SupportsIndex = ...,\n dtype: DTypeLike = ...,\n out: None = ...,\n) -> Any: ...\n@overload\ndef trace(\n a: ArrayLike, # >= 2D array\n offset: SupportsIndex,\n axis1: SupportsIndex,\n axis2: SupportsIndex,\n dtype: DTypeLike,\n out: _ArrayT,\n) -> _ArrayT: ...\n@overload\ndef trace(\n a: ArrayLike, # >= 2D array\n offset: SupportsIndex = ...,\n axis1: SupportsIndex = ...,\n axis2: SupportsIndex = ...,\n dtype: DTypeLike = ...,\n *,\n out: _ArrayT,\n) -> _ArrayT: ...\n\n_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]]\n\n@overload\ndef ravel(a: _ArrayLike[_ScalarT], order: _OrderKACF = "C") -> _Array1D[_ScalarT]: ...\n@overload\ndef ravel(a: bytes | _NestedSequence[bytes], order: _OrderKACF = "C") -> _Array1D[np.bytes_]: ...\n@overload\ndef ravel(a: str | _NestedSequence[str], order: _OrderKACF = "C") -> _Array1D[np.str_]: ...\n@overload\ndef ravel(a: bool | _NestedSequence[bool], order: _OrderKACF = "C") -> _Array1D[np.bool]: ...\n@overload\ndef ravel(a: int | _NestedSequence[int], order: _OrderKACF = "C") -> _Array1D[np.int_ | np.bool]: ...\n@overload\ndef ravel(a: float | _NestedSequence[float], order: _OrderKACF = "C") -> _Array1D[np.float64 | np.int_ | np.bool]: ...\n@overload\ndef ravel(\n a: complex | _NestedSequence[complex],\n order: _OrderKACF = "C",\n) -> _Array1D[np.complex128 | np.float64 | np.int_ | np.bool]: ...\n@overload\ndef ravel(a: ArrayLike, order: _OrderKACF = "C") -> np.ndarray[tuple[int], np.dtype]: ...\n\ndef nonzero(a: _ArrayLike[Any]) -> 
tuple[NDArray[intp], ...]: ...\n\n# this prevents `Any` from being returned with Pyright\n@overload\ndef shape(a: _SupportsShape[Never]) -> _AnyShape: ...\n@overload\ndef shape(a: _SupportsShape[_ShapeT]) -> _ShapeT: ...\n@overload\ndef shape(a: _PyScalar) -> tuple[()]: ...\n# `collections.abc.Sequence` can't be used hesre, since `bytes` and `str` are\n# subtypes of it, which would make the return types incompatible.\n@overload\ndef shape(a: _PyArray[_PyScalar]) -> tuple[int]: ...\n@overload\ndef shape(a: _PyArray[_PyArray[_PyScalar]]) -> tuple[int, int]: ...\n# this overload will be skipped by typecheckers that don't support PEP 688\n@overload\ndef shape(a: memoryview | bytearray) -> tuple[int]: ...\n@overload\ndef shape(a: ArrayLike) -> _AnyShape: ...\n\n@overload\ndef compress(\n condition: _ArrayLikeBool_co, # 1D bool array\n a: _ArrayLike[_ScalarT],\n axis: SupportsIndex | None = ...,\n out: None = ...,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef compress(\n condition: _ArrayLikeBool_co, # 1D bool array\n a: ArrayLike,\n axis: SupportsIndex | None = ...,\n out: None = ...,\n) -> NDArray[Any]: ...\n@overload\ndef compress(\n condition: _ArrayLikeBool_co, # 1D bool array\n a: ArrayLike,\n axis: SupportsIndex | None,\n out: _ArrayT,\n) -> _ArrayT: ...\n@overload\ndef compress(\n condition: _ArrayLikeBool_co, # 1D bool array\n a: ArrayLike,\n axis: SupportsIndex | None = ...,\n *,\n out: _ArrayT,\n) -> _ArrayT: ...\n\n@overload\ndef clip(\n a: _ScalarT,\n a_min: ArrayLike | None,\n a_max: ArrayLike | None,\n out: None = ...,\n *,\n min: ArrayLike | None = ...,\n max: ArrayLike | None = ...,\n dtype: None = ...,\n where: _ArrayLikeBool_co | None = ...,\n order: _OrderKACF = ...,\n subok: bool = ...,\n signature: str | tuple[str | None, ...] 
= ...,\n casting: _CastingKind = ...,\n) -> _ScalarT: ...\n@overload\ndef clip(\n a: _ScalarLike_co,\n a_min: ArrayLike | None,\n a_max: ArrayLike | None,\n out: None = ...,\n *,\n min: ArrayLike | None = ...,\n max: ArrayLike | None = ...,\n dtype: None = ...,\n where: _ArrayLikeBool_co | None = ...,\n order: _OrderKACF = ...,\n subok: bool = ...,\n signature: str | tuple[str | None, ...] = ...,\n casting: _CastingKind = ...,\n) -> Any: ...\n@overload\ndef clip(\n a: _ArrayLike[_ScalarT],\n a_min: ArrayLike | None,\n a_max: ArrayLike | None,\n out: None = ...,\n *,\n min: ArrayLike | None = ...,\n max: ArrayLike | None = ...,\n dtype: None = ...,\n where: _ArrayLikeBool_co | None = ...,\n order: _OrderKACF = ...,\n subok: bool = ...,\n signature: str | tuple[str | None, ...] = ...,\n casting: _CastingKind = ...,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef clip(\n a: ArrayLike,\n a_min: ArrayLike | None,\n a_max: ArrayLike | None,\n out: None = ...,\n *,\n min: ArrayLike | None = ...,\n max: ArrayLike | None = ...,\n dtype: None = ...,\n where: _ArrayLikeBool_co | None = ...,\n order: _OrderKACF = ...,\n subok: bool = ...,\n signature: str | tuple[str | None, ...] = ...,\n casting: _CastingKind = ...,\n) -> NDArray[Any]: ...\n@overload\ndef clip(\n a: ArrayLike,\n a_min: ArrayLike | None,\n a_max: ArrayLike | None,\n out: _ArrayT,\n *,\n min: ArrayLike | None = ...,\n max: ArrayLike | None = ...,\n dtype: DTypeLike = ...,\n where: _ArrayLikeBool_co | None = ...,\n order: _OrderKACF = ...,\n subok: bool = ...,\n signature: str | tuple[str | None, ...] = ...,\n casting: _CastingKind = ...,\n) -> _ArrayT: ...\n@overload\ndef clip(\n a: ArrayLike,\n a_min: ArrayLike | None,\n a_max: ArrayLike | None,\n out: ArrayLike = ...,\n *,\n min: ArrayLike | None = ...,\n max: ArrayLike | None = ...,\n dtype: DTypeLike,\n where: _ArrayLikeBool_co | None = ...,\n order: _OrderKACF = ...,\n subok: bool = ...,\n signature: str | tuple[str | None, ...] 
= ...,\n casting: _CastingKind = ...,\n) -> Any: ...\n\n@overload\ndef sum(\n a: _ArrayLike[_ScalarT],\n axis: None = ...,\n dtype: None = ...,\n out: None = ...,\n keepdims: Literal[False] = ...,\n initial: _NumberLike_co = ...,\n where: _ArrayLikeBool_co = ...,\n) -> _ScalarT: ...\n@overload\ndef sum(\n a: _ArrayLike[_ScalarT],\n axis: None = ...,\n dtype: None = ...,\n out: None = ...,\n keepdims: bool = ...,\n initial: _NumberLike_co = ...,\n where: _ArrayLikeBool_co = ...,\n) -> _ScalarT | NDArray[_ScalarT]: ...\n@overload\ndef sum(\n a: ArrayLike,\n axis: None,\n dtype: _DTypeLike[_ScalarT],\n out: None = ...,\n keepdims: Literal[False] = ...,\n initial: _NumberLike_co = ...,\n where: _ArrayLikeBool_co = ...,\n) -> _ScalarT: ...\n@overload\ndef sum(\n a: ArrayLike,\n axis: None = ...,\n *,\n dtype: _DTypeLike[_ScalarT],\n out: None = ...,\n keepdims: Literal[False] = ...,\n initial: _NumberLike_co = ...,\n where: _ArrayLikeBool_co = ...,\n) -> _ScalarT: ...\n@overload\ndef sum(\n a: ArrayLike,\n axis: _ShapeLike | None,\n dtype: _DTypeLike[_ScalarT],\n out: None = ...,\n keepdims: bool = ...,\n initial: _NumberLike_co = ...,\n where: _ArrayLikeBool_co = ...,\n) -> _ScalarT | NDArray[_ScalarT]: ...\n@overload\ndef sum(\n a: ArrayLike,\n axis: _ShapeLike | None = ...,\n *,\n dtype: _DTypeLike[_ScalarT],\n out: None = ...,\n keepdims: bool = ...,\n initial: _NumberLike_co = ...,\n where: _ArrayLikeBool_co = ...,\n) -> _ScalarT | NDArray[_ScalarT]: ...\n@overload\ndef sum(\n a: ArrayLike,\n axis: _ShapeLike | None = ...,\n dtype: DTypeLike = ...,\n out: None = ...,\n keepdims: bool = ...,\n initial: _NumberLike_co = ...,\n where: _ArrayLikeBool_co = ...,\n) -> Any: ...\n@overload\ndef sum(\n a: ArrayLike,\n axis: _ShapeLike | None,\n dtype: DTypeLike,\n out: _ArrayT,\n keepdims: bool = ...,\n initial: _NumberLike_co = ...,\n where: _ArrayLikeBool_co = ...,\n) -> _ArrayT: ...\n@overload\ndef sum(\n a: ArrayLike,\n axis: _ShapeLike | None = ...,\n dtype: DTypeLike 
= ...,\n *,\n out: _ArrayT,\n keepdims: bool = ...,\n initial: _NumberLike_co = ...,\n where: _ArrayLikeBool_co = ...,\n) -> _ArrayT: ...\n\n# keep in sync with `any`\n@overload\ndef all(\n a: ArrayLike | None,\n axis: None = None,\n out: None = None,\n keepdims: Literal[False, 0] | _NoValueType = ...,\n *,\n where: _ArrayLikeBool_co | _NoValueType = ...,\n) -> np.bool: ...\n@overload\ndef all(\n a: ArrayLike | None,\n axis: int | tuple[int, ...] | None = None,\n out: None = None,\n keepdims: _BoolLike_co | _NoValueType = ...,\n *,\n where: _ArrayLikeBool_co | _NoValueType = ...,\n) -> Incomplete: ...\n@overload\ndef all(\n a: ArrayLike | None,\n axis: int | tuple[int, ...] | None,\n out: _ArrayT,\n keepdims: _BoolLike_co | _NoValueType = ...,\n *,\n where: _ArrayLikeBool_co | _NoValueType = ...,\n) -> _ArrayT: ...\n@overload\ndef all(\n a: ArrayLike | None,\n axis: int | tuple[int, ...] | None = None,\n *,\n out: _ArrayT,\n keepdims: _BoolLike_co | _NoValueType = ...,\n where: _ArrayLikeBool_co | _NoValueType = ...,\n) -> _ArrayT: ...\n\n# keep in sync with `all`\n@overload\ndef any(\n a: ArrayLike | None,\n axis: None = None,\n out: None = None,\n keepdims: Literal[False, 0] | _NoValueType = ...,\n *,\n where: _ArrayLikeBool_co | _NoValueType = ...,\n) -> np.bool: ...\n@overload\ndef any(\n a: ArrayLike | None,\n axis: int | tuple[int, ...] | None = None,\n out: None = None,\n keepdims: _BoolLike_co | _NoValueType = ...,\n *,\n where: _ArrayLikeBool_co | _NoValueType = ...,\n) -> Incomplete: ...\n@overload\ndef any(\n a: ArrayLike | None,\n axis: int | tuple[int, ...] | None,\n out: _ArrayT,\n keepdims: _BoolLike_co | _NoValueType = ...,\n *,\n where: _ArrayLikeBool_co | _NoValueType = ...,\n) -> _ArrayT: ...\n@overload\ndef any(\n a: ArrayLike | None,\n axis: int | tuple[int, ...] 
| None = None,\n *,\n out: _ArrayT,\n keepdims: _BoolLike_co | _NoValueType = ...,\n where: _ArrayLikeBool_co | _NoValueType = ...,\n) -> _ArrayT: ...\n\n#\n@overload\ndef cumsum(\n a: _ArrayLike[_ScalarT],\n axis: SupportsIndex | None = ...,\n dtype: None = ...,\n out: None = ...,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef cumsum(\n a: ArrayLike,\n axis: SupportsIndex | None = ...,\n dtype: None = ...,\n out: None = ...,\n) -> NDArray[Any]: ...\n@overload\ndef cumsum(\n a: ArrayLike,\n axis: SupportsIndex | None,\n dtype: _DTypeLike[_ScalarT],\n out: None = ...,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef cumsum(\n a: ArrayLike,\n axis: SupportsIndex | None = ...,\n *,\n dtype: _DTypeLike[_ScalarT],\n out: None = ...,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef cumsum(\n a: ArrayLike,\n axis: SupportsIndex | None = ...,\n dtype: DTypeLike = ...,\n out: None = ...,\n) -> NDArray[Any]: ...\n@overload\ndef cumsum(\n a: ArrayLike,\n axis: SupportsIndex | None,\n dtype: DTypeLike,\n out: _ArrayT,\n) -> _ArrayT: ...\n@overload\ndef cumsum(\n a: ArrayLike,\n axis: SupportsIndex | None = ...,\n dtype: DTypeLike = ...,\n *,\n out: _ArrayT,\n) -> _ArrayT: ...\n\n@overload\ndef cumulative_sum(\n x: _ArrayLike[_ScalarT],\n /,\n *,\n axis: SupportsIndex | None = ...,\n dtype: None = ...,\n out: None = ...,\n include_initial: bool = ...,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef cumulative_sum(\n x: ArrayLike,\n /,\n *,\n axis: SupportsIndex | None = ...,\n dtype: None = ...,\n out: None = ...,\n include_initial: bool = ...,\n) -> NDArray[Any]: ...\n@overload\ndef cumulative_sum(\n x: ArrayLike,\n /,\n *,\n axis: SupportsIndex | None = ...,\n dtype: _DTypeLike[_ScalarT],\n out: None = ...,\n include_initial: bool = ...,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef cumulative_sum(\n x: ArrayLike,\n /,\n *,\n axis: SupportsIndex | None = ...,\n dtype: DTypeLike = ...,\n out: None = ...,\n include_initial: bool = ...,\n) -> NDArray[Any]: ...\n@overload\ndef 
cumulative_sum(\n x: ArrayLike,\n /,\n *,\n axis: SupportsIndex | None = ...,\n dtype: DTypeLike = ...,\n out: _ArrayT,\n include_initial: bool = ...,\n) -> _ArrayT: ...\n\n@overload\ndef ptp(\n a: _ArrayLike[_ScalarT],\n axis: None = ...,\n out: None = ...,\n keepdims: Literal[False] = ...,\n) -> _ScalarT: ...\n@overload\ndef ptp(\n a: ArrayLike,\n axis: _ShapeLike | None = ...,\n out: None = ...,\n keepdims: bool = ...,\n) -> Any: ...\n@overload\ndef ptp(\n a: ArrayLike,\n axis: _ShapeLike | None,\n out: _ArrayT,\n keepdims: bool = ...,\n) -> _ArrayT: ...\n@overload\ndef ptp(\n a: ArrayLike,\n axis: _ShapeLike | None = ...,\n *,\n out: _ArrayT,\n keepdims: bool = ...,\n) -> _ArrayT: ...\n\n@overload\ndef amax(\n a: _ArrayLike[_ScalarT],\n axis: None = ...,\n out: None = ...,\n keepdims: Literal[False] = ...,\n initial: _NumberLike_co = ...,\n where: _ArrayLikeBool_co = ...,\n) -> _ScalarT: ...\n@overload\ndef amax(\n a: ArrayLike,\n axis: _ShapeLike | None = ...,\n out: None = ...,\n keepdims: bool = ...,\n initial: _NumberLike_co = ...,\n where: _ArrayLikeBool_co = ...,\n) -> Any: ...\n@overload\ndef amax(\n a: ArrayLike,\n axis: _ShapeLike | None,\n out: _ArrayT,\n keepdims: bool = ...,\n initial: _NumberLike_co = ...,\n where: _ArrayLikeBool_co = ...,\n) -> _ArrayT: ...\n@overload\ndef amax(\n a: ArrayLike,\n axis: _ShapeLike | None = ...,\n *,\n out: _ArrayT,\n keepdims: bool = ...,\n initial: _NumberLike_co = ...,\n where: _ArrayLikeBool_co = ...,\n) -> _ArrayT: ...\n\n@overload\ndef amin(\n a: _ArrayLike[_ScalarT],\n axis: None = ...,\n out: None = ...,\n keepdims: Literal[False] = ...,\n initial: _NumberLike_co = ...,\n where: _ArrayLikeBool_co = ...,\n) -> _ScalarT: ...\n@overload\ndef amin(\n a: ArrayLike,\n axis: _ShapeLike | None = ...,\n out: None = ...,\n keepdims: bool = ...,\n initial: _NumberLike_co = ...,\n where: _ArrayLikeBool_co = ...,\n) -> Any: ...\n@overload\ndef amin(\n a: ArrayLike,\n axis: _ShapeLike | None,\n out: _ArrayT,\n keepdims: 
bool = ...,\n initial: _NumberLike_co = ...,\n where: _ArrayLikeBool_co = ...,\n) -> _ArrayT: ...\n@overload\ndef amin(\n a: ArrayLike,\n axis: _ShapeLike | None = ...,\n *,\n out: _ArrayT,\n keepdims: bool = ...,\n initial: _NumberLike_co = ...,\n where: _ArrayLikeBool_co = ...,\n) -> _ArrayT: ...\n\n# TODO: `np.prod()``: For object arrays `initial` does not necessarily\n# have to be a numerical scalar.\n# The only requirement is that it is compatible\n# with the `.__mul__()` method(s) of the passed array's elements.\n\n# Note that the same situation holds for all wrappers around\n# `np.ufunc.reduce`, e.g. `np.sum()` (`.__add__()`).\n@overload\ndef prod(\n a: _ArrayLikeBool_co,\n axis: None = ...,\n dtype: None = ...,\n out: None = ...,\n keepdims: Literal[False] = ...,\n initial: _NumberLike_co = ...,\n where: _ArrayLikeBool_co = ...,\n) -> int_: ...\n@overload\ndef prod(\n a: _ArrayLikeUInt_co,\n axis: None = ...,\n dtype: None = ...,\n out: None = ...,\n keepdims: Literal[False] = ...,\n initial: _NumberLike_co = ...,\n where: _ArrayLikeBool_co = ...,\n) -> uint64: ...\n@overload\ndef prod(\n a: _ArrayLikeInt_co,\n axis: None = ...,\n dtype: None = ...,\n out: None = ...,\n keepdims: Literal[False] = ...,\n initial: _NumberLike_co = ...,\n where: _ArrayLikeBool_co = ...,\n) -> int64: ...\n@overload\ndef prod(\n a: _ArrayLikeFloat_co,\n axis: None = ...,\n dtype: None = ...,\n out: None = ...,\n keepdims: Literal[False] = ...,\n initial: _NumberLike_co = ...,\n where: _ArrayLikeBool_co = ...,\n) -> floating: ...\n@overload\ndef prod(\n a: _ArrayLikeComplex_co,\n axis: None = ...,\n dtype: None = ...,\n out: None = ...,\n keepdims: Literal[False] = ...,\n initial: _NumberLike_co = ...,\n where: _ArrayLikeBool_co = ...,\n) -> complexfloating: ...\n@overload\ndef prod(\n a: _ArrayLikeComplex_co | _ArrayLikeObject_co,\n axis: _ShapeLike | None = ...,\n dtype: None = ...,\n out: None = ...,\n keepdims: bool = ...,\n initial: _NumberLike_co = ...,\n where: 
_ArrayLikeBool_co = ...,\n) -> Any: ...\n@overload\ndef prod(\n a: _ArrayLikeComplex_co | _ArrayLikeObject_co,\n axis: None,\n dtype: _DTypeLike[_ScalarT],\n out: None = ...,\n keepdims: Literal[False] = ...,\n initial: _NumberLike_co = ...,\n where: _ArrayLikeBool_co = ...,\n) -> _ScalarT: ...\n@overload\ndef prod(\n a: _ArrayLikeComplex_co | _ArrayLikeObject_co,\n axis: None = ...,\n *,\n dtype: _DTypeLike[_ScalarT],\n out: None = ...,\n keepdims: Literal[False] = ...,\n initial: _NumberLike_co = ...,\n where: _ArrayLikeBool_co = ...,\n) -> _ScalarT: ...\n@overload\ndef prod(\n a: _ArrayLikeComplex_co | _ArrayLikeObject_co,\n axis: _ShapeLike | None = ...,\n dtype: DTypeLike | None = ...,\n out: None = ...,\n keepdims: bool = ...,\n initial: _NumberLike_co = ...,\n where: _ArrayLikeBool_co = ...,\n) -> Any: ...\n@overload\ndef prod(\n a: _ArrayLikeComplex_co | _ArrayLikeObject_co,\n axis: _ShapeLike | None,\n dtype: DTypeLike | None,\n out: _ArrayT,\n keepdims: bool = ...,\n initial: _NumberLike_co = ...,\n where: _ArrayLikeBool_co = ...,\n) -> _ArrayT: ...\n@overload\ndef prod(\n a: _ArrayLikeComplex_co | _ArrayLikeObject_co,\n axis: _ShapeLike | None = ...,\n dtype: DTypeLike | None = ...,\n *,\n out: _ArrayT,\n keepdims: bool = ...,\n initial: _NumberLike_co = ...,\n where: _ArrayLikeBool_co = ...,\n) -> _ArrayT: ...\n\n@overload\ndef cumprod(\n a: _ArrayLikeBool_co,\n axis: SupportsIndex | None = ...,\n dtype: None = ...,\n out: None = ...,\n) -> NDArray[int_]: ...\n@overload\ndef cumprod(\n a: _ArrayLikeUInt_co,\n axis: SupportsIndex | None = ...,\n dtype: None = ...,\n out: None = ...,\n) -> NDArray[uint64]: ...\n@overload\ndef cumprod(\n a: _ArrayLikeInt_co,\n axis: SupportsIndex | None = ...,\n dtype: None = ...,\n out: None = ...,\n) -> NDArray[int64]: ...\n@overload\ndef cumprod(\n a: _ArrayLikeFloat_co,\n axis: SupportsIndex | None = ...,\n dtype: None = ...,\n out: None = ...,\n) -> NDArray[floating]: ...\n@overload\ndef cumprod(\n a: 
_ArrayLikeComplex_co,\n axis: SupportsIndex | None = ...,\n dtype: None = ...,\n out: None = ...,\n) -> NDArray[complexfloating]: ...\n@overload\ndef cumprod(\n a: _ArrayLikeObject_co,\n axis: SupportsIndex | None = ...,\n dtype: None = ...,\n out: None = ...,\n) -> NDArray[object_]: ...\n@overload\ndef cumprod(\n a: _ArrayLikeComplex_co | _ArrayLikeObject_co,\n axis: SupportsIndex | None,\n dtype: _DTypeLike[_ScalarT],\n out: None = ...,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef cumprod(\n a: _ArrayLikeComplex_co | _ArrayLikeObject_co,\n axis: SupportsIndex | None = ...,\n *,\n dtype: _DTypeLike[_ScalarT],\n out: None = ...,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef cumprod(\n a: _ArrayLikeComplex_co | _ArrayLikeObject_co,\n axis: SupportsIndex | None = ...,\n dtype: DTypeLike = ...,\n out: None = ...,\n) -> NDArray[Any]: ...\n@overload\ndef cumprod(\n a: _ArrayLikeComplex_co | _ArrayLikeObject_co,\n axis: SupportsIndex | None,\n dtype: DTypeLike,\n out: _ArrayT,\n) -> _ArrayT: ...\n@overload\ndef cumprod(\n a: _ArrayLikeComplex_co | _ArrayLikeObject_co,\n axis: SupportsIndex | None = ...,\n dtype: DTypeLike = ...,\n *,\n out: _ArrayT,\n) -> _ArrayT: ...\n\n@overload\ndef cumulative_prod(\n x: _ArrayLikeBool_co,\n /,\n *,\n axis: SupportsIndex | None = ...,\n dtype: None = ...,\n out: None = ...,\n include_initial: bool = ...,\n) -> NDArray[int_]: ...\n@overload\ndef cumulative_prod(\n x: _ArrayLikeUInt_co,\n /,\n *,\n axis: SupportsIndex | None = ...,\n dtype: None = ...,\n out: None = ...,\n include_initial: bool = ...,\n) -> NDArray[uint64]: ...\n@overload\ndef cumulative_prod(\n x: _ArrayLikeInt_co,\n /,\n *,\n axis: SupportsIndex | None = ...,\n dtype: None = ...,\n out: None = ...,\n include_initial: bool = ...,\n) -> NDArray[int64]: ...\n@overload\ndef cumulative_prod(\n x: _ArrayLikeFloat_co,\n /,\n *,\n axis: SupportsIndex | None = ...,\n dtype: None = ...,\n out: None = ...,\n include_initial: bool = ...,\n) -> NDArray[floating]: 
...\n@overload\ndef cumulative_prod(\n x: _ArrayLikeComplex_co,\n /,\n *,\n axis: SupportsIndex | None = ...,\n dtype: None = ...,\n out: None = ...,\n include_initial: bool = ...,\n) -> NDArray[complexfloating]: ...\n@overload\ndef cumulative_prod(\n x: _ArrayLikeObject_co,\n /,\n *,\n axis: SupportsIndex | None = ...,\n dtype: None = ...,\n out: None = ...,\n include_initial: bool = ...,\n) -> NDArray[object_]: ...\n@overload\ndef cumulative_prod(\n x: _ArrayLikeComplex_co | _ArrayLikeObject_co,\n /,\n *,\n axis: SupportsIndex | None = ...,\n dtype: _DTypeLike[_ScalarT],\n out: None = ...,\n include_initial: bool = ...,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef cumulative_prod(\n x: _ArrayLikeComplex_co | _ArrayLikeObject_co,\n /,\n *,\n axis: SupportsIndex | None = ...,\n dtype: DTypeLike = ...,\n out: None = ...,\n include_initial: bool = ...,\n) -> NDArray[Any]: ...\n@overload\ndef cumulative_prod(\n x: _ArrayLikeComplex_co | _ArrayLikeObject_co,\n /,\n *,\n axis: SupportsIndex | None = ...,\n dtype: DTypeLike = ...,\n out: _ArrayT,\n include_initial: bool = ...,\n) -> _ArrayT: ...\n\ndef ndim(a: ArrayLike) -> int: ...\n\ndef size(a: ArrayLike, axis: int | None = ...) 
-> int: ...\n\n@overload\ndef around(\n a: _BoolLike_co,\n decimals: SupportsIndex = ...,\n out: None = ...,\n) -> float16: ...\n@overload\ndef around(\n a: _NumberOrObjectT,\n decimals: SupportsIndex = ...,\n out: None = ...,\n) -> _NumberOrObjectT: ...\n@overload\ndef around(\n a: _ComplexLike_co | object_,\n decimals: SupportsIndex = ...,\n out: None = ...,\n) -> Any: ...\n@overload\ndef around(\n a: _ArrayLikeBool_co,\n decimals: SupportsIndex = ...,\n out: None = ...,\n) -> NDArray[float16]: ...\n@overload\ndef around(\n a: _ArrayLike[_NumberOrObjectT],\n decimals: SupportsIndex = ...,\n out: None = ...,\n) -> NDArray[_NumberOrObjectT]: ...\n@overload\ndef around(\n a: _ArrayLikeComplex_co | _ArrayLikeObject_co,\n decimals: SupportsIndex = ...,\n out: None = ...,\n) -> NDArray[Any]: ...\n@overload\ndef around(\n a: _ArrayLikeComplex_co | _ArrayLikeObject_co,\n decimals: SupportsIndex,\n out: _ArrayT,\n) -> _ArrayT: ...\n@overload\ndef around(\n a: _ArrayLikeComplex_co | _ArrayLikeObject_co,\n decimals: SupportsIndex = ...,\n *,\n out: _ArrayT,\n) -> _ArrayT: ...\n\n@overload\ndef mean(\n a: _ArrayLikeFloat_co,\n axis: None = ...,\n dtype: None = ...,\n out: None = ...,\n keepdims: Literal[False] | _NoValueType = ...,\n *,\n where: _ArrayLikeBool_co | _NoValueType = ...,\n) -> floating: ...\n@overload\ndef mean(\n a: _ArrayLikeComplex_co,\n axis: None = ...,\n dtype: None = ...,\n out: None = ...,\n keepdims: Literal[False] | _NoValueType = ...,\n *,\n where: _ArrayLikeBool_co | _NoValueType = ...,\n) -> complexfloating: ...\n@overload\ndef mean(\n a: _ArrayLike[np.timedelta64],\n axis: None = ...,\n dtype: None = ...,\n out: None = ...,\n keepdims: Literal[False] | _NoValueType = ...,\n *,\n where: _ArrayLikeBool_co | _NoValueType = ...,\n) -> timedelta64: ...\n@overload\ndef mean(\n a: _ArrayLikeComplex_co | _ArrayLikeObject_co,\n axis: _ShapeLike | None,\n dtype: DTypeLike,\n out: _ArrayT,\n keepdims: bool | _NoValueType = ...,\n *,\n where: 
_ArrayLikeBool_co | _NoValueType = ...,\n) -> _ArrayT: ...\n@overload\ndef mean(\n a: _ArrayLikeComplex_co | _ArrayLikeObject_co,\n axis: _ShapeLike | None = ...,\n dtype: DTypeLike | None = ...,\n *,\n out: _ArrayT,\n keepdims: bool | _NoValueType = ...,\n where: _ArrayLikeBool_co | _NoValueType = ...,\n) -> _ArrayT: ...\n@overload\ndef mean(\n a: _ArrayLikeComplex_co | _ArrayLikeObject_co,\n axis: None,\n dtype: _DTypeLike[_ScalarT],\n out: None = ...,\n keepdims: Literal[False] | _NoValueType = ...,\n *,\n where: _ArrayLikeBool_co | _NoValueType = ...,\n) -> _ScalarT: ...\n@overload\ndef mean(\n a: _ArrayLikeComplex_co | _ArrayLikeObject_co,\n axis: None = ...,\n *,\n dtype: _DTypeLike[_ScalarT],\n out: None = ...,\n keepdims: Literal[False] | _NoValueType = ...,\n where: _ArrayLikeBool_co | _NoValueType = ...,\n) -> _ScalarT: ...\n@overload\ndef mean(\n a: _ArrayLikeComplex_co | _ArrayLikeObject_co,\n axis: _ShapeLike | None,\n dtype: _DTypeLike[_ScalarT],\n out: None,\n keepdims: Literal[True, 1],\n *,\n where: _ArrayLikeBool_co | _NoValueType = ...,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef mean(\n a: _ArrayLikeComplex_co | _ArrayLikeObject_co,\n axis: _ShapeLike | None,\n dtype: _DTypeLike[_ScalarT],\n out: None = ...,\n *,\n keepdims: bool | _NoValueType = ...,\n where: _ArrayLikeBool_co | _NoValueType = ...,\n) -> _ScalarT | NDArray[_ScalarT]: ...\n@overload\ndef mean(\n a: _ArrayLikeComplex_co | _ArrayLikeObject_co,\n axis: _ShapeLike | None = ...,\n *,\n dtype: _DTypeLike[_ScalarT],\n out: None = ...,\n keepdims: bool | _NoValueType = ...,\n where: _ArrayLikeBool_co | _NoValueType = ...,\n) -> _ScalarT | NDArray[_ScalarT]: ...\n@overload\ndef mean(\n a: _ArrayLikeComplex_co | _ArrayLikeObject_co,\n axis: _ShapeLike | None = ...,\n dtype: DTypeLike | None = ...,\n out: None = ...,\n keepdims: bool | _NoValueType = ...,\n *,\n where: _ArrayLikeBool_co | _NoValueType = ...,\n) -> Incomplete: ...\n\n@overload\ndef std(\n a: _ArrayLikeComplex_co,\n axis: 
None = ...,\n dtype: None = ...,\n out: None = ...,\n ddof: float = ...,\n keepdims: Literal[False] = ...,\n *,\n where: _ArrayLikeBool_co | _NoValueType = ...,\n mean: _ArrayLikeComplex_co | _NoValueType = ...,\n correction: float | _NoValueType = ...,\n) -> floating: ...\n@overload\ndef std(\n a: _ArrayLikeComplex_co | _ArrayLikeObject_co,\n axis: _ShapeLike | None = ...,\n dtype: None = ...,\n out: None = ...,\n ddof: float = ...,\n keepdims: bool = ...,\n *,\n where: _ArrayLikeBool_co | _NoValueType = ...,\n mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ...,\n correction: float | _NoValueType = ...,\n) -> Any: ...\n@overload\ndef std(\n a: _ArrayLikeComplex_co | _ArrayLikeObject_co,\n axis: None,\n dtype: _DTypeLike[_ScalarT],\n out: None = ...,\n ddof: float = ...,\n keepdims: Literal[False] = ...,\n *,\n where: _ArrayLikeBool_co | _NoValueType = ...,\n mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ...,\n correction: float | _NoValueType = ...,\n) -> _ScalarT: ...\n@overload\ndef std(\n a: _ArrayLikeComplex_co | _ArrayLikeObject_co,\n axis: None = ...,\n *,\n dtype: _DTypeLike[_ScalarT],\n out: None = ...,\n ddof: float = ...,\n keepdims: Literal[False] = ...,\n where: _ArrayLikeBool_co | _NoValueType = ...,\n mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ...,\n correction: float | _NoValueType = ...,\n) -> _ScalarT: ...\n@overload\ndef std(\n a: _ArrayLikeComplex_co | _ArrayLikeObject_co,\n axis: _ShapeLike | None = ...,\n dtype: DTypeLike = ...,\n out: None = ...,\n ddof: float = ...,\n keepdims: bool = ...,\n *,\n where: _ArrayLikeBool_co | _NoValueType = ...,\n mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ...,\n correction: float | _NoValueType = ...,\n) -> Any: ...\n@overload\ndef std(\n a: _ArrayLikeComplex_co | _ArrayLikeObject_co,\n axis: _ShapeLike | None,\n dtype: DTypeLike,\n out: _ArrayT,\n ddof: float = ...,\n keepdims: bool = ...,\n *,\n where: _ArrayLikeBool_co | 
_NoValueType = ...,\n mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ...,\n correction: float | _NoValueType = ...,\n) -> _ArrayT: ...\n@overload\ndef std(\n a: _ArrayLikeComplex_co | _ArrayLikeObject_co,\n axis: _ShapeLike | None = ...,\n dtype: DTypeLike = ...,\n *,\n out: _ArrayT,\n ddof: float = ...,\n keepdims: bool = ...,\n where: _ArrayLikeBool_co | _NoValueType = ...,\n mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ...,\n correction: float | _NoValueType = ...,\n) -> _ArrayT: ...\n\n@overload\ndef var(\n a: _ArrayLikeComplex_co,\n axis: None = ...,\n dtype: None = ...,\n out: None = ...,\n ddof: float = ...,\n keepdims: Literal[False] = ...,\n *,\n where: _ArrayLikeBool_co | _NoValueType = ...,\n mean: _ArrayLikeComplex_co | _NoValueType = ...,\n correction: float | _NoValueType = ...,\n) -> floating: ...\n@overload\ndef var(\n a: _ArrayLikeComplex_co | _ArrayLikeObject_co,\n axis: _ShapeLike | None = ...,\n dtype: None = ...,\n out: None = ...,\n ddof: float = ...,\n keepdims: bool = ...,\n *,\n where: _ArrayLikeBool_co | _NoValueType = ...,\n mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ...,\n correction: float | _NoValueType = ...,\n) -> Any: ...\n@overload\ndef var(\n a: _ArrayLikeComplex_co | _ArrayLikeObject_co,\n axis: None,\n dtype: _DTypeLike[_ScalarT],\n out: None = ...,\n ddof: float = ...,\n keepdims: Literal[False] = ...,\n *,\n where: _ArrayLikeBool_co | _NoValueType = ...,\n mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ...,\n correction: float | _NoValueType = ...,\n) -> _ScalarT: ...\n@overload\ndef var(\n a: _ArrayLikeComplex_co | _ArrayLikeObject_co,\n axis: None = ...,\n *,\n dtype: _DTypeLike[_ScalarT],\n out: None = ...,\n ddof: float = ...,\n keepdims: Literal[False] = ...,\n where: _ArrayLikeBool_co | _NoValueType = ...,\n mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ...,\n correction: float | _NoValueType = ...,\n) -> _ScalarT: 
...\n@overload\ndef var(\n a: _ArrayLikeComplex_co | _ArrayLikeObject_co,\n axis: _ShapeLike | None = ...,\n dtype: DTypeLike = ...,\n out: None = ...,\n ddof: float = ...,\n keepdims: bool = ...,\n *,\n where: _ArrayLikeBool_co | _NoValueType = ...,\n mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ...,\n correction: float | _NoValueType = ...,\n) -> Any: ...\n@overload\ndef var(\n a: _ArrayLikeComplex_co | _ArrayLikeObject_co,\n axis: _ShapeLike | None,\n dtype: DTypeLike,\n out: _ArrayT,\n ddof: float = ...,\n keepdims: bool = ...,\n *,\n where: _ArrayLikeBool_co | _NoValueType = ...,\n mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ...,\n correction: float | _NoValueType = ...,\n) -> _ArrayT: ...\n@overload\ndef var(\n a: _ArrayLikeComplex_co | _ArrayLikeObject_co,\n axis: _ShapeLike | None = ...,\n dtype: DTypeLike = ...,\n *,\n out: _ArrayT,\n ddof: float = ...,\n keepdims: bool = ...,\n where: _ArrayLikeBool_co | _NoValueType = ...,\n mean: _ArrayLikeComplex_co | _ArrayLikeObject_co | _NoValueType = ...,\n correction: float | _NoValueType = ...,\n) -> _ArrayT: ...\n\nmax = amax\nmin = amin\nround = around\n
.venv\Lib\site-packages\numpy\_core\fromnumeric.pyi
fromnumeric.pyi
Other
43,784
0.95
0.111429
0.063529
node-utils
245
2024-07-13T04:40:33.305778
GPL-3.0
false
571317298b7bad985833061938a96395
import functools\nimport operator\nimport types\nimport warnings\n\nimport numpy as np\nfrom numpy._core import overrides\nfrom numpy._core._multiarray_umath import _array_converter\nfrom numpy._core.multiarray import add_docstring\n\nfrom . import numeric as _nx\nfrom .numeric import asanyarray, nan, ndim, result_type\n\n__all__ = ['logspace', 'linspace', 'geomspace']\n\n\narray_function_dispatch = functools.partial(\n overrides.array_function_dispatch, module='numpy')\n\n\ndef _linspace_dispatcher(start, stop, num=None, endpoint=None, retstep=None,\n dtype=None, axis=None, *, device=None):\n return (start, stop)\n\n\n@array_function_dispatch(_linspace_dispatcher)\ndef linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None,\n axis=0, *, device=None):\n """\n Return evenly spaced numbers over a specified interval.\n\n Returns `num` evenly spaced samples, calculated over the\n interval [`start`, `stop`].\n\n The endpoint of the interval can optionally be excluded.\n\n .. versionchanged:: 1.20.0\n Values are rounded towards ``-inf`` instead of ``0`` when an\n integer ``dtype`` is specified. The old behavior can\n still be obtained with ``np.linspace(start, stop, num).astype(int)``\n\n Parameters\n ----------\n start : array_like\n The starting value of the sequence.\n stop : array_like\n The end value of the sequence, unless `endpoint` is set to False.\n In that case, the sequence consists of all but the last of ``num + 1``\n evenly spaced samples, so that `stop` is excluded. Note that the step\n size changes when `endpoint` is False.\n num : int, optional\n Number of samples to generate. Default is 50. Must be non-negative.\n endpoint : bool, optional\n If True, `stop` is the last sample. Otherwise, it is not included.\n Default is True.\n retstep : bool, optional\n If True, return (`samples`, `step`), where `step` is the spacing\n between samples.\n dtype : dtype, optional\n The type of the output array. 
If `dtype` is not given, the data type\n is inferred from `start` and `stop`. The inferred dtype will never be\n an integer; `float` is chosen even if the arguments would produce an\n array of integers.\n axis : int, optional\n The axis in the result to store the samples. Relevant only if start\n or stop are array-like. By default (0), the samples will be along a\n new axis inserted at the beginning. Use -1 to get an axis at the end.\n device : str, optional\n The device on which to place the created array. Default: None.\n For Array-API interoperability only, so must be ``"cpu"`` if passed.\n\n .. versionadded:: 2.0.0\n\n Returns\n -------\n samples : ndarray\n There are `num` equally spaced samples in the closed interval\n ``[start, stop]`` or the half-open interval ``[start, stop)``\n (depending on whether `endpoint` is True or False).\n step : float, optional\n Only returned if `retstep` is True\n\n Size of spacing between samples.\n\n\n See Also\n --------\n arange : Similar to `linspace`, but uses a step size (instead of the\n number of samples).\n geomspace : Similar to `linspace`, but with numbers spaced evenly on a log\n scale (a geometric progression).\n logspace : Similar to `geomspace`, but with the end points specified as\n logarithms.\n :ref:`how-to-partition`\n\n Examples\n --------\n >>> import numpy as np\n >>> np.linspace(2.0, 3.0, num=5)\n array([2. , 2.25, 2.5 , 2.75, 3. ])\n >>> np.linspace(2.0, 3.0, num=5, endpoint=False)\n array([2. , 2.2, 2.4, 2.6, 2.8])\n >>> np.linspace(2.0, 3.0, num=5, retstep=True)\n (array([2. , 2.25, 2.5 , 2.75, 3. 
]), 0.25)\n\n Graphical illustration:\n\n >>> import matplotlib.pyplot as plt\n >>> N = 8\n >>> y = np.zeros(N)\n >>> x1 = np.linspace(0, 10, N, endpoint=True)\n >>> x2 = np.linspace(0, 10, N, endpoint=False)\n >>> plt.plot(x1, y, 'o')\n [<matplotlib.lines.Line2D object at 0x...>]\n >>> plt.plot(x2, y + 0.5, 'o')\n [<matplotlib.lines.Line2D object at 0x...>]\n >>> plt.ylim([-0.5, 1])\n (-0.5, 1)\n >>> plt.show()\n\n """\n num = operator.index(num)\n if num < 0:\n raise ValueError(\n f"Number of samples, {num}, must be non-negative."\n )\n div = (num - 1) if endpoint else num\n\n conv = _array_converter(start, stop)\n start, stop = conv.as_arrays()\n dt = conv.result_type(ensure_inexact=True)\n\n if dtype is None:\n dtype = dt\n integer_dtype = False\n else:\n integer_dtype = _nx.issubdtype(dtype, _nx.integer)\n\n # Use `dtype=type(dt)` to enforce a floating point evaluation:\n delta = np.subtract(stop, start, dtype=type(dt))\n y = _nx.arange(\n 0, num, dtype=dt, device=device\n ).reshape((-1,) + (1,) * ndim(delta))\n\n # In-place multiplication y *= delta/div is faster, but prevents\n # the multiplicant from overriding what class is produced, and thus\n # prevents, e.g. use of Quantities, see gh-7142. Hence, we multiply\n # in place only for standard scalar types.\n if div > 0:\n _mult_inplace = _nx.isscalar(delta)\n step = delta / div\n any_step_zero = (\n step == 0 if _mult_inplace else _nx.asanyarray(step == 0).any())\n if any_step_zero:\n # Special handling for denormal numbers, gh-5437\n y /= div\n if _mult_inplace:\n y *= delta\n else:\n y = y * delta\n elif _mult_inplace:\n y *= step\n else:\n y = y * step\n else:\n # sequences with 0 items or 1 item with endpoint=True (i.e. div <= 0)\n # have an undefined step\n step = nan\n # Multiply with delta to allow possible override of output class.\n y = y * delta\n\n y += start\n\n if endpoint and num > 1:\n y[-1, ...] 
= stop\n\n if axis != 0:\n y = _nx.moveaxis(y, 0, axis)\n\n if integer_dtype:\n _nx.floor(y, out=y)\n\n y = conv.wrap(y.astype(dtype, copy=False))\n if retstep:\n return y, step\n else:\n return y\n\n\ndef _logspace_dispatcher(start, stop, num=None, endpoint=None, base=None,\n dtype=None, axis=None):\n return (start, stop, base)\n\n\n@array_function_dispatch(_logspace_dispatcher)\ndef logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None,\n axis=0):\n """\n Return numbers spaced evenly on a log scale.\n\n In linear space, the sequence starts at ``base ** start``\n (`base` to the power of `start`) and ends with ``base ** stop``\n (see `endpoint` below).\n\n .. versionchanged:: 1.25.0\n Non-scalar 'base` is now supported\n\n Parameters\n ----------\n start : array_like\n ``base ** start`` is the starting value of the sequence.\n stop : array_like\n ``base ** stop`` is the final value of the sequence, unless `endpoint`\n is False. In that case, ``num + 1`` values are spaced over the\n interval in log-space, of which all but the last (a sequence of\n length `num`) are returned.\n num : integer, optional\n Number of samples to generate. Default is 50.\n endpoint : boolean, optional\n If true, `stop` is the last sample. Otherwise, it is not included.\n Default is True.\n base : array_like, optional\n The base of the log space. The step size between the elements in\n ``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform.\n Default is 10.0.\n dtype : dtype\n The type of the output array. If `dtype` is not given, the data type\n is inferred from `start` and `stop`. The inferred type will never be\n an integer; `float` is chosen even if the arguments would produce an\n array of integers.\n axis : int, optional\n The axis in the result to store the samples. Relevant only if start,\n stop, or base are array-like. By default (0), the samples will be\n along a new axis inserted at the beginning. 
Use -1 to get an axis at\n the end.\n\n Returns\n -------\n samples : ndarray\n `num` samples, equally spaced on a log scale.\n\n See Also\n --------\n arange : Similar to linspace, with the step size specified instead of the\n number of samples. Note that, when used with a float endpoint, the\n endpoint may or may not be included.\n linspace : Similar to logspace, but with the samples uniformly distributed\n in linear space, instead of log space.\n geomspace : Similar to logspace, but with endpoints specified directly.\n :ref:`how-to-partition`\n\n Notes\n -----\n If base is a scalar, logspace is equivalent to the code\n\n >>> y = np.linspace(start, stop, num=num, endpoint=endpoint)\n ... # doctest: +SKIP\n >>> power(base, y).astype(dtype)\n ... # doctest: +SKIP\n\n Examples\n --------\n >>> import numpy as np\n >>> np.logspace(2.0, 3.0, num=4)\n array([ 100. , 215.443469 , 464.15888336, 1000. ])\n >>> np.logspace(2.0, 3.0, num=4, endpoint=False)\n array([100. , 177.827941 , 316.22776602, 562.34132519])\n >>> np.logspace(2.0, 3.0, num=4, base=2.0)\n array([4. , 5.0396842 , 6.34960421, 8. ])\n >>> np.logspace(2.0, 3.0, num=4, base=[2.0, 3.0], axis=-1)\n array([[ 4. , 5.0396842 , 6.34960421, 8. ],\n [ 9. , 12.98024613, 18.72075441, 27. 
]])\n\n Graphical illustration:\n\n >>> import matplotlib.pyplot as plt\n >>> N = 10\n >>> x1 = np.logspace(0.1, 1, N, endpoint=True)\n >>> x2 = np.logspace(0.1, 1, N, endpoint=False)\n >>> y = np.zeros(N)\n >>> plt.plot(x1, y, 'o')\n [<matplotlib.lines.Line2D object at 0x...>]\n >>> plt.plot(x2, y + 0.5, 'o')\n [<matplotlib.lines.Line2D object at 0x...>]\n >>> plt.ylim([-0.5, 1])\n (-0.5, 1)\n >>> plt.show()\n\n """\n if not isinstance(base, (float, int)) and np.ndim(base):\n # If base is non-scalar, broadcast it with the others, since it\n # may influence how axis is interpreted.\n ndmax = np.broadcast(start, stop, base).ndim\n start, stop, base = (\n np.array(a, copy=None, subok=True, ndmin=ndmax)\n for a in (start, stop, base)\n )\n base = np.expand_dims(base, axis=axis)\n y = linspace(start, stop, num=num, endpoint=endpoint, axis=axis)\n if dtype is None:\n return _nx.power(base, y)\n return _nx.power(base, y).astype(dtype, copy=False)\n\n\ndef _geomspace_dispatcher(start, stop, num=None, endpoint=None, dtype=None,\n axis=None):\n return (start, stop)\n\n\n@array_function_dispatch(_geomspace_dispatcher)\ndef geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0):\n """\n Return numbers spaced evenly on a log scale (a geometric progression).\n\n This is similar to `logspace`, but with endpoints specified directly.\n Each output sample is a constant multiple of the previous.\n\n Parameters\n ----------\n start : array_like\n The starting value of the sequence.\n stop : array_like\n The final value of the sequence, unless `endpoint` is False.\n In that case, ``num + 1`` values are spaced over the\n interval in log-space, of which all but the last (a sequence of\n length `num`) are returned.\n num : integer, optional\n Number of samples to generate. Default is 50.\n endpoint : boolean, optional\n If true, `stop` is the last sample. Otherwise, it is not included.\n Default is True.\n dtype : dtype\n The type of the output array. 
If `dtype` is not given, the data type\n is inferred from `start` and `stop`. The inferred dtype will never be\n an integer; `float` is chosen even if the arguments would produce an\n array of integers.\n axis : int, optional\n The axis in the result to store the samples. Relevant only if start\n or stop are array-like. By default (0), the samples will be along a\n new axis inserted at the beginning. Use -1 to get an axis at the end.\n\n Returns\n -------\n samples : ndarray\n `num` samples, equally spaced on a log scale.\n\n See Also\n --------\n logspace : Similar to geomspace, but with endpoints specified using log\n and base.\n linspace : Similar to geomspace, but with arithmetic instead of geometric\n progression.\n arange : Similar to linspace, with the step size specified instead of the\n number of samples.\n :ref:`how-to-partition`\n\n Notes\n -----\n If the inputs or dtype are complex, the output will follow a logarithmic\n spiral in the complex plane. (There are an infinite number of spirals\n passing through two points; the output will follow the shortest such path.)\n\n Examples\n --------\n >>> import numpy as np\n >>> np.geomspace(1, 1000, num=4)\n array([ 1., 10., 100., 1000.])\n >>> np.geomspace(1, 1000, num=3, endpoint=False)\n array([ 1., 10., 100.])\n >>> np.geomspace(1, 1000, num=4, endpoint=False)\n array([ 1. , 5.62341325, 31.6227766 , 177.827941 ])\n >>> np.geomspace(1, 256, num=9)\n array([ 1., 2., 4., 8., 16., 32., 64., 128., 256.])\n\n Note that the above may not produce exact integers:\n\n >>> np.geomspace(1, 256, num=9, dtype=int)\n array([ 1, 2, 4, 7, 16, 32, 63, 127, 256])\n >>> np.around(np.geomspace(1, 256, num=9)).astype(int)\n array([ 1, 2, 4, 8, 16, 32, 64, 128, 256])\n\n Negative, decreasing, and complex inputs are allowed:\n\n >>> np.geomspace(1000, 1, num=4)\n array([1000., 100., 10., 1.])\n >>> np.geomspace(-1000, -1, num=4)\n array([-1000., -100., -10., -1.])\n >>> np.geomspace(1j, 1000j, num=4) # Straight line\n array([0. 
+1.j, 0. +10.j, 0. +100.j, 0.+1000.j])\n >>> np.geomspace(-1+0j, 1+0j, num=5) # Circle\n array([-1.00000000e+00+1.22464680e-16j, -7.07106781e-01+7.07106781e-01j,\n 6.12323400e-17+1.00000000e+00j, 7.07106781e-01+7.07106781e-01j,\n 1.00000000e+00+0.00000000e+00j])\n\n Graphical illustration of `endpoint` parameter:\n\n >>> import matplotlib.pyplot as plt\n >>> N = 10\n >>> y = np.zeros(N)\n >>> plt.semilogx(np.geomspace(1, 1000, N, endpoint=True), y + 1, 'o')\n [<matplotlib.lines.Line2D object at 0x...>]\n >>> plt.semilogx(np.geomspace(1, 1000, N, endpoint=False), y + 2, 'o')\n [<matplotlib.lines.Line2D object at 0x...>]\n >>> plt.axis([0.5, 2000, 0, 3])\n [0.5, 2000, 0, 3]\n >>> plt.grid(True, color='0.7', linestyle='-', which='both', axis='both')\n >>> plt.show()\n\n """\n start = asanyarray(start)\n stop = asanyarray(stop)\n if _nx.any(start == 0) or _nx.any(stop == 0):\n raise ValueError('Geometric sequence cannot include zero')\n\n dt = result_type(start, stop, float(num), _nx.zeros((), dtype))\n if dtype is None:\n dtype = dt\n else:\n # complex to dtype('complex128'), for instance\n dtype = _nx.dtype(dtype)\n\n # Promote both arguments to the same dtype in case, for instance, one is\n # complex and another is negative and log would produce NaN otherwise.\n # Copy since we may change things in-place further down.\n start = start.astype(dt, copy=True)\n stop = stop.astype(dt, copy=True)\n\n # Allow negative real values and ensure a consistent result for complex\n # (including avoiding negligible real or imaginary parts in output) by\n # rotating start to positive real, calculating, then undoing rotation.\n out_sign = _nx.sign(start)\n start /= out_sign\n stop = stop / out_sign\n\n log_start = _nx.log10(start)\n log_stop = _nx.log10(stop)\n result = logspace(log_start, log_stop, num=num,\n endpoint=endpoint, base=10.0, dtype=dt)\n\n # Make sure the endpoints match the start and stop arguments. 
This is\n # necessary because np.exp(np.log(x)) is not necessarily equal to x.\n if num > 0:\n result[0] = start\n if num > 1 and endpoint:\n result[-1] = stop\n\n result *= out_sign\n\n if axis != 0:\n result = _nx.moveaxis(result, 0, axis)\n\n return result.astype(dtype, copy=False)\n\n\ndef _needs_add_docstring(obj):\n """\n Returns true if the only way to set the docstring of `obj` from python is\n via add_docstring.\n\n This function errs on the side of being overly conservative.\n """\n Py_TPFLAGS_HEAPTYPE = 1 << 9\n\n if isinstance(obj, (types.FunctionType, types.MethodType, property)):\n return False\n\n if isinstance(obj, type) and obj.__flags__ & Py_TPFLAGS_HEAPTYPE:\n return False\n\n return True\n\n\ndef _add_docstring(obj, doc, warn_on_python):\n if warn_on_python and not _needs_add_docstring(obj):\n warnings.warn(\n f"add_newdoc was used on a pure-python object {obj}. "\n "Prefer to attach it directly to the source.",\n UserWarning,\n stacklevel=3)\n try:\n add_docstring(obj, doc)\n except Exception:\n pass\n\n\ndef add_newdoc(place, obj, doc, warn_on_python=True):\n """\n Add documentation to an existing object, typically one defined in C\n\n The purpose is to allow easier editing of the docstrings without requiring\n a re-compile. 
This exists primarily for internal use within numpy itself.\n\n Parameters\n ----------\n place : str\n The absolute name of the module to import from\n obj : str or None\n The name of the object to add documentation to, typically a class or\n function name.\n doc : {str, Tuple[str, str], List[Tuple[str, str]]}\n If a string, the documentation to apply to `obj`\n\n If a tuple, then the first element is interpreted as an attribute\n of `obj` and the second as the docstring to apply -\n ``(method, docstring)``\n\n If a list, then each element of the list should be a tuple of length\n two - ``[(method1, docstring1), (method2, docstring2), ...]``\n warn_on_python : bool\n If True, the default, emit `UserWarning` if this is used to attach\n documentation to a pure-python object.\n\n Notes\n -----\n This routine never raises an error if the docstring can't be written, but\n will raise an error if the object being documented does not exist.\n\n This routine cannot modify read-only docstrings, as appear\n in new-style classes or built-in functions. 
Because this\n routine never raises an error the caller must check manually\n that the docstrings were changed.\n\n Since this function grabs the ``char *`` from a c-level str object and puts\n it into the ``tp_doc`` slot of the type of `obj`, it violates a number of\n C-API best-practices, by:\n\n - modifying a `PyTypeObject` after calling `PyType_Ready`\n - calling `Py_INCREF` on the str and losing the reference, so the str\n will never be released\n\n If possible it should be avoided.\n """\n new = getattr(__import__(place, globals(), {}, [obj]), obj)\n if isinstance(doc, str):\n if "${ARRAY_FUNCTION_LIKE}" in doc:\n doc = overrides.get_array_function_like_doc(new, doc)\n _add_docstring(new, doc.strip(), warn_on_python)\n elif isinstance(doc, tuple):\n attr, docstring = doc\n _add_docstring(getattr(new, attr), docstring.strip(), warn_on_python)\n elif isinstance(doc, list):\n for attr, docstring in doc:\n _add_docstring(\n getattr(new, attr), docstring.strip(), warn_on_python\n )\n
.venv\Lib\site-packages\numpy\_core\function_base.py
function_base.py
Python
20,228
0.95
0.108257
0.043956
vue-tools
471
2024-02-22T08:49:52.025738
BSD-3-Clause
false
45883af1fb86ae1f231dd7c2bf6891d4
from typing import Literal as L\nfrom typing import SupportsIndex, TypeAlias, TypeVar, overload\n\nfrom _typeshed import Incomplete\n\nimport numpy as np\nfrom numpy._typing import (\n DTypeLike,\n NDArray,\n _ArrayLikeComplex_co,\n _ArrayLikeFloat_co,\n _DTypeLike,\n)\nfrom numpy._typing._array_like import _DualArrayLike\n\n__all__ = ["geomspace", "linspace", "logspace"]\n\n_ScalarT = TypeVar("_ScalarT", bound=np.generic)\n\n_ToArrayFloat64: TypeAlias = _DualArrayLike[np.dtype[np.float64 | np.integer | np.bool], float]\n\n@overload\ndef linspace(\n start: _ToArrayFloat64,\n stop: _ToArrayFloat64,\n num: SupportsIndex = 50,\n endpoint: bool = True,\n retstep: L[False] = False,\n dtype: None = None,\n axis: SupportsIndex = 0,\n *,\n device: L["cpu"] | None = None,\n) -> NDArray[np.float64]: ...\n@overload\ndef linspace(\n start: _ArrayLikeFloat_co,\n stop: _ArrayLikeFloat_co,\n num: SupportsIndex = 50,\n endpoint: bool = True,\n retstep: L[False] = False,\n dtype: None = None,\n axis: SupportsIndex = 0,\n *,\n device: L["cpu"] | None = None,\n) -> NDArray[np.floating]: ...\n@overload\ndef linspace(\n start: _ArrayLikeComplex_co,\n stop: _ArrayLikeComplex_co,\n num: SupportsIndex = 50,\n endpoint: bool = True,\n retstep: L[False] = False,\n dtype: None = None,\n axis: SupportsIndex = 0,\n *,\n device: L["cpu"] | None = None,\n) -> NDArray[np.complexfloating]: ...\n@overload\ndef linspace(\n start: _ArrayLikeComplex_co,\n stop: _ArrayLikeComplex_co,\n num: SupportsIndex,\n endpoint: bool,\n retstep: L[False],\n dtype: _DTypeLike[_ScalarT],\n axis: SupportsIndex = 0,\n *,\n device: L["cpu"] | None = None,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef linspace(\n start: _ArrayLikeComplex_co,\n stop: _ArrayLikeComplex_co,\n num: SupportsIndex = 50,\n endpoint: bool = True,\n retstep: L[False] = False,\n *,\n dtype: _DTypeLike[_ScalarT],\n axis: SupportsIndex = 0,\n device: L["cpu"] | None = None,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef linspace(\n start: 
_ArrayLikeComplex_co,\n stop: _ArrayLikeComplex_co,\n num: SupportsIndex = 50,\n endpoint: bool = True,\n retstep: L[False] = False,\n dtype: DTypeLike | None = None,\n axis: SupportsIndex = 0,\n *,\n device: L["cpu"] | None = None,\n) -> NDArray[Incomplete]: ...\n@overload\ndef linspace(\n start: _ToArrayFloat64,\n stop: _ToArrayFloat64,\n num: SupportsIndex = 50,\n endpoint: bool = True,\n *,\n retstep: L[True],\n dtype: None = None,\n axis: SupportsIndex = 0,\n device: L["cpu"] | None = None,\n) -> tuple[NDArray[np.float64], np.float64]: ...\n@overload\ndef linspace(\n start: _ArrayLikeFloat_co,\n stop: _ArrayLikeFloat_co,\n num: SupportsIndex = 50,\n endpoint: bool = True,\n *,\n retstep: L[True],\n dtype: None = None,\n axis: SupportsIndex = 0,\n device: L["cpu"] | None = None,\n) -> tuple[NDArray[np.floating], np.floating]: ...\n@overload\ndef linspace(\n start: _ArrayLikeComplex_co,\n stop: _ArrayLikeComplex_co,\n num: SupportsIndex = 50,\n endpoint: bool = True,\n *,\n retstep: L[True],\n dtype: None = None,\n axis: SupportsIndex = 0,\n device: L["cpu"] | None = None,\n) -> tuple[NDArray[np.complexfloating], np.complexfloating]: ...\n@overload\ndef linspace(\n start: _ArrayLikeComplex_co,\n stop: _ArrayLikeComplex_co,\n num: SupportsIndex = 50,\n endpoint: bool = True,\n *,\n retstep: L[True],\n dtype: _DTypeLike[_ScalarT],\n axis: SupportsIndex = 0,\n device: L["cpu"] | None = None,\n) -> tuple[NDArray[_ScalarT], _ScalarT]: ...\n@overload\ndef linspace(\n start: _ArrayLikeComplex_co,\n stop: _ArrayLikeComplex_co,\n num: SupportsIndex = 50,\n endpoint: bool = True,\n *,\n retstep: L[True],\n dtype: DTypeLike | None = None,\n axis: SupportsIndex = 0,\n device: L["cpu"] | None = None,\n) -> tuple[NDArray[Incomplete], Incomplete]: ...\n\n@overload\ndef logspace(\n start: _ToArrayFloat64,\n stop: _ToArrayFloat64,\n num: SupportsIndex = 50,\n endpoint: bool = True,\n base: _ToArrayFloat64 = 10.0,\n dtype: None = None,\n axis: SupportsIndex = 0,\n) -> 
NDArray[np.float64]: ...\n@overload\ndef logspace(\n start: _ArrayLikeFloat_co,\n stop: _ArrayLikeFloat_co,\n num: SupportsIndex = 50,\n endpoint: bool = True,\n base: _ArrayLikeFloat_co = 10.0,\n dtype: None = None,\n axis: SupportsIndex = 0,\n) -> NDArray[np.floating]: ...\n@overload\ndef logspace(\n start: _ArrayLikeComplex_co,\n stop: _ArrayLikeComplex_co,\n num: SupportsIndex = 50,\n endpoint: bool = True,\n base: _ArrayLikeComplex_co = 10.0,\n dtype: None = None,\n axis: SupportsIndex = 0,\n) -> NDArray[np.complexfloating]: ...\n@overload\ndef logspace(\n start: _ArrayLikeComplex_co,\n stop: _ArrayLikeComplex_co,\n num: SupportsIndex,\n endpoint: bool,\n base: _ArrayLikeComplex_co,\n dtype: _DTypeLike[_ScalarT],\n axis: SupportsIndex = 0,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef logspace(\n start: _ArrayLikeComplex_co,\n stop: _ArrayLikeComplex_co,\n num: SupportsIndex = 50,\n endpoint: bool = True,\n base: _ArrayLikeComplex_co = 10.0,\n *,\n dtype: _DTypeLike[_ScalarT],\n axis: SupportsIndex = 0,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef logspace(\n start: _ArrayLikeComplex_co,\n stop: _ArrayLikeComplex_co,\n num: SupportsIndex = 50,\n endpoint: bool = True,\n base: _ArrayLikeComplex_co = 10.0,\n dtype: DTypeLike | None = None,\n axis: SupportsIndex = 0,\n) -> NDArray[Incomplete]: ...\n\n@overload\ndef geomspace(\n start: _ToArrayFloat64,\n stop: _ToArrayFloat64,\n num: SupportsIndex = 50,\n endpoint: bool = True,\n dtype: None = None,\n axis: SupportsIndex = 0,\n) -> NDArray[np.float64]: ...\n@overload\ndef geomspace(\n start: _ArrayLikeFloat_co,\n stop: _ArrayLikeFloat_co,\n num: SupportsIndex = 50,\n endpoint: bool = True,\n dtype: None = None,\n axis: SupportsIndex = 0,\n) -> NDArray[np.floating]: ...\n@overload\ndef geomspace(\n start: _ArrayLikeComplex_co,\n stop: _ArrayLikeComplex_co,\n num: SupportsIndex = 50,\n endpoint: bool = True,\n dtype: None = None,\n axis: SupportsIndex = 0,\n) -> NDArray[np.complexfloating]: ...\n@overload\ndef 
geomspace(\n start: _ArrayLikeComplex_co,\n stop: _ArrayLikeComplex_co,\n num: SupportsIndex,\n endpoint: bool,\n dtype: _DTypeLike[_ScalarT],\n axis: SupportsIndex = 0,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef geomspace(\n start: _ArrayLikeComplex_co,\n stop: _ArrayLikeComplex_co,\n num: SupportsIndex = 50,\n endpoint: bool = True,\n *,\n dtype: _DTypeLike[_ScalarT],\n axis: SupportsIndex = 0,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef geomspace(\n start: _ArrayLikeComplex_co,\n stop: _ArrayLikeComplex_co,\n num: SupportsIndex = 50,\n endpoint: bool = True,\n dtype: DTypeLike | None = None,\n axis: SupportsIndex = 0,\n) -> NDArray[Incomplete]: ...\n\ndef add_newdoc(\n place: str,\n obj: str,\n doc: str | tuple[str, str] | list[tuple[str, str]],\n warn_on_python: bool = True,\n) -> None: ...\n
.venv\Lib\site-packages\numpy\_core\function_base.pyi
function_base.pyi
Other
7,342
0.85
0.086331
0.048327
react-lib
701
2023-08-03T09:21:20.840477
Apache-2.0
false
659bbac0d82f32deaa00adf7155a52a0
"""Machine limits for Float32 and Float64 and (long double) if available...\n\n"""\n__all__ = ['finfo', 'iinfo']\n\nimport types\nimport warnings\n\nfrom numpy._utils import set_module\n\nfrom . import numeric\nfrom . import numerictypes as ntypes\nfrom ._machar import MachAr\nfrom .numeric import array, inf, nan\nfrom .umath import exp2, isnan, log10, nextafter\n\n\ndef _fr0(a):\n """fix rank-0 --> rank-1"""\n if a.ndim == 0:\n a = a.copy()\n a.shape = (1,)\n return a\n\n\ndef _fr1(a):\n """fix rank > 0 --> rank-0"""\n if a.size == 1:\n a = a.copy()\n a.shape = ()\n return a\n\n\nclass MachArLike:\n """ Object to simulate MachAr instance """\n def __init__(self, ftype, *, eps, epsneg, huge, tiny,\n ibeta, smallest_subnormal=None, **kwargs):\n self.params = _MACHAR_PARAMS[ftype]\n self.ftype = ftype\n self.title = self.params['title']\n # Parameter types same as for discovered MachAr object.\n if not smallest_subnormal:\n self._smallest_subnormal = nextafter(\n self.ftype(0), self.ftype(1), dtype=self.ftype)\n else:\n self._smallest_subnormal = smallest_subnormal\n self.epsilon = self.eps = self._float_to_float(eps)\n self.epsneg = self._float_to_float(epsneg)\n self.xmax = self.huge = self._float_to_float(huge)\n self.xmin = self._float_to_float(tiny)\n self.smallest_normal = self.tiny = self._float_to_float(tiny)\n self.ibeta = self.params['itype'](ibeta)\n self.__dict__.update(kwargs)\n self.precision = int(-log10(self.eps))\n self.resolution = self._float_to_float(\n self._float_conv(10) ** (-self.precision))\n self._str_eps = self._float_to_str(self.eps)\n self._str_epsneg = self._float_to_str(self.epsneg)\n self._str_xmin = self._float_to_str(self.xmin)\n self._str_xmax = self._float_to_str(self.xmax)\n self._str_resolution = self._float_to_str(self.resolution)\n self._str_smallest_normal = self._float_to_str(self.xmin)\n\n @property\n def smallest_subnormal(self):\n """Return the value for the smallest subnormal.\n\n Returns\n -------\n smallest_subnormal : 
float\n value for the smallest subnormal.\n\n Warns\n -----\n UserWarning\n If the calculated value for the smallest subnormal is zero.\n """\n # Check that the calculated value is not zero, in case it raises a\n # warning.\n value = self._smallest_subnormal\n if self.ftype(0) == value:\n warnings.warn(\n f'The value of the smallest subnormal for {self.ftype} type is zero.',\n UserWarning, stacklevel=2)\n\n return self._float_to_float(value)\n\n @property\n def _str_smallest_subnormal(self):\n """Return the string representation of the smallest subnormal."""\n return self._float_to_str(self.smallest_subnormal)\n\n def _float_to_float(self, value):\n """Converts float to float.\n\n Parameters\n ----------\n value : float\n value to be converted.\n """\n return _fr1(self._float_conv(value))\n\n def _float_conv(self, value):\n """Converts float to conv.\n\n Parameters\n ----------\n value : float\n value to be converted.\n """\n return array([value], self.ftype)\n\n def _float_to_str(self, value):\n """Converts float to str.\n\n Parameters\n ----------\n value : float\n value to be converted.\n """\n return self.params['fmt'] % array(_fr0(value)[0], self.ftype)\n\n\n_convert_to_float = {\n ntypes.csingle: ntypes.single,\n ntypes.complex128: ntypes.float64,\n ntypes.clongdouble: ntypes.longdouble\n }\n\n# Parameters for creating MachAr / MachAr-like objects\n_title_fmt = 'numpy {} precision floating point number'\n_MACHAR_PARAMS = {\n ntypes.double: {\n 'itype': ntypes.int64,\n 'fmt': '%24.16e',\n 'title': _title_fmt.format('double')},\n ntypes.single: {\n 'itype': ntypes.int32,\n 'fmt': '%15.7e',\n 'title': _title_fmt.format('single')},\n ntypes.longdouble: {\n 'itype': ntypes.longlong,\n 'fmt': '%s',\n 'title': _title_fmt.format('long double')},\n ntypes.half: {\n 'itype': ntypes.int16,\n 'fmt': '%12.5e',\n 'title': _title_fmt.format('half')}}\n\n# Key to identify the floating point type. 
Key is result of\n#\n# ftype = np.longdouble # or float64, float32, etc.\n# v = (ftype(-1.0) / ftype(10.0))\n# v.view(v.dtype.newbyteorder('<')).tobytes()\n#\n# Uses division to work around deficiencies in strtold on some platforms.\n# See:\n# https://perl5.git.perl.org/perl.git/blob/3118d7d684b56cbeb702af874f4326683c45f045:/Configure\n\n_KNOWN_TYPES = {}\ndef _register_type(machar, bytepat):\n _KNOWN_TYPES[bytepat] = machar\n\n\n_float_ma = {}\n\n\ndef _register_known_types():\n # Known parameters for float16\n # See docstring of MachAr class for description of parameters.\n f16 = ntypes.float16\n float16_ma = MachArLike(f16,\n machep=-10,\n negep=-11,\n minexp=-14,\n maxexp=16,\n it=10,\n iexp=5,\n ibeta=2,\n irnd=5,\n ngrd=0,\n eps=exp2(f16(-10)),\n epsneg=exp2(f16(-11)),\n huge=f16(65504),\n tiny=f16(2 ** -14))\n _register_type(float16_ma, b'f\xae')\n _float_ma[16] = float16_ma\n\n # Known parameters for float32\n f32 = ntypes.float32\n float32_ma = MachArLike(f32,\n machep=-23,\n negep=-24,\n minexp=-126,\n maxexp=128,\n it=23,\n iexp=8,\n ibeta=2,\n irnd=5,\n ngrd=0,\n eps=exp2(f32(-23)),\n epsneg=exp2(f32(-24)),\n huge=f32((1 - 2 ** -24) * 2**128),\n tiny=exp2(f32(-126)))\n _register_type(float32_ma, b'\xcd\xcc\xcc\xbd')\n _float_ma[32] = float32_ma\n\n # Known parameters for float64\n f64 = ntypes.float64\n epsneg_f64 = 2.0 ** -53.0\n tiny_f64 = 2.0 ** -1022.0\n float64_ma = MachArLike(f64,\n machep=-52,\n negep=-53,\n minexp=-1022,\n maxexp=1024,\n it=52,\n iexp=11,\n ibeta=2,\n irnd=5,\n ngrd=0,\n eps=2.0 ** -52.0,\n epsneg=epsneg_f64,\n huge=(1.0 - epsneg_f64) / tiny_f64 * f64(4),\n tiny=tiny_f64)\n _register_type(float64_ma, b'\x9a\x99\x99\x99\x99\x99\xb9\xbf')\n _float_ma[64] = float64_ma\n\n # Known parameters for IEEE 754 128-bit binary float\n ld = ntypes.longdouble\n epsneg_f128 = exp2(ld(-113))\n tiny_f128 = exp2(ld(-16382))\n # Ignore runtime error when this is not f128\n with numeric.errstate(all='ignore'):\n huge_f128 = (ld(1) - epsneg_f128) / 
tiny_f128 * ld(4)\n float128_ma = MachArLike(ld,\n machep=-112,\n negep=-113,\n minexp=-16382,\n maxexp=16384,\n it=112,\n iexp=15,\n ibeta=2,\n irnd=5,\n ngrd=0,\n eps=exp2(ld(-112)),\n epsneg=epsneg_f128,\n huge=huge_f128,\n tiny=tiny_f128)\n # IEEE 754 128-bit binary float\n _register_type(float128_ma,\n b'\x9a\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\xfb\xbf')\n _float_ma[128] = float128_ma\n\n # Known parameters for float80 (Intel 80-bit extended precision)\n epsneg_f80 = exp2(ld(-64))\n tiny_f80 = exp2(ld(-16382))\n # Ignore runtime error when this is not f80\n with numeric.errstate(all='ignore'):\n huge_f80 = (ld(1) - epsneg_f80) / tiny_f80 * ld(4)\n float80_ma = MachArLike(ld,\n machep=-63,\n negep=-64,\n minexp=-16382,\n maxexp=16384,\n it=63,\n iexp=15,\n ibeta=2,\n irnd=5,\n ngrd=0,\n eps=exp2(ld(-63)),\n epsneg=epsneg_f80,\n huge=huge_f80,\n tiny=tiny_f80)\n # float80, first 10 bytes containing actual storage\n _register_type(float80_ma, b'\xcd\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xfb\xbf')\n _float_ma[80] = float80_ma\n\n # Guessed / known parameters for double double; see:\n # https://en.wikipedia.org/wiki/Quadruple-precision_floating-point_format#Double-double_arithmetic\n # These numbers have the same exponent range as float64, but extended\n # number of digits in the significand.\n huge_dd = nextafter(ld(inf), ld(0), dtype=ld)\n # As the smallest_normal in double double is so hard to calculate we set\n # it to NaN.\n smallest_normal_dd = nan\n # Leave the same value for the smallest subnormal as double\n smallest_subnormal_dd = ld(nextafter(0., 1.))\n float_dd_ma = MachArLike(ld,\n machep=-105,\n negep=-106,\n minexp=-1022,\n maxexp=1024,\n it=105,\n iexp=11,\n ibeta=2,\n irnd=5,\n ngrd=0,\n eps=exp2(ld(-105)),\n epsneg=exp2(ld(-106)),\n huge=huge_dd,\n tiny=smallest_normal_dd,\n smallest_subnormal=smallest_subnormal_dd)\n # double double; low, high order (e.g. 
PPC 64)\n _register_type(float_dd_ma,\n b'\x9a\x99\x99\x99\x99\x99Y<\x9a\x99\x99\x99\x99\x99\xb9\xbf')\n # double double; high, low order (e.g. PPC 64 le)\n _register_type(float_dd_ma,\n b'\x9a\x99\x99\x99\x99\x99\xb9\xbf\x9a\x99\x99\x99\x99\x99Y<')\n _float_ma['dd'] = float_dd_ma\n\n\ndef _get_machar(ftype):\n """ Get MachAr instance or MachAr-like instance\n\n Get parameters for floating point type, by first trying signatures of\n various known floating point types, then, if none match, attempting to\n identify parameters by analysis.\n\n Parameters\n ----------\n ftype : class\n Numpy floating point type class (e.g. ``np.float64``)\n\n Returns\n -------\n ma_like : instance of :class:`MachAr` or :class:`MachArLike`\n Object giving floating point parameters for `ftype`.\n\n Warns\n -----\n UserWarning\n If the binary signature of the float type is not in the dictionary of\n known float types.\n """\n params = _MACHAR_PARAMS.get(ftype)\n if params is None:\n raise ValueError(repr(ftype))\n # Detect known / suspected types\n # ftype(-1.0) / ftype(10.0) is better than ftype('-0.1') because stold\n # may be deficient\n key = (ftype(-1.0) / ftype(10.))\n key = key.view(key.dtype.newbyteorder("<")).tobytes()\n ma_like = None\n if ftype == ntypes.longdouble:\n # Could be 80 bit == 10 byte extended precision, where last bytes can\n # be random garbage.\n # Comparing first 10 bytes to pattern first to avoid branching on the\n # random garbage.\n ma_like = _KNOWN_TYPES.get(key[:10])\n if ma_like is None:\n # see if the full key is known.\n ma_like = _KNOWN_TYPES.get(key)\n if ma_like is None and len(key) == 16:\n # machine limits could be f80 masquerading as np.float128,\n # find all keys with length 16 and make new dict, but make the keys\n # only 10 bytes long, the last bytes can be random garbage\n _kt = {k[:10]: v for k, v in _KNOWN_TYPES.items() if len(k) == 16}\n ma_like = _kt.get(key[:10])\n if ma_like is not None:\n return ma_like\n # Fall back to parameter 
discovery\n warnings.warn(\n f'Signature {key} for {ftype} does not match any known type: '\n 'falling back to type probe function.\n'\n 'This warnings indicates broken support for the dtype!',\n UserWarning, stacklevel=2)\n return _discovered_machar(ftype)\n\n\ndef _discovered_machar(ftype):\n """ Create MachAr instance with found information on float types\n\n TODO: MachAr should be retired completely ideally. We currently only\n ever use it system with broken longdouble (valgrind, WSL).\n """\n params = _MACHAR_PARAMS[ftype]\n return MachAr(lambda v: array([v], ftype),\n lambda v: _fr0(v.astype(params['itype']))[0],\n lambda v: array(_fr0(v)[0], ftype),\n lambda v: params['fmt'] % array(_fr0(v)[0], ftype),\n params['title'])\n\n\n@set_module('numpy')\nclass finfo:\n """\n finfo(dtype)\n\n Machine limits for floating point types.\n\n Attributes\n ----------\n bits : int\n The number of bits occupied by the type.\n dtype : dtype\n Returns the dtype for which `finfo` returns information. For complex\n input, the returned dtype is the associated ``float*`` dtype for its\n real and complex components.\n eps : float\n The difference between 1.0 and the next smallest representable float\n larger than 1.0. For example, for 64-bit binary floats in the IEEE-754\n standard, ``eps = 2**-52``, approximately 2.22e-16.\n epsneg : float\n The difference between 1.0 and the next smallest representable float\n less than 1.0. 
For example, for 64-bit binary floats in the IEEE-754\n standard, ``epsneg = 2**-53``, approximately 1.11e-16.\n iexp : int\n The number of bits in the exponent portion of the floating point\n representation.\n machep : int\n The exponent that yields `eps`.\n max : floating point number of the appropriate type\n The largest representable number.\n maxexp : int\n The smallest positive power of the base (2) that causes overflow.\n min : floating point number of the appropriate type\n The smallest representable number, typically ``-max``.\n minexp : int\n The most negative power of the base (2) consistent with there\n being no leading 0's in the mantissa.\n negep : int\n The exponent that yields `epsneg`.\n nexp : int\n The number of bits in the exponent including its sign and bias.\n nmant : int\n The number of bits in the mantissa.\n precision : int\n The approximate number of decimal digits to which this kind of\n float is precise.\n resolution : floating point number of the appropriate type\n The approximate decimal resolution of this type, i.e.,\n ``10**-precision``.\n tiny : float\n An alias for `smallest_normal`, kept for backwards compatibility.\n smallest_normal : float\n The smallest positive floating point number with 1 as leading bit in\n the mantissa following IEEE-754 (see Notes).\n smallest_subnormal : float\n The smallest positive floating point number with 0 as leading bit in\n the mantissa following IEEE-754.\n\n Parameters\n ----------\n dtype : float, dtype, or instance\n Kind of floating point or complex floating point\n data-type about which to get information.\n\n See Also\n --------\n iinfo : The equivalent for integer data types.\n spacing : The distance between a value and the nearest adjacent number\n nextafter : The next floating point value after x1 towards x2\n\n Notes\n -----\n For developers of NumPy: do not instantiate this at the module level.\n The initial calculation of these parameters is expensive and negatively\n impacts import 
times. These objects are cached, so calling ``finfo()``\n repeatedly inside your functions is not a problem.\n\n Note that ``smallest_normal`` is not actually the smallest positive\n representable value in a NumPy floating point type. As in the IEEE-754\n standard [1]_, NumPy floating point types make use of subnormal numbers to\n fill the gap between 0 and ``smallest_normal``. However, subnormal numbers\n may have significantly reduced precision [2]_.\n\n This function can also be used for complex data types as well. If used,\n the output will be the same as the corresponding real float type\n (e.g. numpy.finfo(numpy.csingle) is the same as numpy.finfo(numpy.single)).\n However, the output is true for the real and imaginary components.\n\n References\n ----------\n .. [1] IEEE Standard for Floating-Point Arithmetic, IEEE Std 754-2008,\n pp.1-70, 2008, https://doi.org/10.1109/IEEESTD.2008.4610935\n .. [2] Wikipedia, "Denormal Numbers",\n https://en.wikipedia.org/wiki/Denormal_number\n\n Examples\n --------\n >>> import numpy as np\n >>> np.finfo(np.float64).dtype\n dtype('float64')\n >>> np.finfo(np.complex64).dtype\n dtype('float32')\n\n """\n\n _finfo_cache = {}\n\n __class_getitem__ = classmethod(types.GenericAlias)\n\n def __new__(cls, dtype):\n try:\n obj = cls._finfo_cache.get(dtype) # most common path\n if obj is not None:\n return obj\n except TypeError:\n pass\n\n if dtype is None:\n # Deprecated in NumPy 1.25, 2023-01-16\n warnings.warn(\n "finfo() dtype cannot be None. This behavior will "\n "raise an error in the future. 
(Deprecated in NumPy 1.25)",\n DeprecationWarning,\n stacklevel=2\n )\n\n try:\n dtype = numeric.dtype(dtype)\n except TypeError:\n # In case a float instance was given\n dtype = numeric.dtype(type(dtype))\n\n obj = cls._finfo_cache.get(dtype)\n if obj is not None:\n return obj\n dtypes = [dtype]\n newdtype = ntypes.obj2sctype(dtype)\n if newdtype is not dtype:\n dtypes.append(newdtype)\n dtype = newdtype\n if not issubclass(dtype, numeric.inexact):\n raise ValueError(f"data type {dtype!r} not inexact")\n obj = cls._finfo_cache.get(dtype)\n if obj is not None:\n return obj\n if not issubclass(dtype, numeric.floating):\n newdtype = _convert_to_float[dtype]\n if newdtype is not dtype:\n # dtype changed, for example from complex128 to float64\n dtypes.append(newdtype)\n dtype = newdtype\n\n obj = cls._finfo_cache.get(dtype, None)\n if obj is not None:\n # the original dtype was not in the cache, but the new\n # dtype is in the cache. we add the original dtypes to\n # the cache and return the result\n for dt in dtypes:\n cls._finfo_cache[dt] = obj\n return obj\n obj = object.__new__(cls)._init(dtype)\n for dt in dtypes:\n cls._finfo_cache[dt] = obj\n return obj\n\n def _init(self, dtype):\n self.dtype = numeric.dtype(dtype)\n machar = _get_machar(dtype)\n\n for word in ['precision', 'iexp',\n 'maxexp', 'minexp', 'negep',\n 'machep']:\n setattr(self, word, getattr(machar, word))\n for word in ['resolution', 'epsneg', 'smallest_subnormal']:\n setattr(self, word, getattr(machar, word).flat[0])\n self.bits = self.dtype.itemsize * 8\n self.max = machar.huge.flat[0]\n self.min = -self.max\n self.eps = machar.eps.flat[0]\n self.nexp = machar.iexp\n self.nmant = machar.it\n self._machar = machar\n self._str_tiny = machar._str_xmin.strip()\n self._str_max = machar._str_xmax.strip()\n self._str_epsneg = machar._str_epsneg.strip()\n self._str_eps = machar._str_eps.strip()\n self._str_resolution = machar._str_resolution.strip()\n self._str_smallest_normal = 
machar._str_smallest_normal.strip()\n self._str_smallest_subnormal = machar._str_smallest_subnormal.strip()\n return self\n\n def __str__(self):\n fmt = (\n 'Machine parameters for %(dtype)s\n'\n '---------------------------------------------------------------\n'\n 'precision = %(precision)3s resolution = %(_str_resolution)s\n'\n 'machep = %(machep)6s eps = %(_str_eps)s\n'\n 'negep = %(negep)6s epsneg = %(_str_epsneg)s\n'\n 'minexp = %(minexp)6s tiny = %(_str_tiny)s\n'\n 'maxexp = %(maxexp)6s max = %(_str_max)s\n'\n 'nexp = %(nexp)6s min = -max\n'\n 'smallest_normal = %(_str_smallest_normal)s '\n 'smallest_subnormal = %(_str_smallest_subnormal)s\n'\n '---------------------------------------------------------------\n'\n )\n return fmt % self.__dict__\n\n def __repr__(self):\n c = self.__class__.__name__\n d = self.__dict__.copy()\n d['klass'] = c\n return (("%(klass)s(resolution=%(resolution)s, min=-%(_str_max)s,"\n " max=%(_str_max)s, dtype=%(dtype)s)") % d)\n\n @property\n def smallest_normal(self):\n """Return the value for the smallest normal.\n\n Returns\n -------\n smallest_normal : float\n Value for the smallest normal.\n\n Warns\n -----\n UserWarning\n If the calculated value for the smallest normal is requested for\n double-double.\n """\n # This check is necessary because the value for smallest_normal is\n # platform dependent for longdouble types.\n if isnan(self._machar.smallest_normal.flat[0]):\n warnings.warn(\n 'The value of smallest normal is undefined for double double',\n UserWarning, stacklevel=2)\n return self._machar.smallest_normal.flat[0]\n\n @property\n def tiny(self):\n """Return the value for tiny, alias of smallest_normal.\n\n Returns\n -------\n tiny : float\n Value for the smallest normal, alias of smallest_normal.\n\n Warns\n -----\n UserWarning\n If the calculated value for the smallest normal is requested for\n double-double.\n """\n return self.smallest_normal\n\n\n@set_module('numpy')\nclass iinfo:\n """\n iinfo(type)\n\n Machine 
limits for integer types.\n\n Attributes\n ----------\n bits : int\n The number of bits occupied by the type.\n dtype : dtype\n Returns the dtype for which `iinfo` returns information.\n min : int\n The smallest integer expressible by the type.\n max : int\n The largest integer expressible by the type.\n\n Parameters\n ----------\n int_type : integer type, dtype, or instance\n The kind of integer data type to get information about.\n\n See Also\n --------\n finfo : The equivalent for floating point data types.\n\n Examples\n --------\n With types:\n\n >>> import numpy as np\n >>> ii16 = np.iinfo(np.int16)\n >>> ii16.min\n -32768\n >>> ii16.max\n 32767\n >>> ii32 = np.iinfo(np.int32)\n >>> ii32.min\n -2147483648\n >>> ii32.max\n 2147483647\n\n With instances:\n\n >>> ii32 = np.iinfo(np.int32(10))\n >>> ii32.min\n -2147483648\n >>> ii32.max\n 2147483647\n\n """\n\n _min_vals = {}\n _max_vals = {}\n\n __class_getitem__ = classmethod(types.GenericAlias)\n\n def __init__(self, int_type):\n try:\n self.dtype = numeric.dtype(int_type)\n except TypeError:\n self.dtype = numeric.dtype(type(int_type))\n self.kind = self.dtype.kind\n self.bits = self.dtype.itemsize * 8\n self.key = "%s%d" % (self.kind, self.bits)\n if self.kind not in 'iu':\n raise ValueError(f"Invalid integer data type {self.kind!r}.")\n\n @property\n def min(self):\n """Minimum value of given dtype."""\n if self.kind == 'u':\n return 0\n else:\n try:\n val = iinfo._min_vals[self.key]\n except KeyError:\n val = int(-(1 << (self.bits - 1)))\n iinfo._min_vals[self.key] = val\n return val\n\n @property\n def max(self):\n """Maximum value of given dtype."""\n try:\n val = iinfo._max_vals[self.key]\n except KeyError:\n if self.kind == 'u':\n val = int((1 << self.bits) - 1)\n else:\n val = int((1 << (self.bits - 1)) - 1)\n iinfo._max_vals[self.key] = val\n return val\n\n def __str__(self):\n """String representation."""\n fmt = (\n 'Machine parameters for %(dtype)s\n'\n 
'---------------------------------------------------------------\n'\n 'min = %(min)s\n'\n 'max = %(max)s\n'\n '---------------------------------------------------------------\n'\n )\n return fmt % {'dtype': self.dtype, 'min': self.min, 'max': self.max}\n\n def __repr__(self):\n return "%s(min=%s, max=%s, dtype=%s)" % (self.__class__.__name__,\n self.min, self.max, self.dtype)\n
.venv\Lib\site-packages\numpy\_core\getlimits.py
getlimits.py
Python
26,849
0.95
0.15508
0.079148
python-kit
521
2024-07-30T13:55:21.423890
Apache-2.0
false
4a2f1131027bc3a4147496758573c1ed
from numpy import finfo, iinfo\n\n__all__ = ["finfo", "iinfo"]\n
.venv\Lib\site-packages\numpy\_core\getlimits.pyi
getlimits.pyi
Other
64
0.65
0
0
react-lib
205
2024-12-10T08:00:53.630216
GPL-3.0
false
1261c9f9590eee251f7aac779f41fe4b
import operator\nfrom contextlib import nullcontext\n\nimport numpy as np\nfrom numpy._utils import set_module\n\nfrom .numeric import dtype, ndarray, uint8\n\n__all__ = ['memmap']\n\ndtypedescr = dtype\nvalid_filemodes = ["r", "c", "r+", "w+"]\nwriteable_filemodes = ["r+", "w+"]\n\nmode_equivalents = {\n "readonly": "r",\n "copyonwrite": "c",\n "readwrite": "r+",\n "write": "w+"\n }\n\n\n@set_module('numpy')\nclass memmap(ndarray):\n """Create a memory-map to an array stored in a *binary* file on disk.\n\n Memory-mapped files are used for accessing small segments of large files\n on disk, without reading the entire file into memory. NumPy's\n memmap's are array-like objects. This differs from Python's ``mmap``\n module, which uses file-like objects.\n\n This subclass of ndarray has some unpleasant interactions with\n some operations, because it doesn't quite fit properly as a subclass.\n An alternative to using this subclass is to create the ``mmap``\n object yourself, then create an ndarray with ndarray.__new__ directly,\n passing the object created in its 'buffer=' parameter.\n\n This class may at some point be turned into a factory function\n which returns a view into an mmap buffer.\n\n Flush the memmap instance to write the changes to the file. Currently there\n is no API to close the underlying ``mmap``. It is tricky to ensure the\n resource is actually closed, since it may be shared between different\n memmap instances.\n\n\n Parameters\n ----------\n filename : str, file-like object, or pathlib.Path instance\n The file name or file object to be used as the array data buffer.\n dtype : data-type, optional\n The data-type used to interpret the file contents.\n Default is `uint8`.\n mode : {'r+', 'r', 'w+', 'c'}, optional\n The file is opened in this mode:\n\n +------+-------------------------------------------------------------+\n | 'r' | Open existing file for reading only. 
|\n +------+-------------------------------------------------------------+\n | 'r+' | Open existing file for reading and writing. |\n +------+-------------------------------------------------------------+\n | 'w+' | Create or overwrite existing file for reading and writing. |\n | | If ``mode == 'w+'`` then `shape` must also be specified. |\n +------+-------------------------------------------------------------+\n | 'c' | Copy-on-write: assignments affect data in memory, but |\n | | changes are not saved to disk. The file on disk is |\n | | read-only. |\n +------+-------------------------------------------------------------+\n\n Default is 'r+'.\n offset : int, optional\n In the file, array data starts at this offset. Since `offset` is\n measured in bytes, it should normally be a multiple of the byte-size\n of `dtype`. When ``mode != 'r'``, even positive offsets beyond end of\n file are valid; The file will be extended to accommodate the\n additional data. By default, ``memmap`` will start at the beginning of\n the file, even if ``filename`` is a file pointer ``fp`` and\n ``fp.tell() != 0``.\n shape : int or sequence of ints, optional\n The desired shape of the array. If ``mode == 'r'`` and the number\n of remaining bytes after `offset` is not a multiple of the byte-size\n of `dtype`, you must specify `shape`. By default, the returned array\n will be 1-D with the number of elements determined by file size\n and data-type.\n\n .. versionchanged:: 2.0\n The shape parameter can now be any integer sequence type, previously\n types were limited to tuple and int.\n\n order : {'C', 'F'}, optional\n Specify the order of the ndarray memory layout:\n :term:`row-major`, C-style or :term:`column-major`,\n Fortran-style. This only has an effect if the shape is\n greater than 1-D. 
The default order is 'C'.\n\n Attributes\n ----------\n filename : str or pathlib.Path instance\n Path to the mapped file.\n offset : int\n Offset position in the file.\n mode : str\n File mode.\n\n Methods\n -------\n flush\n Flush any changes in memory to file on disk.\n When you delete a memmap object, flush is called first to write\n changes to disk.\n\n\n See also\n --------\n lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file.\n\n Notes\n -----\n The memmap object can be used anywhere an ndarray is accepted.\n Given a memmap ``fp``, ``isinstance(fp, numpy.ndarray)`` returns\n ``True``.\n\n Memory-mapped files cannot be larger than 2GB on 32-bit systems.\n\n When a memmap causes a file to be created or extended beyond its\n current size in the filesystem, the contents of the new part are\n unspecified. On systems with POSIX filesystem semantics, the extended\n part will be filled with zero bytes.\n\n Examples\n --------\n >>> import numpy as np\n >>> data = np.arange(12, dtype='float32')\n >>> data.resize((3,4))\n\n This example uses a temporary file so that doctest doesn't write\n files to your directory. 
You would use a 'normal' filename.\n\n >>> from tempfile import mkdtemp\n >>> import os.path as path\n >>> filename = path.join(mkdtemp(), 'newfile.dat')\n\n Create a memmap with dtype and shape that matches our data:\n\n >>> fp = np.memmap(filename, dtype='float32', mode='w+', shape=(3,4))\n >>> fp\n memmap([[0., 0., 0., 0.],\n [0., 0., 0., 0.],\n [0., 0., 0., 0.]], dtype=float32)\n\n Write data to memmap array:\n\n >>> fp[:] = data[:]\n >>> fp\n memmap([[ 0., 1., 2., 3.],\n [ 4., 5., 6., 7.],\n [ 8., 9., 10., 11.]], dtype=float32)\n\n >>> fp.filename == path.abspath(filename)\n True\n\n Flushes memory changes to disk in order to read them back\n\n >>> fp.flush()\n\n Load the memmap and verify data was stored:\n\n >>> newfp = np.memmap(filename, dtype='float32', mode='r', shape=(3,4))\n >>> newfp\n memmap([[ 0., 1., 2., 3.],\n [ 4., 5., 6., 7.],\n [ 8., 9., 10., 11.]], dtype=float32)\n\n Read-only memmap:\n\n >>> fpr = np.memmap(filename, dtype='float32', mode='r', shape=(3,4))\n >>> fpr.flags.writeable\n False\n\n Copy-on-write memmap:\n\n >>> fpc = np.memmap(filename, dtype='float32', mode='c', shape=(3,4))\n >>> fpc.flags.writeable\n True\n\n It's possible to assign to copy-on-write array, but values are only\n written into the memory copy of the array, and not written to disk:\n\n >>> fpc\n memmap([[ 0., 1., 2., 3.],\n [ 4., 5., 6., 7.],\n [ 8., 9., 10., 11.]], dtype=float32)\n >>> fpc[0,:] = 0\n >>> fpc\n memmap([[ 0., 0., 0., 0.],\n [ 4., 5., 6., 7.],\n [ 8., 9., 10., 11.]], dtype=float32)\n\n File on disk is unchanged:\n\n >>> fpr\n memmap([[ 0., 1., 2., 3.],\n [ 4., 5., 6., 7.],\n [ 8., 9., 10., 11.]], dtype=float32)\n\n Offset into a memmap:\n\n >>> fpo = np.memmap(filename, dtype='float32', mode='r', offset=16)\n >>> fpo\n memmap([ 4., 5., 6., 7., 8., 9., 10., 11.], dtype=float32)\n\n """\n\n __array_priority__ = -100.0\n\n def __new__(subtype, filename, dtype=uint8, mode='r+', offset=0,\n shape=None, order='C'):\n # Import here to minimize 'import 
numpy' overhead\n import mmap\n import os.path\n try:\n mode = mode_equivalents[mode]\n except KeyError as e:\n if mode not in valid_filemodes:\n all_modes = valid_filemodes + list(mode_equivalents.keys())\n raise ValueError(\n f"mode must be one of {all_modes!r} (got {mode!r})"\n ) from None\n\n if mode == 'w+' and shape is None:\n raise ValueError("shape must be given if mode == 'w+'")\n\n if hasattr(filename, 'read'):\n f_ctx = nullcontext(filename)\n else:\n f_ctx = open(\n os.fspath(filename),\n ('r' if mode == 'c' else mode) + 'b'\n )\n\n with f_ctx as fid:\n fid.seek(0, 2)\n flen = fid.tell()\n descr = dtypedescr(dtype)\n _dbytes = descr.itemsize\n\n if shape is None:\n bytes = flen - offset\n if bytes % _dbytes:\n raise ValueError("Size of available data is not a "\n "multiple of the data-type size.")\n size = bytes // _dbytes\n shape = (size,)\n else:\n if not isinstance(shape, (tuple, list)):\n try:\n shape = [operator.index(shape)]\n except TypeError:\n pass\n shape = tuple(shape)\n size = np.intp(1) # avoid overflows\n for k in shape:\n size *= k\n\n bytes = int(offset + size * _dbytes)\n\n if mode in ('w+', 'r+'):\n # gh-27723\n # if bytes == 0, we write out 1 byte to allow empty memmap.\n bytes = max(bytes, 1)\n if flen < bytes:\n fid.seek(bytes - 1, 0)\n fid.write(b'\0')\n fid.flush()\n\n if mode == 'c':\n acc = mmap.ACCESS_COPY\n elif mode == 'r':\n acc = mmap.ACCESS_READ\n else:\n acc = mmap.ACCESS_WRITE\n\n start = offset - offset % mmap.ALLOCATIONGRANULARITY\n bytes -= start\n # bytes == 0 is problematic as in mmap length=0 maps the full file.\n # See PR gh-27723 for a more detailed explanation.\n if bytes == 0 and start > 0:\n bytes += mmap.ALLOCATIONGRANULARITY\n start -= mmap.ALLOCATIONGRANULARITY\n array_offset = offset - start\n mm = mmap.mmap(fid.fileno(), bytes, access=acc, offset=start)\n\n self = ndarray.__new__(subtype, shape, dtype=descr, buffer=mm,\n offset=array_offset, order=order)\n self._mmap = mm\n self.offset = offset\n 
self.mode = mode\n\n if isinstance(filename, os.PathLike):\n # special case - if we were constructed with a pathlib.path,\n # then filename is a path object, not a string\n self.filename = filename.resolve()\n elif hasattr(fid, "name") and isinstance(fid.name, str):\n # py3 returns int for TemporaryFile().name\n self.filename = os.path.abspath(fid.name)\n # same as memmap copies (e.g. memmap + 1)\n else:\n self.filename = None\n\n return self\n\n def __array_finalize__(self, obj):\n if hasattr(obj, '_mmap') and np.may_share_memory(self, obj):\n self._mmap = obj._mmap\n self.filename = obj.filename\n self.offset = obj.offset\n self.mode = obj.mode\n else:\n self._mmap = None\n self.filename = None\n self.offset = None\n self.mode = None\n\n def flush(self):\n """\n Write any changes in the array to the file on disk.\n\n For further information, see `memmap`.\n\n Parameters\n ----------\n None\n\n See Also\n --------\n memmap\n\n """\n if self.base is not None and hasattr(self.base, 'flush'):\n self.base.flush()\n\n def __array_wrap__(self, arr, context=None, return_scalar=False):\n arr = super().__array_wrap__(arr, context)\n\n # Return a memmap if a memmap was given as the output of the\n # ufunc. Leave the arr class unchanged if self is not a memmap\n # to keep original memmap subclasses behavior\n if self is arr or type(self) is not memmap:\n return arr\n\n # Return scalar instead of 0d memmap, e.g. for np.sum with\n # axis=None (note that subclasses will not reach here)\n if return_scalar:\n return arr[()]\n\n # Return ndarray otherwise\n return arr.view(np.ndarray)\n\n def __getitem__(self, index):\n res = super().__getitem__(index)\n if type(res) is memmap and res._mmap is None:\n return res.view(type=ndarray)\n return res\n
.venv\Lib\site-packages\numpy\_core\memmap.py
memmap.py
Python
13,014
0.95
0.118457
0.05137
python-kit
316
2024-02-13T09:22:09.937721
BSD-3-Clause
false
dacdb0f3c2e0c696f6c93a651437fc8b
from numpy import memmap\n\n__all__ = ["memmap"]\n
.venv\Lib\site-packages\numpy\_core\memmap.pyi
memmap.pyi
Other
50
0.65
0
0
vue-tools
862
2023-11-21T22:30:32.445557
GPL-3.0
false
27d6f79490050803fc71c5e3ad341fa1
"""\nCreate the numpy._core.multiarray namespace for backward compatibility.\nIn v1.16 the multiarray and umath c-extension modules were merged into\na single _multiarray_umath extension module. So we replicate the old\nnamespace by importing from the extension module.\n\n"""\n\nimport functools\n\nfrom . import _multiarray_umath, overrides\nfrom ._multiarray_umath import * # noqa: F403\n\n# These imports are needed for backward compatibility,\n# do not change them. issue gh-15518\n# _get_ndarray_c_version is semi-public, on purpose not added to __all__\nfrom ._multiarray_umath import ( # noqa: F401\n _ARRAY_API,\n _flagdict,\n _get_madvise_hugepage,\n _get_ndarray_c_version,\n _monotonicity,\n _place,\n _reconstruct,\n _set_madvise_hugepage,\n _vec_string,\n from_dlpack,\n)\n\n__all__ = [\n '_ARRAY_API', 'ALLOW_THREADS', 'BUFSIZE', 'CLIP', 'DATETIMEUNITS',\n 'ITEM_HASOBJECT', 'ITEM_IS_POINTER', 'LIST_PICKLE', 'MAXDIMS',\n 'MAY_SHARE_BOUNDS', 'MAY_SHARE_EXACT', 'NEEDS_INIT', 'NEEDS_PYAPI',\n 'RAISE', 'USE_GETITEM', 'USE_SETITEM', 'WRAP',\n '_flagdict', 'from_dlpack', '_place', '_reconstruct', '_vec_string',\n '_monotonicity', 'add_docstring', 'arange', 'array', 'asarray',\n 'asanyarray', 'ascontiguousarray', 'asfortranarray', 'bincount',\n 'broadcast', 'busday_count', 'busday_offset', 'busdaycalendar', 'can_cast',\n 'compare_chararrays', 'concatenate', 'copyto', 'correlate', 'correlate2',\n 'count_nonzero', 'c_einsum', 'datetime_as_string', 'datetime_data',\n 'dot', 'dragon4_positional', 'dragon4_scientific', 'dtype',\n 'empty', 'empty_like', 'error', 'flagsobj', 'flatiter', 'format_longfloat',\n 'frombuffer', 'fromfile', 'fromiter', 'fromstring',\n 'get_handler_name', 'get_handler_version', 'inner', 'interp',\n 'interp_complex', 'is_busday', 'lexsort', 'matmul', 'vecdot',\n 'may_share_memory', 'min_scalar_type', 'ndarray', 'nditer', 'nested_iters',\n 'normalize_axis_index', 'packbits', 'promote_types', 'putmask',\n 'ravel_multi_index', 'result_type', 'scalar', 
'set_datetimeparse_function',\n 'set_typeDict', 'shares_memory', 'typeinfo',\n 'unpackbits', 'unravel_index', 'vdot', 'where', 'zeros']\n\n# For backward compatibility, make sure pickle imports\n# these functions from here\n_reconstruct.__module__ = 'numpy._core.multiarray'\nscalar.__module__ = 'numpy._core.multiarray'\n\n\nfrom_dlpack.__module__ = 'numpy'\narange.__module__ = 'numpy'\narray.__module__ = 'numpy'\nasarray.__module__ = 'numpy'\nasanyarray.__module__ = 'numpy'\nascontiguousarray.__module__ = 'numpy'\nasfortranarray.__module__ = 'numpy'\ndatetime_data.__module__ = 'numpy'\nempty.__module__ = 'numpy'\nfrombuffer.__module__ = 'numpy'\nfromfile.__module__ = 'numpy'\nfromiter.__module__ = 'numpy'\nfrompyfunc.__module__ = 'numpy'\nfromstring.__module__ = 'numpy'\nmay_share_memory.__module__ = 'numpy'\nnested_iters.__module__ = 'numpy'\npromote_types.__module__ = 'numpy'\nzeros.__module__ = 'numpy'\nnormalize_axis_index.__module__ = 'numpy.lib.array_utils'\nadd_docstring.__module__ = 'numpy.lib'\ncompare_chararrays.__module__ = 'numpy.char'\n\n\ndef _override___module__():\n namespace_names = globals()\n for ufunc_name in [\n 'absolute', 'arccos', 'arccosh', 'add', 'arcsin', 'arcsinh', 'arctan',\n 'arctan2', 'arctanh', 'bitwise_and', 'bitwise_count', 'invert',\n 'left_shift', 'bitwise_or', 'right_shift', 'bitwise_xor', 'cbrt',\n 'ceil', 'conjugate', 'copysign', 'cos', 'cosh', 'deg2rad', 'degrees',\n 'divide', 'divmod', 'equal', 'exp', 'exp2', 'expm1', 'fabs',\n 'float_power', 'floor', 'floor_divide', 'fmax', 'fmin', 'fmod',\n 'frexp', 'gcd', 'greater', 'greater_equal', 'heaviside', 'hypot',\n 'isfinite', 'isinf', 'isnan', 'isnat', 'lcm', 'ldexp', 'less',\n 'less_equal', 'log', 'log10', 'log1p', 'log2', 'logaddexp',\n 'logaddexp2', 'logical_and', 'logical_not', 'logical_or',\n 'logical_xor', 'matmul', 'matvec', 'maximum', 'minimum', 'remainder',\n 'modf', 'multiply', 'negative', 'nextafter', 'not_equal', 'positive',\n 'power', 'rad2deg', 'radians', 
'reciprocal', 'rint', 'sign', 'signbit',\n 'sin', 'sinh', 'spacing', 'sqrt', 'square', 'subtract', 'tan', 'tanh',\n 'trunc', 'vecdot', 'vecmat',\n ]:\n ufunc = namespace_names[ufunc_name]\n ufunc.__module__ = "numpy"\n ufunc.__qualname__ = ufunc_name\n\n\n_override___module__()\n\n\n# We can't verify dispatcher signatures because NumPy's C functions don't\n# support introspection.\narray_function_from_c_func_and_dispatcher = functools.partial(\n overrides.array_function_from_dispatcher,\n module='numpy', docs_from_dispatcher=True, verify=False)\n\n\n@array_function_from_c_func_and_dispatcher(_multiarray_umath.empty_like)\ndef empty_like(\n prototype, dtype=None, order=None, subok=None, shape=None, *, device=None\n):\n """\n empty_like(prototype, dtype=None, order='K', subok=True, shape=None, *,\n device=None)\n\n Return a new array with the same shape and type as a given array.\n\n Parameters\n ----------\n prototype : array_like\n The shape and data-type of `prototype` define these same attributes\n of the returned array.\n dtype : data-type, optional\n Overrides the data type of the result.\n order : {'C', 'F', 'A', or 'K'}, optional\n Overrides the memory layout of the result. 'C' means C-order,\n 'F' means F-order, 'A' means 'F' if `prototype` is Fortran\n contiguous, 'C' otherwise. 'K' means match the layout of `prototype`\n as closely as possible.\n subok : bool, optional.\n If True, then the newly created array will use the sub-class\n type of `prototype`, otherwise it will be a base-class array. Defaults\n to True.\n shape : int or sequence of ints, optional.\n Overrides the shape of the result. If order='K' and the number of\n dimensions is unchanged, will try to keep order, otherwise,\n order='C' is implied.\n device : str, optional\n The device on which to place the created array. Default: None.\n For Array-API interoperability only, so must be ``"cpu"`` if passed.\n\n .. 
versionadded:: 2.0.0\n\n Returns\n -------\n out : ndarray\n Array of uninitialized (arbitrary) data with the same\n shape and type as `prototype`.\n\n See Also\n --------\n ones_like : Return an array of ones with shape and type of input.\n zeros_like : Return an array of zeros with shape and type of input.\n full_like : Return a new array with shape of input filled with value.\n empty : Return a new uninitialized array.\n\n Notes\n -----\n Unlike other array creation functions (e.g. `zeros_like`, `ones_like`,\n `full_like`), `empty_like` does not initialize the values of the array,\n and may therefore be marginally faster. However, the values stored in the\n newly allocated array are arbitrary. For reproducible behavior, be sure\n to set each element of the array before reading.\n\n Examples\n --------\n >>> import numpy as np\n >>> a = ([1,2,3], [4,5,6]) # a is array-like\n >>> np.empty_like(a)\n array([[-1073741821, -1073741821, 3], # uninitialized\n [ 0, 0, -1073741821]])\n >>> a = np.array([[1., 2., 3.],[4.,5.,6.]])\n >>> np.empty_like(a)\n array([[ -2.00000715e+000, 1.48219694e-323, -2.00000572e+000], # uninitialized\n [ 4.38791518e-305, -2.00000715e+000, 4.17269252e-309]])\n\n """\n return (prototype,)\n\n\n@array_function_from_c_func_and_dispatcher(_multiarray_umath.concatenate)\ndef concatenate(arrays, axis=None, out=None, *, dtype=None, casting=None):\n """\n concatenate(\n (a1, a2, ...),\n axis=0,\n out=None,\n dtype=None,\n casting="same_kind"\n )\n\n Join a sequence of arrays along an existing axis.\n\n Parameters\n ----------\n a1, a2, ... : sequence of array_like\n The arrays must have the same shape, except in the dimension\n corresponding to `axis` (the first, by default).\n axis : int, optional\n The axis along which the arrays will be joined. If axis is None,\n arrays are flattened before use. Default is 0.\n out : ndarray, optional\n If provided, the destination to place the result. 
The shape must be\n correct, matching that of what concatenate would have returned if no\n out argument were specified.\n dtype : str or dtype\n If provided, the destination array will have this dtype. Cannot be\n provided together with `out`.\n\n .. versionadded:: 1.20.0\n\n casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional\n Controls what kind of data casting may occur. Defaults to 'same_kind'.\n For a description of the options, please see :term:`casting`.\n\n .. versionadded:: 1.20.0\n\n Returns\n -------\n res : ndarray\n The concatenated array.\n\n See Also\n --------\n ma.concatenate : Concatenate function that preserves input masks.\n array_split : Split an array into multiple sub-arrays of equal or\n near-equal size.\n split : Split array into a list of multiple sub-arrays of equal size.\n hsplit : Split array into multiple sub-arrays horizontally (column wise).\n vsplit : Split array into multiple sub-arrays vertically (row wise).\n dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).\n stack : Stack a sequence of arrays along a new axis.\n block : Assemble arrays from blocks.\n hstack : Stack arrays in sequence horizontally (column wise).\n vstack : Stack arrays in sequence vertically (row wise).\n dstack : Stack arrays in sequence depth wise (along third dimension).\n column_stack : Stack 1-D arrays as columns into a 2-D array.\n\n Notes\n -----\n When one or more of the arrays to be concatenated is a MaskedArray,\n this function will return a MaskedArray object instead of an ndarray,\n but the input masks are *not* preserved. 
In cases where a MaskedArray\n is expected as input, use the ma.concatenate function from the masked\n array module instead.\n\n Examples\n --------\n >>> import numpy as np\n >>> a = np.array([[1, 2], [3, 4]])\n >>> b = np.array([[5, 6]])\n >>> np.concatenate((a, b), axis=0)\n array([[1, 2],\n [3, 4],\n [5, 6]])\n >>> np.concatenate((a, b.T), axis=1)\n array([[1, 2, 5],\n [3, 4, 6]])\n >>> np.concatenate((a, b), axis=None)\n array([1, 2, 3, 4, 5, 6])\n\n This function will not preserve masking of MaskedArray inputs.\n\n >>> a = np.ma.arange(3)\n >>> a[1] = np.ma.masked\n >>> b = np.arange(2, 5)\n >>> a\n masked_array(data=[0, --, 2],\n mask=[False, True, False],\n fill_value=999999)\n >>> b\n array([2, 3, 4])\n >>> np.concatenate([a, b])\n masked_array(data=[0, 1, 2, 2, 3, 4],\n mask=False,\n fill_value=999999)\n >>> np.ma.concatenate([a, b])\n masked_array(data=[0, --, 2, 2, 3, 4],\n mask=[False, True, False, False, False, False],\n fill_value=999999)\n\n """\n if out is not None:\n # optimize for the typical case where only arrays is provided\n arrays = list(arrays)\n arrays.append(out)\n return arrays\n\n\n@array_function_from_c_func_and_dispatcher(_multiarray_umath.inner)\ndef inner(a, b):\n """\n inner(a, b, /)\n\n Inner product of two arrays.\n\n Ordinary inner product of vectors for 1-D arrays (without complex\n conjugation), in higher dimensions a sum product over the last axes.\n\n Parameters\n ----------\n a, b : array_like\n If `a` and `b` are nonscalar, their last dimensions must match.\n\n Returns\n -------\n out : ndarray\n If `a` and `b` are both\n scalars or both 1-D arrays then a scalar is returned; otherwise\n an array is returned.\n ``out.shape = (*a.shape[:-1], *b.shape[:-1])``\n\n Raises\n ------\n ValueError\n If both `a` and `b` are nonscalar and their last dimensions have\n different sizes.\n\n See Also\n --------\n tensordot : Sum products over arbitrary axes.\n dot : Generalised matrix product, using second last dimension of `b`.\n vecdot 
: Vector dot product of two arrays.\n einsum : Einstein summation convention.\n\n Notes\n -----\n For vectors (1-D arrays) it computes the ordinary inner-product::\n\n np.inner(a, b) = sum(a[:]*b[:])\n\n More generally, if ``ndim(a) = r > 0`` and ``ndim(b) = s > 0``::\n\n np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1))\n\n or explicitly::\n\n np.inner(a, b)[i0,...,ir-2,j0,...,js-2]\n = sum(a[i0,...,ir-2,:]*b[j0,...,js-2,:])\n\n In addition `a` or `b` may be scalars, in which case::\n\n np.inner(a,b) = a*b\n\n Examples\n --------\n Ordinary inner product for vectors:\n\n >>> import numpy as np\n >>> a = np.array([1,2,3])\n >>> b = np.array([0,1,0])\n >>> np.inner(a, b)\n 2\n\n Some multidimensional examples:\n\n >>> a = np.arange(24).reshape((2,3,4))\n >>> b = np.arange(4)\n >>> c = np.inner(a, b)\n >>> c.shape\n (2, 3)\n >>> c\n array([[ 14, 38, 62],\n [ 86, 110, 134]])\n\n >>> a = np.arange(2).reshape((1,1,2))\n >>> b = np.arange(6).reshape((3,2))\n >>> c = np.inner(a, b)\n >>> c.shape\n (1, 1, 3)\n >>> c\n array([[[1, 3, 5]]])\n\n An example where `b` is a scalar:\n\n >>> np.inner(np.eye(2), 7)\n array([[7., 0.],\n [0., 7.]])\n\n """\n return (a, b)\n\n\n@array_function_from_c_func_and_dispatcher(_multiarray_umath.where)\ndef where(condition, x=None, y=None):\n """\n where(condition, [x, y], /)\n\n Return elements chosen from `x` or `y` depending on `condition`.\n\n .. note::\n When only `condition` is provided, this function is a shorthand for\n ``np.asarray(condition).nonzero()``. Using `nonzero` directly should be\n preferred, as it behaves correctly for subclasses. The rest of this\n documentation covers only the case where all three arguments are\n provided.\n\n Parameters\n ----------\n condition : array_like, bool\n Where True, yield `x`, otherwise yield `y`.\n x, y : array_like\n Values from which to choose. 
`x`, `y` and `condition` need to be\n broadcastable to some shape.\n\n Returns\n -------\n out : ndarray\n An array with elements from `x` where `condition` is True, and elements\n from `y` elsewhere.\n\n See Also\n --------\n choose\n nonzero : The function that is called when x and y are omitted\n\n Notes\n -----\n If all the arrays are 1-D, `where` is equivalent to::\n\n [xv if c else yv\n for c, xv, yv in zip(condition, x, y)]\n\n Examples\n --------\n >>> import numpy as np\n >>> a = np.arange(10)\n >>> a\n array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\n >>> np.where(a < 5, a, 10*a)\n array([ 0, 1, 2, 3, 4, 50, 60, 70, 80, 90])\n\n This can be used on multidimensional arrays too:\n\n >>> np.where([[True, False], [True, True]],\n ... [[1, 2], [3, 4]],\n ... [[9, 8], [7, 6]])\n array([[1, 8],\n [3, 4]])\n\n The shapes of x, y, and the condition are broadcast together:\n\n >>> x, y = np.ogrid[:3, :4]\n >>> np.where(x < y, x, 10 + y) # both x and 10+y are broadcast\n array([[10, 0, 0, 0],\n [10, 11, 1, 1],\n [10, 11, 12, 2]])\n\n >>> a = np.array([[0, 1, 2],\n ... [0, 2, 4],\n ... [0, 3, 6]])\n >>> np.where(a < 4, a, -1) # -1 is broadcast\n array([[ 0, 1, 2],\n [ 0, 2, -1],\n [ 0, 3, -1]])\n """\n return (condition, x, y)\n\n\n@array_function_from_c_func_and_dispatcher(_multiarray_umath.lexsort)\ndef lexsort(keys, axis=None):\n """\n lexsort(keys, axis=-1)\n\n Perform an indirect stable sort using a sequence of keys.\n\n Given multiple sorting keys, lexsort returns an array of integer indices\n that describes the sort order by multiple keys. The last key in the\n sequence is used for the primary sort order, ties are broken by the\n second-to-last key, and so on.\n\n Parameters\n ----------\n keys : (k, m, n, ...) array-like\n The `k` keys to be sorted. 
The *last* key (e.g, the last\n row if `keys` is a 2D array) is the primary sort key.\n Each element of `keys` along the zeroth axis must be\n an array-like object of the same shape.\n axis : int, optional\n Axis to be indirectly sorted. By default, sort over the last axis\n of each sequence. Separate slices along `axis` sorted over\n independently; see last example.\n\n Returns\n -------\n indices : (m, n, ...) ndarray of ints\n Array of indices that sort the keys along the specified axis.\n\n See Also\n --------\n argsort : Indirect sort.\n ndarray.sort : In-place sort.\n sort : Return a sorted copy of an array.\n\n Examples\n --------\n Sort names: first by surname, then by name.\n\n >>> import numpy as np\n >>> surnames = ('Hertz', 'Galilei', 'Hertz')\n >>> first_names = ('Heinrich', 'Galileo', 'Gustav')\n >>> ind = np.lexsort((first_names, surnames))\n >>> ind\n array([1, 2, 0])\n\n >>> [surnames[i] + ", " + first_names[i] for i in ind]\n ['Galilei, Galileo', 'Hertz, Gustav', 'Hertz, Heinrich']\n\n Sort according to two numerical keys, first by elements\n of ``a``, then breaking ties according to elements of ``b``:\n\n >>> a = [1, 5, 1, 4, 3, 4, 4] # First sequence\n >>> b = [9, 4, 0, 4, 0, 2, 1] # Second sequence\n >>> ind = np.lexsort((b, a)) # Sort by `a`, then by `b`\n >>> ind\n array([2, 0, 4, 6, 5, 3, 1])\n >>> [(a[i], b[i]) for i in ind]\n [(1, 0), (1, 9), (3, 0), (4, 1), (4, 2), (4, 4), (5, 4)]\n\n Compare against `argsort`, which would sort each key independently.\n\n >>> np.argsort((b, a), kind='stable')\n array([[2, 4, 6, 5, 1, 3, 0],\n [0, 2, 4, 3, 5, 6, 1]])\n\n To sort lexicographically with `argsort`, we would need to provide a\n structured array.\n\n >>> x = np.array([(ai, bi) for ai, bi in zip(a, b)],\n ... 
dtype = np.dtype([('x', int), ('y', int)]))\n >>> np.argsort(x) # or np.argsort(x, order=('x', 'y'))\n array([2, 0, 4, 6, 5, 3, 1])\n\n The zeroth axis of `keys` always corresponds with the sequence of keys,\n so 2D arrays are treated just like other sequences of keys.\n\n >>> arr = np.asarray([b, a])\n >>> ind2 = np.lexsort(arr)\n >>> np.testing.assert_equal(ind2, ind)\n\n Accordingly, the `axis` parameter refers to an axis of *each* key, not of\n the `keys` argument itself. For instance, the array ``arr`` is treated as\n a sequence of two 1-D keys, so specifying ``axis=0`` is equivalent to\n using the default axis, ``axis=-1``.\n\n >>> np.testing.assert_equal(np.lexsort(arr, axis=0),\n ... np.lexsort(arr, axis=-1))\n\n For higher-dimensional arrays, the axis parameter begins to matter. The\n resulting array has the same shape as each key, and the values are what\n we would expect if `lexsort` were performed on corresponding slices\n of the keys independently. For instance,\n\n >>> x = [[1, 2, 3, 4],\n ... [4, 3, 2, 1],\n ... [2, 1, 4, 3]]\n >>> y = [[2, 2, 1, 1],\n ... [1, 2, 1, 2],\n ... [1, 1, 2, 1]]\n >>> np.lexsort((x, y), axis=1)\n array([[2, 3, 0, 1],\n [2, 0, 3, 1],\n [1, 0, 3, 2]])\n\n Each row of the result is what we would expect if we were to perform\n `lexsort` on the corresponding row of the keys:\n\n >>> for i in range(3):\n ... 
print(np.lexsort((x[i], y[i])))
    [2 3 0 1]
    [2 0 3 1]
    [1 0 3 2]

    """
    # Dispatcher: return the iterable of dispatch-relevant arguments.
    # A tuple of keys is dispatched on per key array; any other key object
    # (e.g. a single 2-D array) is dispatched on as one argument.
    if isinstance(keys, tuple):
        return keys
    else:
        return (keys,)


@array_function_from_c_func_and_dispatcher(_multiarray_umath.can_cast)
def can_cast(from_, to, casting=None):
    """
    can_cast(from_, to, casting='safe')

    Returns True if cast between data types can occur according to the
    casting rule.

    Parameters
    ----------
    from_ : dtype, dtype specifier, NumPy scalar, or array
        Data type, NumPy scalar, or array to cast from.
    to : dtype or dtype specifier
        Data type to cast to.
    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
        Controls what kind of data casting may occur.

        * 'no' means the data types should not be cast at all.
        * 'equiv' means only byte-order changes are allowed.
        * 'safe' means only casts which can preserve values are allowed.
        * 'same_kind' means only safe casts or casts within a kind,
          like float64 to float32, are allowed.
        * 'unsafe' means any data conversions may be done.

    Returns
    -------
    out : bool
        True if cast can occur according to the casting rule.

    Notes
    -----
    .. versionchanged:: 2.0
        This function does not support Python scalars anymore and does not
        apply any value-based logic for 0-D arrays and NumPy scalars.

    See also
    --------
    dtype, result_type

    Examples
    --------
    Basic examples

    >>> import numpy as np
    >>> np.can_cast(np.int32, np.int64)
    True
    >>> np.can_cast(np.float64, complex)
    True
    >>> np.can_cast(complex, float)
    False

    >>> np.can_cast('i8', 'f8')
    True
    >>> np.can_cast('i8', 'f4')
    False
    >>> np.can_cast('i4', 'S4')
    False

    """
    # Only `from_` can be an array and trigger dispatch; `to` and `casting`
    # are never dispatch-relevant.
    return (from_,)


@array_function_from_c_func_and_dispatcher(_multiarray_umath.min_scalar_type)
def min_scalar_type(a):
    """
    min_scalar_type(a, /)

    For scalar ``a``, returns the data type with the smallest size
    and smallest scalar kind which can hold its value. 
For non-scalar\n array ``a``, returns the vector's dtype unmodified.\n\n Floating point values are not demoted to integers,\n and complex values are not demoted to floats.\n\n Parameters\n ----------\n a : scalar or array_like\n The value whose minimal data type is to be found.\n\n Returns\n -------\n out : dtype\n The minimal data type.\n\n See Also\n --------\n result_type, promote_types, dtype, can_cast\n\n Examples\n --------\n >>> import numpy as np\n >>> np.min_scalar_type(10)\n dtype('uint8')\n\n >>> np.min_scalar_type(-260)\n dtype('int16')\n\n >>> np.min_scalar_type(3.1)\n dtype('float16')\n\n >>> np.min_scalar_type(1e50)\n dtype('float64')\n\n >>> np.min_scalar_type(np.arange(4,dtype='f8'))\n dtype('float64')\n\n """\n return (a,)\n\n\n@array_function_from_c_func_and_dispatcher(_multiarray_umath.result_type)\ndef result_type(*arrays_and_dtypes):\n """\n result_type(*arrays_and_dtypes)\n\n Returns the type that results from applying the NumPy\n type promotion rules to the arguments.\n\n Type promotion in NumPy works similarly to the rules in languages\n like C++, with some slight differences. When both scalars and\n arrays are used, the array's type takes precedence and the actual value\n of the scalar is taken into account.\n\n For example, calculating 3*a, where a is an array of 32-bit floats,\n intuitively should result in a 32-bit float output. 
If the 3 is a\n 32-bit integer, the NumPy rules indicate it can't convert losslessly\n into a 32-bit float, so a 64-bit float should be the result type.\n By examining the value of the constant, '3', we see that it fits in\n an 8-bit integer, which can be cast losslessly into the 32-bit float.\n\n Parameters\n ----------\n arrays_and_dtypes : list of arrays and dtypes\n The operands of some operation whose result type is needed.\n\n Returns\n -------\n out : dtype\n The result type.\n\n See also\n --------\n dtype, promote_types, min_scalar_type, can_cast\n\n Notes\n -----\n The specific algorithm used is as follows.\n\n Categories are determined by first checking which of boolean,\n integer (int/uint), or floating point (float/complex) the maximum\n kind of all the arrays and the scalars are.\n\n If there are only scalars or the maximum category of the scalars\n is higher than the maximum category of the arrays,\n the data types are combined with :func:`promote_types`\n to produce the return value.\n\n Otherwise, `min_scalar_type` is called on each scalar, and\n the resulting data types are all combined with :func:`promote_types`\n to produce the return value.\n\n The set of int values is not a subset of the uint values for types\n with the same number of bits, something not reflected in\n :func:`min_scalar_type`, but handled as a special case in `result_type`.\n\n Examples\n --------\n >>> import numpy as np\n >>> np.result_type(3, np.arange(7, dtype='i1'))\n dtype('int8')\n\n >>> np.result_type('i4', 'c8')\n dtype('complex128')\n\n >>> np.result_type(3.0, -2)\n dtype('float64')\n\n """\n return arrays_and_dtypes\n\n\n@array_function_from_c_func_and_dispatcher(_multiarray_umath.dot)\ndef dot(a, b, out=None):\n """\n dot(a, b, out=None)\n\n Dot product of two arrays. 
Specifically,\n\n - If both `a` and `b` are 1-D arrays, it is inner product of vectors\n (without complex conjugation).\n\n - If both `a` and `b` are 2-D arrays, it is matrix multiplication,\n but using :func:`matmul` or ``a @ b`` is preferred.\n\n - If either `a` or `b` is 0-D (scalar), it is equivalent to\n :func:`multiply` and using ``numpy.multiply(a, b)`` or ``a * b`` is\n preferred.\n\n - If `a` is an N-D array and `b` is a 1-D array, it is a sum product over\n the last axis of `a` and `b`.\n\n - If `a` is an N-D array and `b` is an M-D array (where ``M>=2``), it is a\n sum product over the last axis of `a` and the second-to-last axis of\n `b`::\n\n dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m])\n\n It uses an optimized BLAS library when possible (see `numpy.linalg`).\n\n Parameters\n ----------\n a : array_like\n First argument.\n b : array_like\n Second argument.\n out : ndarray, optional\n Output argument. This must have the exact kind that would be returned\n if it was not used. In particular, it must have the right type, must be\n C-contiguous, and its dtype must be the dtype that would be returned\n for `dot(a,b)`. This is a performance feature. Therefore, if these\n conditions are not met, an exception is raised, instead of attempting\n to be flexible.\n\n Returns\n -------\n output : ndarray\n Returns the dot product of `a` and `b`. 
If `a` and `b` are both\n scalars or both 1-D arrays then a scalar is returned; otherwise\n an array is returned.\n If `out` is given, then it is returned.\n\n Raises\n ------\n ValueError\n If the last dimension of `a` is not the same size as\n the second-to-last dimension of `b`.\n\n See Also\n --------\n vdot : Complex-conjugating dot product.\n vecdot : Vector dot product of two arrays.\n tensordot : Sum products over arbitrary axes.\n einsum : Einstein summation convention.\n matmul : '@' operator as method with out parameter.\n linalg.multi_dot : Chained dot product.\n\n Examples\n --------\n >>> import numpy as np\n >>> np.dot(3, 4)\n 12\n\n Neither argument is complex-conjugated:\n\n >>> np.dot([2j, 3j], [2j, 3j])\n (-13+0j)\n\n For 2-D arrays it is the matrix product:\n\n >>> a = [[1, 0], [0, 1]]\n >>> b = [[4, 1], [2, 2]]\n >>> np.dot(a, b)\n array([[4, 1],\n [2, 2]])\n\n >>> a = np.arange(3*4*5*6).reshape((3,4,5,6))\n >>> b = np.arange(3*4*5*6)[::-1].reshape((5,4,6,3))\n >>> np.dot(a, b)[2,3,2,1,2,2]\n 499128\n >>> sum(a[2,3,2,:] * b[1,2,:,2])\n 499128\n\n """\n return (a, b, out)\n\n\n@array_function_from_c_func_and_dispatcher(_multiarray_umath.vdot)\ndef vdot(a, b):\n r"""\n vdot(a, b, /)\n\n Return the dot product of two vectors.\n\n The `vdot` function handles complex numbers differently than `dot`:\n if the first argument is complex, it is replaced by its complex conjugate\n in the dot product calculation. 
`vdot` also handles multidimensional\n arrays differently than `dot`: it does not perform a matrix product, but\n flattens the arguments to 1-D arrays before taking a vector dot product.\n\n Consequently, when the arguments are 2-D arrays of the same shape, this\n function effectively returns their\n `Frobenius inner product <https://en.wikipedia.org/wiki/Frobenius_inner_product>`_\n (also known as the *trace inner product* or the *standard inner product*\n on a vector space of matrices).\n\n Parameters\n ----------\n a : array_like\n If `a` is complex the complex conjugate is taken before calculation\n of the dot product.\n b : array_like\n Second argument to the dot product.\n\n Returns\n -------\n output : ndarray\n Dot product of `a` and `b`. Can be an int, float, or\n complex depending on the types of `a` and `b`.\n\n See Also\n --------\n dot : Return the dot product without using the complex conjugate of the\n first argument.\n\n Examples\n --------\n >>> import numpy as np\n >>> a = np.array([1+2j,3+4j])\n >>> b = np.array([5+6j,7+8j])\n >>> np.vdot(a, b)\n (70-8j)\n >>> np.vdot(b, a)\n (70+8j)\n\n Note that higher-dimensional arrays are flattened!\n\n >>> a = np.array([[1, 4], [5, 6]])\n >>> b = np.array([[4, 1], [2, 2]])\n >>> np.vdot(a, b)\n 30\n >>> np.vdot(b, a)\n 30\n >>> 1*4 + 4*1 + 5*2 + 6*2\n 30\n\n """ # noqa: E501\n return (a, b)\n\n\n@array_function_from_c_func_and_dispatcher(_multiarray_umath.bincount)\ndef bincount(x, weights=None, minlength=None):\n """\n bincount(x, /, weights=None, minlength=0)\n\n Count number of occurrences of each value in array of non-negative ints.\n\n The number of bins (of size 1) is one larger than the largest value in\n `x`. 
If `minlength` is specified, there will be at least this number\n of bins in the output array (though it will be longer if necessary,\n depending on the contents of `x`).\n Each bin gives the number of occurrences of its index value in `x`.\n If `weights` is specified the input array is weighted by it, i.e. if a\n value ``n`` is found at position ``i``, ``out[n] += weight[i]`` instead\n of ``out[n] += 1``.\n\n Parameters\n ----------\n x : array_like, 1 dimension, nonnegative ints\n Input array.\n weights : array_like, optional\n Weights, array of the same shape as `x`.\n minlength : int, optional\n A minimum number of bins for the output array.\n\n Returns\n -------\n out : ndarray of ints\n The result of binning the input array.\n The length of `out` is equal to ``np.amax(x)+1``.\n\n Raises\n ------\n ValueError\n If the input is not 1-dimensional, or contains elements with negative\n values, or if `minlength` is negative.\n TypeError\n If the type of the input is float or complex.\n\n See Also\n --------\n histogram, digitize, unique\n\n Examples\n --------\n >>> import numpy as np\n >>> np.bincount(np.arange(5))\n array([1, 1, 1, 1, 1])\n >>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7]))\n array([1, 3, 1, 1, 0, 0, 0, 1])\n\n >>> x = np.array([0, 1, 1, 3, 2, 1, 7, 23])\n >>> np.bincount(x).size == np.amax(x)+1\n True\n\n The input array needs to be of integer dtype, otherwise a\n TypeError is raised:\n\n >>> np.bincount(np.arange(5, dtype=float))\n Traceback (most recent call last):\n ...\n TypeError: Cannot cast array data from dtype('float64') to dtype('int64')\n according to the rule 'safe'\n\n A possible use of ``bincount`` is to perform sums over\n variable-size chunks of an array, using the ``weights`` keyword.\n\n >>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights\n >>> x = np.array([0, 1, 1, 2, 2, 2])\n >>> np.bincount(x, weights=w)\n array([ 0.3, 0.7, 1.1])\n\n """\n return (x, 
weights)


@array_function_from_c_func_and_dispatcher(_multiarray_umath.ravel_multi_index)
def ravel_multi_index(multi_index, dims, mode=None, order=None):
    """
    ravel_multi_index(multi_index, dims, mode='raise', order='C')

    Converts a tuple of index arrays into an array of flat
    indices, applying boundary modes to the multi-index.

    Parameters
    ----------
    multi_index : tuple of array_like
        A tuple of integer arrays, one array for each dimension.
    dims : tuple of ints
        The shape of array into which the indices from ``multi_index`` apply.
    mode : {'raise', 'wrap', 'clip'}, optional
        Specifies how out-of-bounds indices are handled.  Can specify
        either one mode or a tuple of modes, one mode per index.

        * 'raise' -- raise an error (default)
        * 'wrap' -- wrap around
        * 'clip' -- clip to the range

        In 'clip' mode, a negative index which would normally
        wrap will clip to 0 instead.
    order : {'C', 'F'}, optional
        Determines whether the multi-index should be viewed as
        indexing in row-major (C-style) or column-major
        (Fortran-style) order.

    Returns
    -------
    raveled_indices : ndarray
        An array of indices into the flattened version of an array
        of dimensions ``dims``.

    See Also
    --------
    unravel_index

    Examples
    --------
    >>> import numpy as np
    >>> arr = np.array([[3,6,6],[4,5,1]])
    >>> np.ravel_multi_index(arr, (7,6))
    array([22, 41, 37])
    >>> np.ravel_multi_index(arr, (7,6), order='F')
    array([31, 41, 13])
    >>> np.ravel_multi_index(arr, (4,6), mode='clip')
    array([22, 23, 19])
    >>> np.ravel_multi_index(arr, (4,4), mode=('clip','wrap'))
    array([12, 13, 13])

    >>> np.ravel_multi_index((3,1,4,1), (6,7,8,9))
    1621
    """
    # Dispatcher: only the index arrays can trigger __array_function__
    # dispatch; `dims`, `mode` and `order` are plain Python values.
    return multi_index


@array_function_from_c_func_and_dispatcher(_multiarray_umath.unravel_index)
def unravel_index(indices, shape=None, order=None):
    """
    unravel_index(indices, shape, order='C')

    Converts a flat index or array of flat indices into a tuple
    of coordinate arrays.

    
Parameters\n ----------\n indices : array_like\n An integer array whose elements are indices into the flattened\n version of an array of dimensions ``shape``. Before version 1.6.0,\n this function accepted just one index value.\n shape : tuple of ints\n The shape of the array to use for unraveling ``indices``.\n order : {'C', 'F'}, optional\n Determines whether the indices should be viewed as indexing in\n row-major (C-style) or column-major (Fortran-style) order.\n\n Returns\n -------\n unraveled_coords : tuple of ndarray\n Each array in the tuple has the same shape as the ``indices``\n array.\n\n See Also\n --------\n ravel_multi_index\n\n Examples\n --------\n >>> import numpy as np\n >>> np.unravel_index([22, 41, 37], (7,6))\n (array([3, 6, 6]), array([4, 5, 1]))\n >>> np.unravel_index([31, 41, 13], (7,6), order='F')\n (array([3, 6, 6]), array([4, 5, 1]))\n\n >>> np.unravel_index(1621, (6,7,8,9))\n (3, 1, 4, 1)\n\n """\n return (indices,)\n\n\n@array_function_from_c_func_and_dispatcher(_multiarray_umath.copyto)\ndef copyto(dst, src, casting=None, where=None):\n """\n copyto(dst, src, casting='same_kind', where=True)\n\n Copies values from one array to another, broadcasting as necessary.\n\n Raises a TypeError if the `casting` rule is violated, and if\n `where` is provided, it selects which elements to copy.\n\n Parameters\n ----------\n dst : ndarray\n The array into which values are copied.\n src : array_like\n The array from which values are copied.\n casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional\n Controls what kind of data casting may occur when copying.\n\n * 'no' means the data types should not be cast at all.\n * 'equiv' means only byte-order changes are allowed.\n * 'safe' means only casts which can preserve values are allowed.\n * 'same_kind' means only safe casts or casts within a kind,\n like float64 to float32, are allowed.\n * 'unsafe' means any data conversions may be done.\n where : array_like of bool, optional\n A boolean 
array which is broadcasted to match the dimensions\n of `dst`, and selects elements to copy from `src` to `dst`\n wherever it contains the value True.\n\n Examples\n --------\n >>> import numpy as np\n >>> A = np.array([4, 5, 6])\n >>> B = [1, 2, 3]\n >>> np.copyto(A, B)\n >>> A\n array([1, 2, 3])\n\n >>> A = np.array([[1, 2, 3], [4, 5, 6]])\n >>> B = [[4, 5, 6], [7, 8, 9]]\n >>> np.copyto(A, B)\n >>> A\n array([[4, 5, 6],\n [7, 8, 9]])\n\n """\n return (dst, src, where)\n\n\n@array_function_from_c_func_and_dispatcher(_multiarray_umath.putmask)\ndef putmask(a, /, mask, values):\n """\n putmask(a, mask, values)\n\n Changes elements of an array based on conditional and input values.\n\n Sets ``a.flat[n] = values[n]`` for each n where ``mask.flat[n]==True``.\n\n If `values` is not the same size as `a` and `mask` then it will repeat.\n This gives behavior different from ``a[mask] = values``.\n\n Parameters\n ----------\n a : ndarray\n Target array.\n mask : array_like\n Boolean mask array. It has to be the same shape as `a`.\n values : array_like\n Values to put into `a` where `mask` is True. 
If `values` is smaller\n than `a` it will be repeated.\n\n See Also\n --------\n place, put, take, copyto\n\n Examples\n --------\n >>> import numpy as np\n >>> x = np.arange(6).reshape(2, 3)\n >>> np.putmask(x, x>2, x**2)\n >>> x\n array([[ 0, 1, 2],\n [ 9, 16, 25]])\n\n If `values` is smaller than `a` it is repeated:\n\n >>> x = np.arange(5)\n >>> np.putmask(x, x>1, [-33, -44])\n >>> x\n array([ 0, 1, -33, -44, -33])\n\n """\n return (a, mask, values)\n\n\n@array_function_from_c_func_and_dispatcher(_multiarray_umath.packbits)\ndef packbits(a, axis=None, bitorder='big'):\n """\n packbits(a, /, axis=None, bitorder='big')\n\n Packs the elements of a binary-valued array into bits in a uint8 array.\n\n The result is padded to full bytes by inserting zero bits at the end.\n\n Parameters\n ----------\n a : array_like\n An array of integers or booleans whose elements should be packed to\n bits.\n axis : int, optional\n The dimension over which bit-packing is done.\n ``None`` implies packing the flattened array.\n bitorder : {'big', 'little'}, optional\n The order of the input bits. 'big' will mimic bin(val),\n ``[0, 0, 0, 0, 0, 0, 1, 1] => 3 = 0b00000011``, 'little' will\n reverse the order so ``[1, 1, 0, 0, 0, 0, 0, 0] => 3``.\n Defaults to 'big'.\n\n Returns\n -------\n packed : ndarray\n Array of type uint8 whose elements represent bits corresponding to the\n logical (0 or nonzero) value of the input elements. The shape of\n `packed` has the same number of dimensions as the input (unless `axis`\n is None, in which case the output is 1-D).\n\n See Also\n --------\n unpackbits: Unpacks elements of a uint8 array into a binary-valued output\n array.\n\n Examples\n --------\n >>> import numpy as np\n >>> a = np.array([[[1,0,1],\n ... [0,1,0]],\n ... [[1,1,0],\n ... 
[0,0,1]]])
    >>> b = np.packbits(a, axis=-1)
    >>> b
    array([[[160],
            [ 64]],
           [[192],
            [ 32]]], dtype=uint8)

    Note that in binary 160 = 1010 0000, 64 = 0100 0000, 192 = 1100 0000,
    and 32 = 0010 0000.

    """
    # Dispatcher: only the input array dispatches; `axis` and `bitorder`
    # are never array-like.
    return (a,)


@array_function_from_c_func_and_dispatcher(_multiarray_umath.unpackbits)
def unpackbits(a, axis=None, count=None, bitorder='big'):
    """
    unpackbits(a, /, axis=None, count=None, bitorder='big')

    Unpacks elements of a uint8 array into a binary-valued output array.

    Each element of `a` represents a bit-field that should be unpacked
    into a binary-valued output array. The shape of the output array is
    either 1-D (if `axis` is ``None``) or the same shape as the input
    array with unpacking done along the axis specified.

    Parameters
    ----------
    a : ndarray, uint8 type
        Input array.
    axis : int, optional
        The dimension over which bit-unpacking is done.
        ``None`` implies unpacking the flattened array.
    count : int or None, optional
        The number of elements to unpack along `axis`, provided as a way
        of undoing the effect of packing a size that is not a multiple
        of eight. A non-negative number means to only unpack `count`
        bits. A negative number means to trim off that many bits from
        the end. ``None`` means to unpack the entire array (the
        default). Counts larger than the available number of bits will
        add zero padding to the output. Negative counts must not
        exceed the available number of bits.
    bitorder : {'big', 'little'}, optional
        The order of the returned bits. 
'big' will mimic bin(val),\n ``3 = 0b00000011 => [0, 0, 0, 0, 0, 0, 1, 1]``, 'little' will reverse\n the order to ``[1, 1, 0, 0, 0, 0, 0, 0]``.\n Defaults to 'big'.\n\n Returns\n -------\n unpacked : ndarray, uint8 type\n The elements are binary-valued (0 or 1).\n\n See Also\n --------\n packbits : Packs the elements of a binary-valued array into bits in\n a uint8 array.\n\n Examples\n --------\n >>> import numpy as np\n >>> a = np.array([[2], [7], [23]], dtype=np.uint8)\n >>> a\n array([[ 2],\n [ 7],\n [23]], dtype=uint8)\n >>> b = np.unpackbits(a, axis=1)\n >>> b\n array([[0, 0, 0, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 0, 1, 1, 1]], dtype=uint8)\n >>> c = np.unpackbits(a, axis=1, count=-3)\n >>> c\n array([[0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0],\n [0, 0, 0, 1, 0]], dtype=uint8)\n\n >>> p = np.packbits(b, axis=0)\n >>> np.unpackbits(p, axis=0)\n array([[0, 0, 0, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 0, 1, 1, 1],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)\n >>> np.array_equal(b, np.unpackbits(p, axis=0, count=b.shape[0]))\n True\n\n """\n return (a,)\n\n\n@array_function_from_c_func_and_dispatcher(_multiarray_umath.shares_memory)\ndef shares_memory(a, b, max_work=None):\n """\n shares_memory(a, b, /, max_work=None)\n\n Determine if two arrays share memory.\n\n .. warning::\n\n This function can be exponentially slow for some inputs, unless\n `max_work` is set to zero or a positive integer.\n If in doubt, use `numpy.may_share_memory` instead.\n\n Parameters\n ----------\n a, b : ndarray\n Input arrays\n max_work : int, optional\n Effort to spend on solving the overlap problem (maximum number\n of candidate solutions to consider). The following special\n values are recognized:\n\n max_work=-1 (default)\n The problem is solved exactly. 
In this case, the function returns\n True only if there is an element shared between the arrays. Finding\n the exact solution may take extremely long in some cases.\n max_work=0\n Only the memory bounds of a and b are checked.\n This is equivalent to using ``may_share_memory()``.\n\n Raises\n ------\n numpy.exceptions.TooHardError\n Exceeded max_work.\n\n Returns\n -------\n out : bool\n\n See Also\n --------\n may_share_memory\n\n Examples\n --------\n >>> import numpy as np\n >>> x = np.array([1, 2, 3, 4])\n >>> np.shares_memory(x, np.array([5, 6, 7]))\n False\n >>> np.shares_memory(x[::2], x)\n True\n >>> np.shares_memory(x[::2], x[1::2])\n False\n\n Checking whether two arrays share memory is NP-complete, and\n runtime may increase exponentially in the number of\n dimensions. Hence, `max_work` should generally be set to a finite\n number, as it is possible to construct examples that take\n extremely long to run:\n\n >>> from numpy.lib.stride_tricks import as_strided\n >>> x = np.zeros([192163377], dtype=np.int8)\n >>> x1 = as_strided(\n ... x, strides=(36674, 61119, 85569), shape=(1049, 1049, 1049))\n >>> x2 = as_strided(\n ... x[64023025:], strides=(12223, 12224, 1), shape=(1049, 1049, 1))\n >>> np.shares_memory(x1, x2, max_work=1000)\n Traceback (most recent call last):\n ...\n numpy.exceptions.TooHardError: Exceeded max_work\n\n Running ``np.shares_memory(x1, x2)`` without `max_work` set takes\n around 1 minute for this case. It is possible to find problems\n that take still significantly longer.\n\n """\n return (a, b)\n\n\n@array_function_from_c_func_and_dispatcher(_multiarray_umath.may_share_memory)\ndef may_share_memory(a, b, max_work=None):\n """\n may_share_memory(a, b, /, max_work=None)\n\n Determine if two arrays might share memory\n\n A return of True does not necessarily mean that the two arrays\n share any element. 
It just means that they *might*.\n\n Only the memory bounds of a and b are checked by default.\n\n Parameters\n ----------\n a, b : ndarray\n Input arrays\n max_work : int, optional\n Effort to spend on solving the overlap problem. See\n `shares_memory` for details. Default for ``may_share_memory``\n is to do a bounds check.\n\n Returns\n -------\n out : bool\n\n See Also\n --------\n shares_memory\n\n Examples\n --------\n >>> import numpy as np\n >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))\n False\n >>> x = np.zeros([3, 4])\n >>> np.may_share_memory(x[:,0], x[:,1])\n True\n\n """\n return (a, b)\n\n\n@array_function_from_c_func_and_dispatcher(_multiarray_umath.is_busday)\ndef is_busday(dates, weekmask=None, holidays=None, busdaycal=None, out=None):\n """\n is_busday(\n dates,\n weekmask='1111100',\n holidays=None,\n busdaycal=None,\n out=None\n )\n\n Calculates which of the given dates are valid days, and which are not.\n\n Parameters\n ----------\n dates : array_like of datetime64[D]\n The array of dates to process.\n weekmask : str or array_like of bool, optional\n A seven-element array indicating which of Monday through Sunday are\n valid days. May be specified as a length-seven list or array, like\n [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string\n like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for\n weekdays, optionally separated by white space. Valid abbreviations\n are: Mon Tue Wed Thu Fri Sat Sun\n holidays : array_like of datetime64[D], optional\n An array of dates to consider as invalid dates. They may be\n specified in any order, and NaT (not-a-time) dates are ignored.\n This list is saved in a normalized form that is suited for\n fast calculations of valid days.\n busdaycal : busdaycalendar, optional\n A `busdaycalendar` object which specifies the valid days. 
If this\n parameter is provided, neither weekmask nor holidays may be\n provided.\n out : array of bool, optional\n If provided, this array is filled with the result.\n\n Returns\n -------\n out : array of bool\n An array with the same shape as ``dates``, containing True for\n each valid day, and False for each invalid day.\n\n See Also\n --------\n busdaycalendar : An object that specifies a custom set of valid days.\n busday_offset : Applies an offset counted in valid days.\n busday_count : Counts how many valid days are in a half-open date range.\n\n Examples\n --------\n >>> import numpy as np\n >>> # The weekdays are Friday, Saturday, and Monday\n ... np.is_busday(['2011-07-01', '2011-07-02', '2011-07-18'],\n ... holidays=['2011-07-01', '2011-07-04', '2011-07-17'])\n array([False, False, True])\n """\n return (dates, weekmask, holidays, out)\n\n\n@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_offset)\ndef busday_offset(dates, offsets, roll=None, weekmask=None, holidays=None,\n busdaycal=None, out=None):\n """\n busday_offset(\n dates,\n offsets,\n roll='raise',\n weekmask='1111100',\n holidays=None,\n busdaycal=None,\n out=None\n )\n\n First adjusts the date to fall on a valid day according to\n the ``roll`` rule, then applies offsets to the given dates\n counted in valid days.\n\n Parameters\n ----------\n dates : array_like of datetime64[D]\n The array of dates to process.\n offsets : array_like of int\n The array of offsets, which is broadcast with ``dates``.\n roll : {'raise', 'nat', 'forward', 'following', 'backward', 'preceding', \\n 'modifiedfollowing', 'modifiedpreceding'}, optional\n How to treat dates that do not fall on a valid day. 
The default\n is 'raise'.\n\n * 'raise' means to raise an exception for an invalid day.\n * 'nat' means to return a NaT (not-a-time) for an invalid day.\n * 'forward' and 'following' mean to take the first valid day\n later in time.\n * 'backward' and 'preceding' mean to take the first valid day\n earlier in time.\n * 'modifiedfollowing' means to take the first valid day\n later in time unless it is across a Month boundary, in which\n case to take the first valid day earlier in time.\n * 'modifiedpreceding' means to take the first valid day\n earlier in time unless it is across a Month boundary, in which\n case to take the first valid day later in time.\n weekmask : str or array_like of bool, optional\n A seven-element array indicating which of Monday through Sunday are\n valid days. May be specified as a length-seven list or array, like\n [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string\n like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for\n weekdays, optionally separated by white space. Valid abbreviations\n are: Mon Tue Wed Thu Fri Sat Sun\n holidays : array_like of datetime64[D], optional\n An array of dates to consider as invalid dates. They may be\n specified in any order, and NaT (not-a-time) dates are ignored.\n This list is saved in a normalized form that is suited for\n fast calculations of valid days.\n busdaycal : busdaycalendar, optional\n A `busdaycalendar` object which specifies the valid days. 
If this\n parameter is provided, neither weekmask nor holidays may be\n provided.\n out : array of datetime64[D], optional\n If provided, this array is filled with the result.\n\n Returns\n -------\n out : array of datetime64[D]\n An array with a shape from broadcasting ``dates`` and ``offsets``\n together, containing the dates with offsets applied.\n\n See Also\n --------\n busdaycalendar : An object that specifies a custom set of valid days.\n is_busday : Returns a boolean array indicating valid days.\n busday_count : Counts how many valid days are in a half-open date range.\n\n Examples\n --------\n >>> import numpy as np\n >>> # First business day in October 2011 (not accounting for holidays)\n ... np.busday_offset('2011-10', 0, roll='forward')\n np.datetime64('2011-10-03')\n >>> # Last business day in February 2012 (not accounting for holidays)\n ... np.busday_offset('2012-03', -1, roll='forward')\n np.datetime64('2012-02-29')\n >>> # Third Wednesday in January 2011\n ... np.busday_offset('2011-01', 2, roll='forward', weekmask='Wed')\n np.datetime64('2011-01-19')\n >>> # 2012 Mother's Day in Canada and the U.S.\n ... np.busday_offset('2012-05', 1, roll='forward', weekmask='Sun')\n np.datetime64('2012-05-13')\n\n >>> # First business day on or after a date\n ... np.busday_offset('2011-03-20', 0, roll='forward')\n np.datetime64('2011-03-21')\n >>> np.busday_offset('2011-03-22', 0, roll='forward')\n np.datetime64('2011-03-22')\n >>> # First business day after a date\n ... 
np.busday_offset('2011-03-20', 1, roll='backward')\n np.datetime64('2011-03-21')\n >>> np.busday_offset('2011-03-22', 1, roll='backward')\n np.datetime64('2011-03-23')\n """\n return (dates, offsets, weekmask, holidays, out)\n\n\n@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_count)\ndef busday_count(begindates, enddates, weekmask=None, holidays=None,\n busdaycal=None, out=None):\n """\n busday_count(\n begindates,\n enddates,\n weekmask='1111100',\n holidays=[],\n busdaycal=None,\n out=None\n )\n\n Counts the number of valid days between `begindates` and\n `enddates`, not including the day of `enddates`.\n\n If ``enddates`` specifies a date value that is earlier than the\n corresponding ``begindates`` date value, the count will be negative.\n\n Parameters\n ----------\n begindates : array_like of datetime64[D]\n The array of the first dates for counting.\n enddates : array_like of datetime64[D]\n The array of the end dates for counting, which are excluded\n from the count themselves.\n weekmask : str or array_like of bool, optional\n A seven-element array indicating which of Monday through Sunday are\n valid days. May be specified as a length-seven list or array, like\n [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string\n like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for\n weekdays, optionally separated by white space. Valid abbreviations\n are: Mon Tue Wed Thu Fri Sat Sun\n holidays : array_like of datetime64[D], optional\n An array of dates to consider as invalid dates. They may be\n specified in any order, and NaT (not-a-time) dates are ignored.\n This list is saved in a normalized form that is suited for\n fast calculations of valid days.\n busdaycal : busdaycalendar, optional\n A `busdaycalendar` object which specifies the valid days. 
If this\n parameter is provided, neither weekmask nor holidays may be\n provided.\n out : array of int, optional\n If provided, this array is filled with the result.\n\n Returns\n -------\n out : array of int\n An array with a shape from broadcasting ``begindates`` and ``enddates``\n together, containing the number of valid days between\n the begin and end dates.\n\n See Also\n --------\n busdaycalendar : An object that specifies a custom set of valid days.\n is_busday : Returns a boolean array indicating valid days.\n busday_offset : Applies an offset counted in valid days.\n\n Examples\n --------\n >>> import numpy as np\n >>> # Number of weekdays in January 2011\n ... np.busday_count('2011-01', '2011-02')\n 21\n >>> # Number of weekdays in 2011\n >>> np.busday_count('2011', '2012')\n 260\n >>> # Number of Saturdays in 2011\n ... np.busday_count('2011', '2012', weekmask='Sat')\n 53\n """\n return (begindates, enddates, weekmask, holidays, out)\n\n\n@array_function_from_c_func_and_dispatcher(\n _multiarray_umath.datetime_as_string)\ndef datetime_as_string(arr, unit=None, timezone=None, casting=None):\n """\n datetime_as_string(arr, unit=None, timezone='naive', casting='same_kind')\n\n Convert an array of datetimes into an array of strings.\n\n Parameters\n ----------\n arr : array_like of datetime64\n The array of UTC timestamps to format.\n unit : str\n One of None, 'auto', or\n a :ref:`datetime unit <arrays.dtypes.dateunits>`.\n timezone : {'naive', 'UTC', 'local'} or tzinfo\n Timezone information to use when displaying the datetime. If 'UTC',\n end with a Z to indicate UTC time. If 'local', convert to the local\n timezone first, and suffix with a +-#### timezone offset. 
If a tzinfo\n object, then do as with 'local', but use the specified timezone.\n casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}\n Casting to allow when changing between datetime units.\n\n Returns\n -------\n str_arr : ndarray\n An array of strings the same shape as `arr`.\n\n Examples\n --------\n >>> import numpy as np\n >>> import pytz\n >>> d = np.arange('2002-10-27T04:30', 4*60, 60, dtype='M8[m]')\n >>> d\n array(['2002-10-27T04:30', '2002-10-27T05:30', '2002-10-27T06:30',\n '2002-10-27T07:30'], dtype='datetime64[m]')\n\n Setting the timezone to UTC shows the same information, but with a Z suffix\n\n >>> np.datetime_as_string(d, timezone='UTC')\n array(['2002-10-27T04:30Z', '2002-10-27T05:30Z', '2002-10-27T06:30Z',\n '2002-10-27T07:30Z'], dtype='<U35')\n\n Note that we picked datetimes that cross a DST boundary. Passing in a\n ``pytz`` timezone object will print the appropriate offset\n\n >>> np.datetime_as_string(d, timezone=pytz.timezone('US/Eastern'))\n array(['2002-10-27T00:30-0400', '2002-10-27T01:30-0400',\n '2002-10-27T01:30-0500', '2002-10-27T02:30-0500'], dtype='<U39')\n\n Passing in a unit will change the precision\n\n >>> np.datetime_as_string(d, unit='h')\n array(['2002-10-27T04', '2002-10-27T05', '2002-10-27T06', '2002-10-27T07'],\n dtype='<U32')\n >>> np.datetime_as_string(d, unit='s')\n array(['2002-10-27T04:30:00', '2002-10-27T05:30:00', '2002-10-27T06:30:00',\n '2002-10-27T07:30:00'], dtype='<U38')\n\n 'casting' can be used to specify whether precision can be changed\n\n >>> np.datetime_as_string(d, unit='h', casting='safe')\n Traceback (most recent call last):\n ...\n TypeError: Cannot create a datetime string as units 'h' from a NumPy\n datetime with units 'm' according to the rule 'safe'\n """\n return (arr,)\n
.venv\Lib\site-packages\numpy\_core\multiarray.py
multiarray.py
Python
59,917
0.75
0.057889
0.018646
node-utils
603
2025-05-16T02:11:46.025392
MIT
false
9e867577b52f3c569f591e4194aa2424
# TODO: Sort out any and all missing functions in this namespace\nimport datetime as dt\nfrom collections.abc import Callable, Iterable, Sequence\nfrom typing import (\n Any,\n ClassVar,\n Final,\n Protocol,\n SupportsIndex,\n TypeAlias,\n TypedDict,\n TypeVar,\n Unpack,\n final,\n overload,\n type_check_only,\n)\nfrom typing import (\n Literal as L,\n)\n\nfrom _typeshed import StrOrBytesPath, SupportsLenAndGetItem\nfrom typing_extensions import CapsuleType\n\nimport numpy as np\nfrom numpy import ( # type: ignore[attr-defined]\n _AnyShapeT,\n _CastingKind,\n _CopyMode,\n _ModeKind,\n _NDIterFlagsKind,\n _NDIterFlagsOp,\n _OrderCF,\n _OrderKACF,\n _SupportsBuffer,\n _SupportsFileMethods,\n broadcast,\n # Re-exports\n busdaycalendar,\n complexfloating,\n correlate,\n count_nonzero,\n datetime64,\n dtype,\n flatiter,\n float64,\n floating,\n from_dlpack,\n generic,\n int_,\n interp,\n intp,\n matmul,\n ndarray,\n nditer,\n signedinteger,\n str_,\n timedelta64,\n # The rest\n ufunc,\n uint8,\n unsignedinteger,\n vecdot,\n)\nfrom numpy import (\n einsum as c_einsum,\n)\nfrom numpy._typing import (\n ArrayLike,\n # DTypes\n DTypeLike,\n # Arrays\n NDArray,\n _ArrayLike,\n _ArrayLikeBool_co,\n _ArrayLikeBytes_co,\n _ArrayLikeComplex_co,\n _ArrayLikeDT64_co,\n _ArrayLikeFloat_co,\n _ArrayLikeInt_co,\n _ArrayLikeObject_co,\n _ArrayLikeStr_co,\n _ArrayLikeTD64_co,\n _ArrayLikeUInt_co,\n _DTypeLike,\n _FloatLike_co,\n _IntLike_co,\n _NestedSequence,\n _ScalarLike_co,\n # Shapes\n _Shape,\n _ShapeLike,\n _SupportsArrayFunc,\n _SupportsDType,\n _TD64Like_co,\n)\nfrom numpy._typing._ufunc import (\n _2PTuple,\n _PyFunc_Nin1_Nout1,\n _PyFunc_Nin1P_Nout2P,\n _PyFunc_Nin2_Nout1,\n _PyFunc_Nin3P_Nout1,\n)\nfrom numpy.lib._array_utils_impl import normalize_axis_index\n\n__all__ = [\n "_ARRAY_API",\n "ALLOW_THREADS",\n "BUFSIZE",\n "CLIP",\n "DATETIMEUNITS",\n "ITEM_HASOBJECT",\n "ITEM_IS_POINTER",\n "LIST_PICKLE",\n "MAXDIMS",\n "MAY_SHARE_BOUNDS",\n "MAY_SHARE_EXACT",\n 
"NEEDS_INIT",\n "NEEDS_PYAPI",\n "RAISE",\n "USE_GETITEM",\n "USE_SETITEM",\n "WRAP",\n "_flagdict",\n "from_dlpack",\n "_place",\n "_reconstruct",\n "_vec_string",\n "_monotonicity",\n "add_docstring",\n "arange",\n "array",\n "asarray",\n "asanyarray",\n "ascontiguousarray",\n "asfortranarray",\n "bincount",\n "broadcast",\n "busday_count",\n "busday_offset",\n "busdaycalendar",\n "can_cast",\n "compare_chararrays",\n "concatenate",\n "copyto",\n "correlate",\n "correlate2",\n "count_nonzero",\n "c_einsum",\n "datetime_as_string",\n "datetime_data",\n "dot",\n "dragon4_positional",\n "dragon4_scientific",\n "dtype",\n "empty",\n "empty_like",\n "error",\n "flagsobj",\n "flatiter",\n "format_longfloat",\n "frombuffer",\n "fromfile",\n "fromiter",\n "fromstring",\n "get_handler_name",\n "get_handler_version",\n "inner",\n "interp",\n "interp_complex",\n "is_busday",\n "lexsort",\n "matmul",\n "vecdot",\n "may_share_memory",\n "min_scalar_type",\n "ndarray",\n "nditer",\n "nested_iters",\n "normalize_axis_index",\n "packbits",\n "promote_types",\n "putmask",\n "ravel_multi_index",\n "result_type",\n "scalar",\n "set_datetimeparse_function",\n "set_typeDict",\n "shares_memory",\n "typeinfo",\n "unpackbits",\n "unravel_index",\n "vdot",\n "where",\n "zeros",\n]\n\n_ScalarT = TypeVar("_ScalarT", bound=generic)\n_DTypeT = TypeVar("_DTypeT", bound=np.dtype)\n_ArrayT = TypeVar("_ArrayT", bound=ndarray[Any, Any])\n_ArrayT_co = TypeVar(\n "_ArrayT_co",\n bound=ndarray[Any, Any],\n covariant=True,\n)\n_ReturnType = TypeVar("_ReturnType")\n_IDType = TypeVar("_IDType")\n_Nin = TypeVar("_Nin", bound=int)\n_Nout = TypeVar("_Nout", bound=int)\n\n_ShapeT = TypeVar("_ShapeT", bound=_Shape)\n_Array: TypeAlias = ndarray[_ShapeT, dtype[_ScalarT]]\n_Array1D: TypeAlias = ndarray[tuple[int], dtype[_ScalarT]]\n\n# Valid time units\n_UnitKind: TypeAlias = L[\n "Y",\n "M",\n "D",\n "h",\n "m",\n "s",\n "ms",\n "us", "μs",\n "ns",\n "ps",\n "fs",\n "as",\n]\n_RollKind: TypeAlias = L[ # 
`raise` is deliberately excluded\n "nat",\n "forward",\n "following",\n "backward",\n "preceding",\n "modifiedfollowing",\n "modifiedpreceding",\n]\n\n@type_check_only\nclass _SupportsArray(Protocol[_ArrayT_co]):\n def __array__(self, /) -> _ArrayT_co: ...\n\n@type_check_only\nclass _KwargsEmpty(TypedDict, total=False):\n device: L["cpu"] | None\n like: _SupportsArrayFunc | None\n\n@type_check_only\nclass _ConstructorEmpty(Protocol):\n # 1-D shape\n @overload\n def __call__(\n self,\n /,\n shape: SupportsIndex,\n dtype: None = ...,\n order: _OrderCF = ...,\n **kwargs: Unpack[_KwargsEmpty],\n ) -> _Array1D[float64]: ...\n @overload\n def __call__(\n self,\n /,\n shape: SupportsIndex,\n dtype: _DTypeT | _SupportsDType[_DTypeT],\n order: _OrderCF = ...,\n **kwargs: Unpack[_KwargsEmpty],\n ) -> ndarray[tuple[int], _DTypeT]: ...\n @overload\n def __call__(\n self,\n /,\n shape: SupportsIndex,\n dtype: type[_ScalarT],\n order: _OrderCF = ...,\n **kwargs: Unpack[_KwargsEmpty],\n ) -> _Array1D[_ScalarT]: ...\n @overload\n def __call__(\n self,\n /,\n shape: SupportsIndex,\n dtype: DTypeLike | None = ...,\n order: _OrderCF = ...,\n **kwargs: Unpack[_KwargsEmpty],\n ) -> _Array1D[Any]: ...\n\n # known shape\n @overload\n def __call__(\n self,\n /,\n shape: _AnyShapeT,\n dtype: None = ...,\n order: _OrderCF = ...,\n **kwargs: Unpack[_KwargsEmpty],\n ) -> _Array[_AnyShapeT, float64]: ...\n @overload\n def __call__(\n self,\n /,\n shape: _AnyShapeT,\n dtype: _DTypeT | _SupportsDType[_DTypeT],\n order: _OrderCF = ...,\n **kwargs: Unpack[_KwargsEmpty],\n ) -> ndarray[_AnyShapeT, _DTypeT]: ...\n @overload\n def __call__(\n self,\n /,\n shape: _AnyShapeT,\n dtype: type[_ScalarT],\n order: _OrderCF = ...,\n **kwargs: Unpack[_KwargsEmpty],\n ) -> _Array[_AnyShapeT, _ScalarT]: ...\n @overload\n def __call__(\n self,\n /,\n shape: _AnyShapeT,\n dtype: DTypeLike | None = ...,\n order: _OrderCF = ...,\n **kwargs: Unpack[_KwargsEmpty],\n ) -> _Array[_AnyShapeT, Any]: ...\n\n # unknown 
shape\n @overload\n def __call__(\n self, /,\n shape: _ShapeLike,\n dtype: None = ...,\n order: _OrderCF = ...,\n **kwargs: Unpack[_KwargsEmpty],\n ) -> NDArray[float64]: ...\n @overload\n def __call__(\n self, /,\n shape: _ShapeLike,\n dtype: _DTypeT | _SupportsDType[_DTypeT],\n order: _OrderCF = ...,\n **kwargs: Unpack[_KwargsEmpty],\n ) -> ndarray[Any, _DTypeT]: ...\n @overload\n def __call__(\n self, /,\n shape: _ShapeLike,\n dtype: type[_ScalarT],\n order: _OrderCF = ...,\n **kwargs: Unpack[_KwargsEmpty],\n ) -> NDArray[_ScalarT]: ...\n @overload\n def __call__(\n self,\n /,\n shape: _ShapeLike,\n dtype: DTypeLike | None = ...,\n order: _OrderCF = ...,\n **kwargs: Unpack[_KwargsEmpty],\n ) -> NDArray[Any]: ...\n\n# using `Final` or `TypeAlias` will break stubtest\nerror = Exception\n\n# from ._multiarray_umath\nITEM_HASOBJECT: Final = 1\nLIST_PICKLE: Final = 2\nITEM_IS_POINTER: Final = 4\nNEEDS_INIT: Final = 8\nNEEDS_PYAPI: Final = 16\nUSE_GETITEM: Final = 32\nUSE_SETITEM: Final = 64\nDATETIMEUNITS: Final[CapsuleType]\n_ARRAY_API: Final[CapsuleType]\n_flagdict: Final[dict[str, int]]\n_monotonicity: Final[Callable[..., object]]\n_place: Final[Callable[..., object]]\n_reconstruct: Final[Callable[..., object]]\n_vec_string: Final[Callable[..., object]]\ncorrelate2: Final[Callable[..., object]]\ndragon4_positional: Final[Callable[..., object]]\ndragon4_scientific: Final[Callable[..., object]]\ninterp_complex: Final[Callable[..., object]]\nset_datetimeparse_function: Final[Callable[..., object]]\ndef get_handler_name(a: NDArray[Any] = ..., /) -> str | None: ...\ndef get_handler_version(a: NDArray[Any] = ..., /) -> int | None: ...\ndef format_longfloat(x: np.longdouble, precision: int) -> str: ...\ndef scalar(dtype: _DTypeT, object: bytes | object = ...) 
-> ndarray[tuple[()], _DTypeT]: ...\ndef set_typeDict(dict_: dict[str, np.dtype], /) -> None: ...\ntypeinfo: Final[dict[str, np.dtype[np.generic]]]\n\nALLOW_THREADS: Final[int] # 0 or 1 (system-specific)\nBUFSIZE: L[8192]\nCLIP: L[0]\nWRAP: L[1]\nRAISE: L[2]\nMAXDIMS: L[32]\nMAY_SHARE_BOUNDS: L[0]\nMAY_SHARE_EXACT: L[-1]\ntracemalloc_domain: L[389047]\n\nzeros: Final[_ConstructorEmpty]\nempty: Final[_ConstructorEmpty]\n\n@overload\ndef empty_like(\n prototype: _ArrayT,\n dtype: None = ...,\n order: _OrderKACF = ...,\n subok: bool = ...,\n shape: _ShapeLike | None = ...,\n *,\n device: L["cpu"] | None = ...,\n) -> _ArrayT: ...\n@overload\ndef empty_like(\n prototype: _ArrayLike[_ScalarT],\n dtype: None = ...,\n order: _OrderKACF = ...,\n subok: bool = ...,\n shape: _ShapeLike | None = ...,\n *,\n device: L["cpu"] | None = ...,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef empty_like(\n prototype: Any,\n dtype: _DTypeLike[_ScalarT],\n order: _OrderKACF = ...,\n subok: bool = ...,\n shape: _ShapeLike | None = ...,\n *,\n device: L["cpu"] | None = ...,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef empty_like(\n prototype: Any,\n dtype: DTypeLike | None = ...,\n order: _OrderKACF = ...,\n subok: bool = ...,\n shape: _ShapeLike | None = ...,\n *,\n device: L["cpu"] | None = ...,\n) -> NDArray[Any]: ...\n\n@overload\ndef array(\n object: _ArrayT,\n dtype: None = ...,\n *,\n copy: bool | _CopyMode | None = ...,\n order: _OrderKACF = ...,\n subok: L[True],\n ndmin: int = ...,\n like: _SupportsArrayFunc | None = ...,\n) -> _ArrayT: ...\n@overload\ndef array(\n object: _SupportsArray[_ArrayT],\n dtype: None = ...,\n *,\n copy: bool | _CopyMode | None = ...,\n order: _OrderKACF = ...,\n subok: L[True],\n ndmin: L[0] = ...,\n like: _SupportsArrayFunc | None = ...,\n) -> _ArrayT: ...\n@overload\ndef array(\n object: _ArrayLike[_ScalarT],\n dtype: None = ...,\n *,\n copy: bool | _CopyMode | None = ...,\n order: _OrderKACF = ...,\n subok: bool = ...,\n ndmin: int = ...,\n like: 
_SupportsArrayFunc | None = ...,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef array(\n object: Any,\n dtype: _DTypeLike[_ScalarT],\n *,\n copy: bool | _CopyMode | None = ...,\n order: _OrderKACF = ...,\n subok: bool = ...,\n ndmin: int = ...,\n like: _SupportsArrayFunc | None = ...,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef array(\n object: Any,\n dtype: DTypeLike | None = ...,\n *,\n copy: bool | _CopyMode | None = ...,\n order: _OrderKACF = ...,\n subok: bool = ...,\n ndmin: int = ...,\n like: _SupportsArrayFunc | None = ...,\n) -> NDArray[Any]: ...\n\n#\n@overload\ndef ravel_multi_index(\n multi_index: SupportsLenAndGetItem[_IntLike_co],\n dims: _ShapeLike,\n mode: _ModeKind | tuple[_ModeKind, ...] = "raise",\n order: _OrderCF = "C",\n) -> intp: ...\n@overload\ndef ravel_multi_index(\n multi_index: SupportsLenAndGetItem[_ArrayLikeInt_co],\n dims: _ShapeLike,\n mode: _ModeKind | tuple[_ModeKind, ...] = "raise",\n order: _OrderCF = "C",\n) -> NDArray[intp]: ...\n\n#\n@overload\ndef unravel_index(indices: _IntLike_co, shape: _ShapeLike, order: _OrderCF = "C") -> tuple[intp, ...]: ...\n@overload\ndef unravel_index(indices: _ArrayLikeInt_co, shape: _ShapeLike, order: _OrderCF = "C") -> tuple[NDArray[intp], ...]: ...\n\n# NOTE: Allow any sequence of array-like objects\n@overload\ndef concatenate( # type: ignore[misc]\n arrays: _ArrayLike[_ScalarT],\n /,\n axis: SupportsIndex | None = ...,\n out: None = ...,\n *,\n dtype: None = ...,\n casting: _CastingKind | None = ...\n) -> NDArray[_ScalarT]: ...\n@overload\n@overload\ndef concatenate( # type: ignore[misc]\n arrays: SupportsLenAndGetItem[ArrayLike],\n /,\n axis: SupportsIndex | None = ...,\n out: None = ...,\n *,\n dtype: _DTypeLike[_ScalarT],\n casting: _CastingKind | None = ...\n) -> NDArray[_ScalarT]: ...\n@overload\ndef concatenate( # type: ignore[misc]\n arrays: SupportsLenAndGetItem[ArrayLike],\n /,\n axis: SupportsIndex | None = ...,\n out: None = ...,\n *,\n dtype: DTypeLike | None = None,\n casting: 
_CastingKind | None = ...\n) -> NDArray[Any]: ...\n@overload\ndef concatenate(\n arrays: SupportsLenAndGetItem[ArrayLike],\n /,\n axis: SupportsIndex | None = ...,\n out: _ArrayT = ...,\n *,\n dtype: DTypeLike = ...,\n casting: _CastingKind | None = ...\n) -> _ArrayT: ...\n\ndef inner(\n a: ArrayLike,\n b: ArrayLike,\n /,\n) -> Any: ...\n\n@overload\ndef where(\n condition: ArrayLike,\n /,\n) -> tuple[NDArray[intp], ...]: ...\n@overload\ndef where(\n condition: ArrayLike,\n x: ArrayLike,\n y: ArrayLike,\n /,\n) -> NDArray[Any]: ...\n\ndef lexsort(\n keys: ArrayLike,\n axis: SupportsIndex | None = ...,\n) -> Any: ...\n\ndef can_cast(\n from_: ArrayLike | DTypeLike,\n to: DTypeLike,\n casting: _CastingKind | None = ...,\n) -> bool: ...\n\ndef min_scalar_type(a: ArrayLike, /) -> dtype: ...\n\ndef result_type(*arrays_and_dtypes: ArrayLike | DTypeLike) -> dtype: ...\n\n@overload\ndef dot(a: ArrayLike, b: ArrayLike, out: None = ...) -> Any: ...\n@overload\ndef dot(a: ArrayLike, b: ArrayLike, out: _ArrayT) -> _ArrayT: ...\n\n@overload\ndef vdot(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co, /) -> np.bool: ... # type: ignore[misc]\n@overload\ndef vdot(a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co, /) -> unsignedinteger: ... # type: ignore[misc]\n@overload\ndef vdot(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co, /) -> signedinteger: ... # type: ignore[misc]\n@overload\ndef vdot(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co, /) -> floating: ... # type: ignore[misc]\n@overload\ndef vdot(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co, /) -> complexfloating: ... 
# type: ignore[misc]\n@overload\ndef vdot(a: _ArrayLikeTD64_co, b: _ArrayLikeTD64_co, /) -> timedelta64: ...\n@overload\ndef vdot(a: _ArrayLikeObject_co, b: Any, /) -> Any: ...\n@overload\ndef vdot(a: Any, b: _ArrayLikeObject_co, /) -> Any: ...\n\ndef bincount(\n x: ArrayLike,\n /,\n weights: ArrayLike | None = ...,\n minlength: SupportsIndex = ...,\n) -> NDArray[intp]: ...\n\ndef copyto(\n dst: NDArray[Any],\n src: ArrayLike,\n casting: _CastingKind | None = ...,\n where: _ArrayLikeBool_co | None = ...,\n) -> None: ...\n\ndef putmask(\n a: NDArray[Any],\n /,\n mask: _ArrayLikeBool_co,\n values: ArrayLike,\n) -> None: ...\n\ndef packbits(\n a: _ArrayLikeInt_co,\n /,\n axis: SupportsIndex | None = ...,\n bitorder: L["big", "little"] = ...,\n) -> NDArray[uint8]: ...\n\ndef unpackbits(\n a: _ArrayLike[uint8],\n /,\n axis: SupportsIndex | None = ...,\n count: SupportsIndex | None = ...,\n bitorder: L["big", "little"] = ...,\n) -> NDArray[uint8]: ...\n\ndef shares_memory(\n a: object,\n b: object,\n /,\n max_work: int | None = ...,\n) -> bool: ...\n\ndef may_share_memory(\n a: object,\n b: object,\n /,\n max_work: int | None = ...,\n) -> bool: ...\n\n@overload\ndef asarray(\n a: _ArrayLike[_ScalarT],\n dtype: None = ...,\n order: _OrderKACF = ...,\n *,\n device: L["cpu"] | None = ...,\n copy: bool | None = ...,\n like: _SupportsArrayFunc | None = ...,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef asarray(\n a: Any,\n dtype: _DTypeLike[_ScalarT],\n order: _OrderKACF = ...,\n *,\n device: L["cpu"] | None = ...,\n copy: bool | None = ...,\n like: _SupportsArrayFunc | None = ...,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef asarray(\n a: Any,\n dtype: DTypeLike | None = ...,\n order: _OrderKACF = ...,\n *,\n device: L["cpu"] | None = ...,\n copy: bool | None = ...,\n like: _SupportsArrayFunc | None = ...,\n) -> NDArray[Any]: ...\n\n@overload\ndef asanyarray(\n a: _ArrayT, # Preserve subclass-information\n dtype: None = ...,\n order: _OrderKACF = ...,\n *,\n device: L["cpu"] 
| None = ...,\n copy: bool | None = ...,\n like: _SupportsArrayFunc | None = ...,\n) -> _ArrayT: ...\n@overload\ndef asanyarray(\n a: _ArrayLike[_ScalarT],\n dtype: None = ...,\n order: _OrderKACF = ...,\n *,\n device: L["cpu"] | None = ...,\n copy: bool | None = ...,\n like: _SupportsArrayFunc | None = ...,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef asanyarray(\n a: Any,\n dtype: _DTypeLike[_ScalarT],\n order: _OrderKACF = ...,\n *,\n device: L["cpu"] | None = ...,\n copy: bool | None = ...,\n like: _SupportsArrayFunc | None = ...,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef asanyarray(\n a: Any,\n dtype: DTypeLike | None = ...,\n order: _OrderKACF = ...,\n *,\n device: L["cpu"] | None = ...,\n copy: bool | None = ...,\n like: _SupportsArrayFunc | None = ...,\n) -> NDArray[Any]: ...\n\n@overload\ndef ascontiguousarray(\n a: _ArrayLike[_ScalarT],\n dtype: None = ...,\n *,\n like: _SupportsArrayFunc | None = ...,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef ascontiguousarray(\n a: Any,\n dtype: _DTypeLike[_ScalarT],\n *,\n like: _SupportsArrayFunc | None = ...,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef ascontiguousarray(\n a: Any,\n dtype: DTypeLike | None = ...,\n *,\n like: _SupportsArrayFunc | None = ...,\n) -> NDArray[Any]: ...\n\n@overload\ndef asfortranarray(\n a: _ArrayLike[_ScalarT],\n dtype: None = ...,\n *,\n like: _SupportsArrayFunc | None = ...,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef asfortranarray(\n a: Any,\n dtype: _DTypeLike[_ScalarT],\n *,\n like: _SupportsArrayFunc | None = ...,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef asfortranarray(\n a: Any,\n dtype: DTypeLike | None = ...,\n *,\n like: _SupportsArrayFunc | None = ...,\n) -> NDArray[Any]: ...\n\ndef promote_types(__type1: DTypeLike, __type2: DTypeLike) -> dtype: ...\n\n# `sep` is a de facto mandatory argument, as its default value is deprecated\n@overload\ndef fromstring(\n string: str | bytes,\n dtype: None = ...,\n count: SupportsIndex = ...,\n *,\n sep: str,\n like: 
_SupportsArrayFunc | None = ...,\n) -> NDArray[float64]: ...\n@overload\ndef fromstring(\n string: str | bytes,\n dtype: _DTypeLike[_ScalarT],\n count: SupportsIndex = ...,\n *,\n sep: str,\n like: _SupportsArrayFunc | None = ...,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef fromstring(\n string: str | bytes,\n dtype: DTypeLike | None = ...,\n count: SupportsIndex = ...,\n *,\n sep: str,\n like: _SupportsArrayFunc | None = ...,\n) -> NDArray[Any]: ...\n\n@overload\ndef frompyfunc( # type: ignore[overload-overlap]\n func: Callable[[Any], _ReturnType], /,\n nin: L[1],\n nout: L[1],\n *,\n identity: None = ...,\n) -> _PyFunc_Nin1_Nout1[_ReturnType, None]: ...\n@overload\ndef frompyfunc( # type: ignore[overload-overlap]\n func: Callable[[Any], _ReturnType], /,\n nin: L[1],\n nout: L[1],\n *,\n identity: _IDType,\n) -> _PyFunc_Nin1_Nout1[_ReturnType, _IDType]: ...\n@overload\ndef frompyfunc( # type: ignore[overload-overlap]\n func: Callable[[Any, Any], _ReturnType], /,\n nin: L[2],\n nout: L[1],\n *,\n identity: None = ...,\n) -> _PyFunc_Nin2_Nout1[_ReturnType, None]: ...\n@overload\ndef frompyfunc( # type: ignore[overload-overlap]\n func: Callable[[Any, Any], _ReturnType], /,\n nin: L[2],\n nout: L[1],\n *,\n identity: _IDType,\n) -> _PyFunc_Nin2_Nout1[_ReturnType, _IDType]: ...\n@overload\ndef frompyfunc( # type: ignore[overload-overlap]\n func: Callable[..., _ReturnType], /,\n nin: _Nin,\n nout: L[1],\n *,\n identity: None = ...,\n) -> _PyFunc_Nin3P_Nout1[_ReturnType, None, _Nin]: ...\n@overload\ndef frompyfunc( # type: ignore[overload-overlap]\n func: Callable[..., _ReturnType], /,\n nin: _Nin,\n nout: L[1],\n *,\n identity: _IDType,\n) -> _PyFunc_Nin3P_Nout1[_ReturnType, _IDType, _Nin]: ...\n@overload\ndef frompyfunc(\n func: Callable[..., _2PTuple[_ReturnType]], /,\n nin: _Nin,\n nout: _Nout,\n *,\n identity: None = ...,\n) -> _PyFunc_Nin1P_Nout2P[_ReturnType, None, _Nin, _Nout]: ...\n@overload\ndef frompyfunc(\n func: Callable[..., _2PTuple[_ReturnType]], /,\n 
nin: _Nin,\n nout: _Nout,\n *,\n identity: _IDType,\n) -> _PyFunc_Nin1P_Nout2P[_ReturnType, _IDType, _Nin, _Nout]: ...\n@overload\ndef frompyfunc(\n func: Callable[..., Any], /,\n nin: SupportsIndex,\n nout: SupportsIndex,\n *,\n identity: object | None = ...,\n) -> ufunc: ...\n\n@overload\ndef fromfile(\n file: StrOrBytesPath | _SupportsFileMethods,\n dtype: None = ...,\n count: SupportsIndex = ...,\n sep: str = ...,\n offset: SupportsIndex = ...,\n *,\n like: _SupportsArrayFunc | None = ...,\n) -> NDArray[float64]: ...\n@overload\ndef fromfile(\n file: StrOrBytesPath | _SupportsFileMethods,\n dtype: _DTypeLike[_ScalarT],\n count: SupportsIndex = ...,\n sep: str = ...,\n offset: SupportsIndex = ...,\n *,\n like: _SupportsArrayFunc | None = ...,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef fromfile(\n file: StrOrBytesPath | _SupportsFileMethods,\n dtype: DTypeLike | None = ...,\n count: SupportsIndex = ...,\n sep: str = ...,\n offset: SupportsIndex = ...,\n *,\n like: _SupportsArrayFunc | None = ...,\n) -> NDArray[Any]: ...\n\n@overload\ndef fromiter(\n iter: Iterable[Any],\n dtype: _DTypeLike[_ScalarT],\n count: SupportsIndex = ...,\n *,\n like: _SupportsArrayFunc | None = ...,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef fromiter(\n iter: Iterable[Any],\n dtype: DTypeLike,\n count: SupportsIndex = ...,\n *,\n like: _SupportsArrayFunc | None = ...,\n) -> NDArray[Any]: ...\n\n@overload\ndef frombuffer(\n buffer: _SupportsBuffer,\n dtype: None = ...,\n count: SupportsIndex = ...,\n offset: SupportsIndex = ...,\n *,\n like: _SupportsArrayFunc | None = ...,\n) -> NDArray[float64]: ...\n@overload\ndef frombuffer(\n buffer: _SupportsBuffer,\n dtype: _DTypeLike[_ScalarT],\n count: SupportsIndex = ...,\n offset: SupportsIndex = ...,\n *,\n like: _SupportsArrayFunc | None = ...,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef frombuffer(\n buffer: _SupportsBuffer,\n dtype: DTypeLike | None = ...,\n count: SupportsIndex = ...,\n offset: SupportsIndex = ...,\n *,\n like: 
_SupportsArrayFunc | None = ...,\n) -> NDArray[Any]: ...\n\n@overload\ndef arange( # type: ignore[misc]\n stop: _IntLike_co,\n /, *,\n dtype: None = ...,\n device: L["cpu"] | None = ...,\n like: _SupportsArrayFunc | None = ...,\n) -> _Array1D[signedinteger]: ...\n@overload\ndef arange( # type: ignore[misc]\n start: _IntLike_co,\n stop: _IntLike_co,\n step: _IntLike_co = ...,\n dtype: None = ...,\n *,\n device: L["cpu"] | None = ...,\n like: _SupportsArrayFunc | None = ...,\n) -> _Array1D[signedinteger]: ...\n@overload\ndef arange( # type: ignore[misc]\n stop: _FloatLike_co,\n /, *,\n dtype: None = ...,\n device: L["cpu"] | None = ...,\n like: _SupportsArrayFunc | None = ...,\n) -> _Array1D[floating]: ...\n@overload\ndef arange( # type: ignore[misc]\n start: _FloatLike_co,\n stop: _FloatLike_co,\n step: _FloatLike_co = ...,\n dtype: None = ...,\n *,\n device: L["cpu"] | None = ...,\n like: _SupportsArrayFunc | None = ...,\n) -> _Array1D[floating]: ...\n@overload\ndef arange(\n stop: _TD64Like_co,\n /, *,\n dtype: None = ...,\n device: L["cpu"] | None = ...,\n like: _SupportsArrayFunc | None = ...,\n) -> _Array1D[timedelta64]: ...\n@overload\ndef arange(\n start: _TD64Like_co,\n stop: _TD64Like_co,\n step: _TD64Like_co = ...,\n dtype: None = ...,\n *,\n device: L["cpu"] | None = ...,\n like: _SupportsArrayFunc | None = ...,\n) -> _Array1D[timedelta64]: ...\n@overload\ndef arange( # both start and stop must always be specified for datetime64\n start: datetime64,\n stop: datetime64,\n step: datetime64 = ...,\n dtype: None = ...,\n *,\n device: L["cpu"] | None = ...,\n like: _SupportsArrayFunc | None = ...,\n) -> _Array1D[datetime64]: ...\n@overload\ndef arange(\n stop: Any,\n /, *,\n dtype: _DTypeLike[_ScalarT],\n device: L["cpu"] | None = ...,\n like: _SupportsArrayFunc | None = ...,\n) -> _Array1D[_ScalarT]: ...\n@overload\ndef arange(\n start: Any,\n stop: Any,\n step: Any = ...,\n dtype: _DTypeLike[_ScalarT] = ...,\n *,\n device: L["cpu"] | None = ...,\n like: 
_SupportsArrayFunc | None = ...,\n) -> _Array1D[_ScalarT]: ...\n@overload\ndef arange(\n stop: Any, /,\n *,\n dtype: DTypeLike | None = ...,\n device: L["cpu"] | None = ...,\n like: _SupportsArrayFunc | None = ...,\n) -> _Array1D[Any]: ...\n@overload\ndef arange(\n start: Any,\n stop: Any,\n step: Any = ...,\n dtype: DTypeLike | None = ...,\n *,\n device: L["cpu"] | None = ...,\n like: _SupportsArrayFunc | None = ...,\n) -> _Array1D[Any]: ...\n\ndef datetime_data(\n dtype: str | _DTypeLike[datetime64] | _DTypeLike[timedelta64], /,\n) -> tuple[str, int]: ...\n\n# The datetime functions perform unsafe casts to `datetime64[D]`,\n# so a lot of different argument types are allowed here\n\n@overload\ndef busday_count( # type: ignore[misc]\n begindates: _ScalarLike_co | dt.date,\n enddates: _ScalarLike_co | dt.date,\n weekmask: ArrayLike = ...,\n holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ...,\n busdaycal: busdaycalendar | None = ...,\n out: None = ...,\n) -> int_: ...\n@overload\ndef busday_count( # type: ignore[misc]\n begindates: ArrayLike | dt.date | _NestedSequence[dt.date],\n enddates: ArrayLike | dt.date | _NestedSequence[dt.date],\n weekmask: ArrayLike = ...,\n holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ...,\n busdaycal: busdaycalendar | None = ...,\n out: None = ...,\n) -> NDArray[int_]: ...\n@overload\ndef busday_count(\n begindates: ArrayLike | dt.date | _NestedSequence[dt.date],\n enddates: ArrayLike | dt.date | _NestedSequence[dt.date],\n weekmask: ArrayLike = ...,\n holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ...,\n busdaycal: busdaycalendar | None = ...,\n out: _ArrayT = ...,\n) -> _ArrayT: ...\n\n# `roll="raise"` is (more or less?) 
equivalent to `casting="safe"`\n@overload\ndef busday_offset( # type: ignore[misc]\n dates: datetime64 | dt.date,\n offsets: _TD64Like_co | dt.timedelta,\n roll: L["raise"] = ...,\n weekmask: ArrayLike = ...,\n holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ...,\n busdaycal: busdaycalendar | None = ...,\n out: None = ...,\n) -> datetime64: ...\n@overload\ndef busday_offset( # type: ignore[misc]\n dates: _ArrayLike[datetime64] | dt.date | _NestedSequence[dt.date],\n offsets: _ArrayLikeTD64_co | dt.timedelta | _NestedSequence[dt.timedelta],\n roll: L["raise"] = ...,\n weekmask: ArrayLike = ...,\n holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ...,\n busdaycal: busdaycalendar | None = ...,\n out: None = ...,\n) -> NDArray[datetime64]: ...\n@overload\ndef busday_offset( # type: ignore[misc]\n dates: _ArrayLike[datetime64] | dt.date | _NestedSequence[dt.date],\n offsets: _ArrayLikeTD64_co | dt.timedelta | _NestedSequence[dt.timedelta],\n roll: L["raise"] = ...,\n weekmask: ArrayLike = ...,\n holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ...,\n busdaycal: busdaycalendar | None = ...,\n out: _ArrayT = ...,\n) -> _ArrayT: ...\n@overload\ndef busday_offset( # type: ignore[misc]\n dates: _ScalarLike_co | dt.date,\n offsets: _ScalarLike_co | dt.timedelta,\n roll: _RollKind,\n weekmask: ArrayLike = ...,\n holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ...,\n busdaycal: busdaycalendar | None = ...,\n out: None = ...,\n) -> datetime64: ...\n@overload\ndef busday_offset( # type: ignore[misc]\n dates: ArrayLike | dt.date | _NestedSequence[dt.date],\n offsets: ArrayLike | dt.timedelta | _NestedSequence[dt.timedelta],\n roll: _RollKind,\n weekmask: ArrayLike = ...,\n holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ...,\n busdaycal: busdaycalendar | None = ...,\n out: None = ...,\n) -> NDArray[datetime64]: ...\n@overload\ndef busday_offset(\n dates: ArrayLike | dt.date | 
_NestedSequence[dt.date],\n offsets: ArrayLike | dt.timedelta | _NestedSequence[dt.timedelta],\n roll: _RollKind,\n weekmask: ArrayLike = ...,\n holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ...,\n busdaycal: busdaycalendar | None = ...,\n out: _ArrayT = ...,\n) -> _ArrayT: ...\n\n@overload\ndef is_busday( # type: ignore[misc]\n dates: _ScalarLike_co | dt.date,\n weekmask: ArrayLike = ...,\n holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ...,\n busdaycal: busdaycalendar | None = ...,\n out: None = ...,\n) -> np.bool: ...\n@overload\ndef is_busday( # type: ignore[misc]\n dates: ArrayLike | _NestedSequence[dt.date],\n weekmask: ArrayLike = ...,\n holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ...,\n busdaycal: busdaycalendar | None = ...,\n out: None = ...,\n) -> NDArray[np.bool]: ...\n@overload\ndef is_busday(\n dates: ArrayLike | _NestedSequence[dt.date],\n weekmask: ArrayLike = ...,\n holidays: ArrayLike | dt.date | _NestedSequence[dt.date] | None = ...,\n busdaycal: busdaycalendar | None = ...,\n out: _ArrayT = ...,\n) -> _ArrayT: ...\n\n@overload\ndef datetime_as_string( # type: ignore[misc]\n arr: datetime64 | dt.date,\n unit: L["auto"] | _UnitKind | None = ...,\n timezone: L["naive", "UTC", "local"] | dt.tzinfo = ...,\n casting: _CastingKind = ...,\n) -> str_: ...\n@overload\ndef datetime_as_string(\n arr: _ArrayLikeDT64_co | _NestedSequence[dt.date],\n unit: L["auto"] | _UnitKind | None = ...,\n timezone: L["naive", "UTC", "local"] | dt.tzinfo = ...,\n casting: _CastingKind = ...,\n) -> NDArray[str_]: ...\n\n@overload\ndef compare_chararrays(\n a1: _ArrayLikeStr_co,\n a2: _ArrayLikeStr_co,\n cmp: L["<", "<=", "==", ">=", ">", "!="],\n rstrip: bool,\n) -> NDArray[np.bool]: ...\n@overload\ndef compare_chararrays(\n a1: _ArrayLikeBytes_co,\n a2: _ArrayLikeBytes_co,\n cmp: L["<", "<=", "==", ">=", ">", "!="],\n rstrip: bool,\n) -> NDArray[np.bool]: ...\n\ndef add_docstring(obj: Callable[..., Any], 
docstring: str, /) -> None: ...\n\n_GetItemKeys: TypeAlias = L[\n "C", "CONTIGUOUS", "C_CONTIGUOUS",\n "F", "FORTRAN", "F_CONTIGUOUS",\n "W", "WRITEABLE",\n "B", "BEHAVED",\n "O", "OWNDATA",\n "A", "ALIGNED",\n "X", "WRITEBACKIFCOPY",\n "CA", "CARRAY",\n "FA", "FARRAY",\n "FNC",\n "FORC",\n]\n_SetItemKeys: TypeAlias = L[\n "A", "ALIGNED",\n "W", "WRITEABLE",\n "X", "WRITEBACKIFCOPY",\n]\n\n@final\nclass flagsobj:\n __hash__: ClassVar[None] # type: ignore[assignment]\n aligned: bool\n # NOTE: deprecated\n # updateifcopy: bool\n writeable: bool\n writebackifcopy: bool\n @property\n def behaved(self) -> bool: ...\n @property\n def c_contiguous(self) -> bool: ...\n @property\n def carray(self) -> bool: ...\n @property\n def contiguous(self) -> bool: ...\n @property\n def f_contiguous(self) -> bool: ...\n @property\n def farray(self) -> bool: ...\n @property\n def fnc(self) -> bool: ...\n @property\n def forc(self) -> bool: ...\n @property\n def fortran(self) -> bool: ...\n @property\n def num(self) -> int: ...\n @property\n def owndata(self) -> bool: ...\n def __getitem__(self, key: _GetItemKeys) -> bool: ...\n def __setitem__(self, key: _SetItemKeys, value: bool) -> None: ...\n\ndef nested_iters(\n op: ArrayLike | Sequence[ArrayLike],\n axes: Sequence[Sequence[SupportsIndex]],\n flags: Sequence[_NDIterFlagsKind] | None = ...,\n op_flags: Sequence[Sequence[_NDIterFlagsOp]] | None = ...,\n op_dtypes: DTypeLike | Sequence[DTypeLike] = ...,\n order: _OrderKACF = ...,\n casting: _CastingKind = ...,\n buffersize: SupportsIndex = ...,\n) -> tuple[nditer, ...]: ...\n
.venv\Lib\site-packages\numpy\_core\multiarray.pyi
multiarray.pyi
Other
33,435
0.95
0.109728
0.070033
vue-tools
803
2024-08-05T16:17:00.601272
Apache-2.0
false
8790de7634a5aed5bc78709b30732e08
import builtins\nimport functools\nimport itertools\nimport math\nimport numbers\nimport operator\nimport sys\nimport warnings\n\nimport numpy as np\nfrom numpy.exceptions import AxisError\n\nfrom . import multiarray, numerictypes, overrides, shape_base, umath\nfrom . import numerictypes as nt\nfrom ._ufunc_config import errstate\nfrom .multiarray import ( # noqa: F401\n ALLOW_THREADS,\n BUFSIZE,\n CLIP,\n MAXDIMS,\n MAY_SHARE_BOUNDS,\n MAY_SHARE_EXACT,\n RAISE,\n WRAP,\n arange,\n array,\n asanyarray,\n asarray,\n ascontiguousarray,\n asfortranarray,\n broadcast,\n can_cast,\n concatenate,\n copyto,\n dot,\n dtype,\n empty,\n empty_like,\n flatiter,\n from_dlpack,\n frombuffer,\n fromfile,\n fromiter,\n fromstring,\n inner,\n lexsort,\n matmul,\n may_share_memory,\n min_scalar_type,\n ndarray,\n nditer,\n nested_iters,\n normalize_axis_index,\n promote_types,\n putmask,\n result_type,\n shares_memory,\n vdot,\n vecdot,\n where,\n zeros,\n)\nfrom .overrides import finalize_array_function_like, set_module\nfrom .umath import NAN, PINF, invert, multiply, sin\n\nbitwise_not = invert\nufunc = type(sin)\nnewaxis = None\n\narray_function_dispatch = functools.partial(\n overrides.array_function_dispatch, module='numpy')\n\n\n__all__ = [\n 'newaxis', 'ndarray', 'flatiter', 'nditer', 'nested_iters', 'ufunc',\n 'arange', 'array', 'asarray', 'asanyarray', 'ascontiguousarray',\n 'asfortranarray', 'zeros', 'count_nonzero', 'empty', 'broadcast', 'dtype',\n 'fromstring', 'fromfile', 'frombuffer', 'from_dlpack', 'where',\n 'argwhere', 'copyto', 'concatenate', 'lexsort', 'astype',\n 'can_cast', 'promote_types', 'min_scalar_type',\n 'result_type', 'isfortran', 'empty_like', 'zeros_like', 'ones_like',\n 'correlate', 'convolve', 'inner', 'dot', 'outer', 'vdot', 'roll',\n 'rollaxis', 'moveaxis', 'cross', 'tensordot', 'little_endian',\n 'fromiter', 'array_equal', 'array_equiv', 'indices', 'fromfunction',\n 'isclose', 'isscalar', 'binary_repr', 'base_repr', 'ones',\n 'identity', 
'allclose', 'putmask',\n 'flatnonzero', 'inf', 'nan', 'False_', 'True_', 'bitwise_not',\n 'full', 'full_like', 'matmul', 'vecdot', 'shares_memory',\n 'may_share_memory']\n\n\ndef _zeros_like_dispatcher(\n a, dtype=None, order=None, subok=None, shape=None, *, device=None\n):\n return (a,)\n\n\n@array_function_dispatch(_zeros_like_dispatcher)\ndef zeros_like(\n a, dtype=None, order='K', subok=True, shape=None, *, device=None\n):\n """\n Return an array of zeros with the same shape and type as a given array.\n\n Parameters\n ----------\n a : array_like\n The shape and data-type of `a` define these same attributes of\n the returned array.\n dtype : data-type, optional\n Overrides the data type of the result.\n order : {'C', 'F', 'A', or 'K'}, optional\n Overrides the memory layout of the result. 'C' means C-order,\n 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,\n 'C' otherwise. 'K' means match the layout of `a` as closely\n as possible.\n subok : bool, optional.\n If True, then the newly created array will use the sub-class\n type of `a`, otherwise it will be a base-class array. Defaults\n to True.\n shape : int or sequence of ints, optional.\n Overrides the shape of the result. If order='K' and the number of\n dimensions is unchanged, will try to keep order, otherwise,\n order='C' is implied.\n device : str, optional\n The device on which to place the created array. Default: None.\n For Array-API interoperability only, so must be ``"cpu"`` if passed.\n\n .. 
versionadded:: 2.0.0\n\n Returns\n -------\n out : ndarray\n Array of zeros with the same shape and type as `a`.\n\n See Also\n --------\n empty_like : Return an empty array with shape and type of input.\n ones_like : Return an array of ones with shape and type of input.\n full_like : Return a new array with shape of input filled with value.\n zeros : Return a new array setting values to zero.\n\n Examples\n --------\n >>> import numpy as np\n >>> x = np.arange(6)\n >>> x = x.reshape((2, 3))\n >>> x\n array([[0, 1, 2],\n [3, 4, 5]])\n >>> np.zeros_like(x)\n array([[0, 0, 0],\n [0, 0, 0]])\n\n >>> y = np.arange(3, dtype=float)\n >>> y\n array([0., 1., 2.])\n >>> np.zeros_like(y)\n array([0., 0., 0.])\n\n """\n res = empty_like(\n a, dtype=dtype, order=order, subok=subok, shape=shape, device=device\n )\n # needed instead of a 0 to get same result as zeros for string dtypes\n z = zeros(1, dtype=res.dtype)\n multiarray.copyto(res, z, casting='unsafe')\n return res\n\n\n@finalize_array_function_like\n@set_module('numpy')\ndef ones(shape, dtype=None, order='C', *, device=None, like=None):\n """\n Return a new array of given shape and type, filled with ones.\n\n Parameters\n ----------\n shape : int or sequence of ints\n Shape of the new array, e.g., ``(2, 3)`` or ``2``.\n dtype : data-type, optional\n The desired data-type for the array, e.g., `numpy.int8`. Default is\n `numpy.float64`.\n order : {'C', 'F'}, optional, default: C\n Whether to store multi-dimensional data in row-major\n (C-style) or column-major (Fortran-style) order in\n memory.\n device : str, optional\n The device on which to place the created array. Default: None.\n For Array-API interoperability only, so must be ``"cpu"`` if passed.\n\n .. versionadded:: 2.0.0\n ${ARRAY_FUNCTION_LIKE}\n\n .. 
versionadded:: 1.20.0\n\n Returns\n -------\n out : ndarray\n Array of ones with the given shape, dtype, and order.\n\n See Also\n --------\n ones_like : Return an array of ones with shape and type of input.\n empty : Return a new uninitialized array.\n zeros : Return a new array setting values to zero.\n full : Return a new array of given shape filled with value.\n\n Examples\n --------\n >>> import numpy as np\n >>> np.ones(5)\n array([1., 1., 1., 1., 1.])\n\n >>> np.ones((5,), dtype=int)\n array([1, 1, 1, 1, 1])\n\n >>> np.ones((2, 1))\n array([[1.],\n [1.]])\n\n >>> s = (2,2)\n >>> np.ones(s)\n array([[1., 1.],\n [1., 1.]])\n\n """\n if like is not None:\n return _ones_with_like(\n like, shape, dtype=dtype, order=order, device=device\n )\n\n a = empty(shape, dtype, order, device=device)\n multiarray.copyto(a, 1, casting='unsafe')\n return a\n\n\n_ones_with_like = array_function_dispatch()(ones)\n\n\ndef _ones_like_dispatcher(\n a, dtype=None, order=None, subok=None, shape=None, *, device=None\n):\n return (a,)\n\n\n@array_function_dispatch(_ones_like_dispatcher)\ndef ones_like(\n a, dtype=None, order='K', subok=True, shape=None, *, device=None\n):\n """\n Return an array of ones with the same shape and type as a given array.\n\n Parameters\n ----------\n a : array_like\n The shape and data-type of `a` define these same attributes of\n the returned array.\n dtype : data-type, optional\n Overrides the data type of the result.\n order : {'C', 'F', 'A', or 'K'}, optional\n Overrides the memory layout of the result. 'C' means C-order,\n 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,\n 'C' otherwise. 'K' means match the layout of `a` as closely\n as possible.\n subok : bool, optional.\n If True, then the newly created array will use the sub-class\n type of `a`, otherwise it will be a base-class array. Defaults\n to True.\n shape : int or sequence of ints, optional.\n Overrides the shape of the result. 
If order='K' and the number of\n dimensions is unchanged, will try to keep order, otherwise,\n order='C' is implied.\n device : str, optional\n The device on which to place the created array. Default: None.\n For Array-API interoperability only, so must be ``"cpu"`` if passed.\n\n .. versionadded:: 2.0.0\n\n Returns\n -------\n out : ndarray\n Array of ones with the same shape and type as `a`.\n\n See Also\n --------\n empty_like : Return an empty array with shape and type of input.\n zeros_like : Return an array of zeros with shape and type of input.\n full_like : Return a new array with shape of input filled with value.\n ones : Return a new array setting values to one.\n\n Examples\n --------\n >>> import numpy as np\n >>> x = np.arange(6)\n >>> x = x.reshape((2, 3))\n >>> x\n array([[0, 1, 2],\n [3, 4, 5]])\n >>> np.ones_like(x)\n array([[1, 1, 1],\n [1, 1, 1]])\n\n >>> y = np.arange(3, dtype=float)\n >>> y\n array([0., 1., 2.])\n >>> np.ones_like(y)\n array([1., 1., 1.])\n\n """\n res = empty_like(\n a, dtype=dtype, order=order, subok=subok, shape=shape, device=device\n )\n multiarray.copyto(res, 1, casting='unsafe')\n return res\n\n\ndef _full_dispatcher(\n shape, fill_value, dtype=None, order=None, *, device=None, like=None\n):\n return (like,)\n\n\n@finalize_array_function_like\n@set_module('numpy')\ndef full(shape, fill_value, dtype=None, order='C', *, device=None, like=None):\n """\n Return a new array of given shape and type, filled with `fill_value`.\n\n Parameters\n ----------\n shape : int or sequence of ints\n Shape of the new array, e.g., ``(2, 3)`` or ``2``.\n fill_value : scalar or array_like\n Fill value.\n dtype : data-type, optional\n The desired data-type for the array The default, None, means\n ``np.array(fill_value).dtype``.\n order : {'C', 'F'}, optional\n Whether to store multidimensional data in C- or Fortran-contiguous\n (row- or column-wise) order in memory.\n device : str, optional\n The device on which to place the created array. 
Default: None.\n For Array-API interoperability only, so must be ``"cpu"`` if passed.\n\n .. versionadded:: 2.0.0\n ${ARRAY_FUNCTION_LIKE}\n\n .. versionadded:: 1.20.0\n\n Returns\n -------\n out : ndarray\n Array of `fill_value` with the given shape, dtype, and order.\n\n See Also\n --------\n full_like : Return a new array with shape of input filled with value.\n empty : Return a new uninitialized array.\n ones : Return a new array setting values to one.\n zeros : Return a new array setting values to zero.\n\n Examples\n --------\n >>> import numpy as np\n >>> np.full((2, 2), np.inf)\n array([[inf, inf],\n [inf, inf]])\n >>> np.full((2, 2), 10)\n array([[10, 10],\n [10, 10]])\n\n >>> np.full((2, 2), [1, 2])\n array([[1, 2],\n [1, 2]])\n\n """\n if like is not None:\n return _full_with_like(\n like, shape, fill_value, dtype=dtype, order=order, device=device\n )\n\n if dtype is None:\n fill_value = asarray(fill_value)\n dtype = fill_value.dtype\n a = empty(shape, dtype, order, device=device)\n multiarray.copyto(a, fill_value, casting='unsafe')\n return a\n\n\n_full_with_like = array_function_dispatch()(full)\n\n\ndef _full_like_dispatcher(\n a, fill_value, dtype=None, order=None, subok=None, shape=None,\n *, device=None\n):\n return (a,)\n\n\n@array_function_dispatch(_full_like_dispatcher)\ndef full_like(\n a, fill_value, dtype=None, order='K', subok=True, shape=None,\n *, device=None\n):\n """\n Return a full array with the same shape and type as a given array.\n\n Parameters\n ----------\n a : array_like\n The shape and data-type of `a` define these same attributes of\n the returned array.\n fill_value : array_like\n Fill value.\n dtype : data-type, optional\n Overrides the data type of the result.\n order : {'C', 'F', 'A', or 'K'}, optional\n Overrides the memory layout of the result. 'C' means C-order,\n 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,\n 'C' otherwise. 
'K' means match the layout of `a` as closely\n as possible.\n subok : bool, optional.\n If True, then the newly created array will use the sub-class\n type of `a`, otherwise it will be a base-class array. Defaults\n to True.\n shape : int or sequence of ints, optional.\n Overrides the shape of the result. If order='K' and the number of\n dimensions is unchanged, will try to keep order, otherwise,\n order='C' is implied.\n device : str, optional\n The device on which to place the created array. Default: None.\n For Array-API interoperability only, so must be ``"cpu"`` if passed.\n\n .. versionadded:: 2.0.0\n\n Returns\n -------\n out : ndarray\n Array of `fill_value` with the same shape and type as `a`.\n\n See Also\n --------\n empty_like : Return an empty array with shape and type of input.\n ones_like : Return an array of ones with shape and type of input.\n zeros_like : Return an array of zeros with shape and type of input.\n full : Return a new array of given shape filled with value.\n\n Examples\n --------\n >>> import numpy as np\n >>> x = np.arange(6, dtype=int)\n >>> np.full_like(x, 1)\n array([1, 1, 1, 1, 1, 1])\n >>> np.full_like(x, 0.1)\n array([0, 0, 0, 0, 0, 0])\n >>> np.full_like(x, 0.1, dtype=np.double)\n array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])\n >>> np.full_like(x, np.nan, dtype=np.double)\n array([nan, nan, nan, nan, nan, nan])\n\n >>> y = np.arange(6, dtype=np.double)\n >>> np.full_like(y, 0.1)\n array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])\n\n >>> y = np.zeros([2, 2, 3], dtype=int)\n >>> np.full_like(y, [0, 0, 255])\n array([[[ 0, 0, 255],\n [ 0, 0, 255]],\n [[ 0, 0, 255],\n [ 0, 0, 255]]])\n """\n res = empty_like(\n a, dtype=dtype, order=order, subok=subok, shape=shape, device=device\n )\n multiarray.copyto(res, fill_value, casting='unsafe')\n return res\n\n\ndef _count_nonzero_dispatcher(a, axis=None, *, keepdims=None):\n return (a,)\n\n\n@array_function_dispatch(_count_nonzero_dispatcher)\ndef count_nonzero(a, axis=None, *, keepdims=False):\n """\n 
Counts the number of non-zero values in the array ``a``.\n\n The word "non-zero" is in reference to the Python 2.x\n built-in method ``__nonzero__()`` (renamed ``__bool__()``\n in Python 3.x) of Python objects that tests an object's\n "truthfulness". For example, any number is considered\n truthful if it is nonzero, whereas any string is considered\n truthful if it is not the empty string. Thus, this function\n (recursively) counts how many elements in ``a`` (and in\n sub-arrays thereof) have their ``__nonzero__()`` or ``__bool__()``\n method evaluated to ``True``.\n\n Parameters\n ----------\n a : array_like\n The array for which to count non-zeros.\n axis : int or tuple, optional\n Axis or tuple of axes along which to count non-zeros.\n Default is None, meaning that non-zeros will be counted\n along a flattened version of ``a``.\n keepdims : bool, optional\n If this is set to True, the axes that are counted are left\n in the result as dimensions with size one. With this option,\n the result will broadcast correctly against the input array.\n\n Returns\n -------\n count : int or array of int\n Number of non-zero values in the array along a given axis.\n Otherwise, the total number of non-zero values in the array\n is returned.\n\n See Also\n --------\n nonzero : Return the coordinates of all the non-zero values.\n\n Examples\n --------\n >>> import numpy as np\n >>> np.count_nonzero(np.eye(4))\n 4\n >>> a = np.array([[0, 1, 7, 0],\n ... 
[3, 0, 2, 19]])\n >>> np.count_nonzero(a)\n 5\n >>> np.count_nonzero(a, axis=0)\n array([1, 1, 2, 1])\n >>> np.count_nonzero(a, axis=1)\n array([2, 3])\n >>> np.count_nonzero(a, axis=1, keepdims=True)\n array([[2],\n [3]])\n """\n if axis is None and not keepdims:\n return multiarray.count_nonzero(a)\n\n a = asanyarray(a)\n\n # TODO: this works around .astype(bool) not working properly (gh-9847)\n if np.issubdtype(a.dtype, np.character):\n a_bool = a != a.dtype.type()\n else:\n a_bool = a.astype(np.bool, copy=False)\n\n return a_bool.sum(axis=axis, dtype=np.intp, keepdims=keepdims)\n\n\n@set_module('numpy')\ndef isfortran(a):\n """\n Check if the array is Fortran contiguous but *not* C contiguous.\n\n This function is obsolete. If you only want to check if an array is Fortran\n contiguous use ``a.flags.f_contiguous`` instead.\n\n Parameters\n ----------\n a : ndarray\n Input array.\n\n Returns\n -------\n isfortran : bool\n Returns True if the array is Fortran contiguous but *not* C contiguous.\n\n\n Examples\n --------\n\n np.array allows to specify whether the array is written in C-contiguous\n order (last index varies the fastest), or FORTRAN-contiguous order in\n memory (first index varies the fastest).\n\n >>> import numpy as np\n >>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C')\n >>> a\n array([[1, 2, 3],\n [4, 5, 6]])\n >>> np.isfortran(a)\n False\n\n >>> b = np.array([[1, 2, 3], [4, 5, 6]], order='F')\n >>> b\n array([[1, 2, 3],\n [4, 5, 6]])\n >>> np.isfortran(b)\n True\n\n\n The transpose of a C-ordered array is a FORTRAN-ordered array.\n\n >>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C')\n >>> a\n array([[1, 2, 3],\n [4, 5, 6]])\n >>> np.isfortran(a)\n False\n >>> b = a.T\n >>> b\n array([[1, 4],\n [2, 5],\n [3, 6]])\n >>> np.isfortran(b)\n True\n\n C-ordered arrays evaluate as False even if they are also FORTRAN-ordered.\n\n >>> np.isfortran(np.array([1, 2], order='F'))\n False\n\n """\n return a.flags.fnc\n\n\ndef _argwhere_dispatcher(a):\n 
return (a,)\n\n\n@array_function_dispatch(_argwhere_dispatcher)\ndef argwhere(a):\n """\n Find the indices of array elements that are non-zero, grouped by element.\n\n Parameters\n ----------\n a : array_like\n Input data.\n\n Returns\n -------\n index_array : (N, a.ndim) ndarray\n Indices of elements that are non-zero. Indices are grouped by element.\n This array will have shape ``(N, a.ndim)`` where ``N`` is the number of\n non-zero items.\n\n See Also\n --------\n where, nonzero\n\n Notes\n -----\n ``np.argwhere(a)`` is almost the same as ``np.transpose(np.nonzero(a))``,\n but produces a result of the correct shape for a 0D array.\n\n The output of ``argwhere`` is not suitable for indexing arrays.\n For this purpose use ``nonzero(a)`` instead.\n\n Examples\n --------\n >>> import numpy as np\n >>> x = np.arange(6).reshape(2,3)\n >>> x\n array([[0, 1, 2],\n [3, 4, 5]])\n >>> np.argwhere(x>1)\n array([[0, 2],\n [1, 0],\n [1, 1],\n [1, 2]])\n\n """\n # nonzero does not behave well on 0d, so promote to 1d\n if np.ndim(a) == 0:\n a = shape_base.atleast_1d(a)\n # then remove the added dimension\n return argwhere(a)[:, :0]\n return transpose(nonzero(a))\n\n\ndef _flatnonzero_dispatcher(a):\n return (a,)\n\n\n@array_function_dispatch(_flatnonzero_dispatcher)\ndef flatnonzero(a):\n """\n Return indices that are non-zero in the flattened version of a.\n\n This is equivalent to ``np.nonzero(np.ravel(a))[0]``.\n\n Parameters\n ----------\n a : array_like\n Input data.\n\n Returns\n -------\n res : ndarray\n Output array, containing the indices of the elements of ``a.ravel()``\n that are non-zero.\n\n See Also\n --------\n nonzero : Return the indices of the non-zero elements of the input array.\n ravel : Return a 1-D array containing the elements of the input array.\n\n Examples\n --------\n >>> import numpy as np\n >>> x = np.arange(-2, 3)\n >>> x\n array([-2, -1, 0, 1, 2])\n >>> np.flatnonzero(x)\n array([0, 1, 3, 4])\n\n Use the indices of the non-zero elements as an 
index array to extract\n these elements:\n\n >>> x.ravel()[np.flatnonzero(x)]\n array([-2, -1, 1, 2])\n\n """\n return np.nonzero(np.ravel(a))[0]\n\n\ndef _correlate_dispatcher(a, v, mode=None):\n return (a, v)\n\n\n@array_function_dispatch(_correlate_dispatcher)\ndef correlate(a, v, mode='valid'):\n r"""\n Cross-correlation of two 1-dimensional sequences.\n\n This function computes the correlation as generally defined in signal\n processing texts [1]_:\n\n .. math:: c_k = \sum_n a_{n+k} \cdot \overline{v}_n\n\n with a and v sequences being zero-padded where necessary and\n :math:`\overline v` denoting complex conjugation.\n\n Parameters\n ----------\n a, v : array_like\n Input sequences.\n mode : {'valid', 'same', 'full'}, optional\n Refer to the `convolve` docstring. Note that the default\n is 'valid', unlike `convolve`, which uses 'full'.\n\n Returns\n -------\n out : ndarray\n Discrete cross-correlation of `a` and `v`.\n\n See Also\n --------\n convolve : Discrete, linear convolution of two one-dimensional sequences.\n scipy.signal.correlate : uses FFT which has superior performance\n on large arrays.\n\n Notes\n -----\n The definition of correlation above is not unique and sometimes\n correlation may be defined differently. Another common definition is [1]_:\n\n .. math:: c'_k = \sum_n a_{n} \cdot \overline{v_{n+k}}\n\n which is related to :math:`c_k` by :math:`c'_k = c_{-k}`.\n\n `numpy.correlate` may perform slowly in large arrays (i.e. n = 1e5)\n because it does not use the FFT to compute the convolution; in that case,\n `scipy.signal.correlate` might be preferable.\n\n References\n ----------\n .. [1] Wikipedia, "Cross-correlation",\n https://en.wikipedia.org/wiki/Cross-correlation\n\n Examples\n --------\n >>> import numpy as np\n >>> np.correlate([1, 2, 3], [0, 1, 0.5])\n array([3.5])\n >>> np.correlate([1, 2, 3], [0, 1, 0.5], "same")\n array([2. , 3.5, 3. ])\n >>> np.correlate([1, 2, 3], [0, 1, 0.5], "full")\n array([0.5, 2. , 3.5, 3. , 0. 
])\n\n Using complex sequences:\n\n >>> np.correlate([1+1j, 2, 3-1j], [0, 1, 0.5j], 'full')\n array([ 0.5-0.5j, 1.0+0.j , 1.5-1.5j, 3.0-1.j , 0.0+0.j ])\n\n Note that you get the time reversed, complex conjugated result\n (:math:`\overline{c_{-k}}`) when the two input sequences a and v change\n places:\n\n >>> np.correlate([0, 1, 0.5j], [1+1j, 2, 3-1j], 'full')\n array([ 0.0+0.j , 3.0+1.j , 1.5+1.5j, 1.0+0.j , 0.5+0.5j])\n\n """\n return multiarray.correlate2(a, v, mode)\n\n\ndef _convolve_dispatcher(a, v, mode=None):\n return (a, v)\n\n\n@array_function_dispatch(_convolve_dispatcher)\ndef convolve(a, v, mode='full'):\n """\n Returns the discrete, linear convolution of two one-dimensional sequences.\n\n The convolution operator is often seen in signal processing, where it\n models the effect of a linear time-invariant system on a signal [1]_. In\n probability theory, the sum of two independent random variables is\n distributed according to the convolution of their individual\n distributions.\n\n If `v` is longer than `a`, the arrays are swapped before computation.\n\n Parameters\n ----------\n a : (N,) array_like\n First one-dimensional input array.\n v : (M,) array_like\n Second one-dimensional input array.\n mode : {'full', 'valid', 'same'}, optional\n 'full':\n By default, mode is 'full'. This returns the convolution\n at each point of overlap, with an output shape of (N+M-1,). At\n the end-points of the convolution, the signals do not overlap\n completely, and boundary effects may be seen.\n\n 'same':\n Mode 'same' returns output of length ``max(M, N)``. Boundary\n effects are still visible.\n\n 'valid':\n Mode 'valid' returns output of length\n ``max(M, N) - min(M, N) + 1``. The convolution product is only given\n for points where the signals overlap completely. 
Values outside\n the signal boundary have no effect.\n\n Returns\n -------\n out : ndarray\n Discrete, linear convolution of `a` and `v`.\n\n See Also\n --------\n scipy.signal.fftconvolve : Convolve two arrays using the Fast Fourier\n Transform.\n scipy.linalg.toeplitz : Used to construct the convolution operator.\n polymul : Polynomial multiplication. Same output as convolve, but also\n accepts poly1d objects as input.\n\n Notes\n -----\n The discrete convolution operation is defined as\n\n .. math:: (a * v)_n = \\sum_{m = -\\infty}^{\\infty} a_m v_{n - m}\n\n It can be shown that a convolution :math:`x(t) * y(t)` in time/space\n is equivalent to the multiplication :math:`X(f) Y(f)` in the Fourier\n domain, after appropriate padding (padding is necessary to prevent\n circular convolution). Since multiplication is more efficient (faster)\n than convolution, the function `scipy.signal.fftconvolve` exploits the\n FFT to calculate the convolution of large data-sets.\n\n References\n ----------\n .. [1] Wikipedia, "Convolution",\n https://en.wikipedia.org/wiki/Convolution\n\n Examples\n --------\n Note how the convolution operator flips the second array\n before "sliding" the two across one another:\n\n >>> import numpy as np\n >>> np.convolve([1, 2, 3], [0, 1, 0.5])\n array([0. , 1. , 2.5, 4. , 1.5])\n\n Only return the middle values of the convolution.\n Contains boundary effects, where zeros are taken\n into account:\n\n >>> np.convolve([1,2,3],[0,1,0.5], 'same')\n array([1. , 2.5, 4. 
])\n\n The two arrays are of the same length, so there\n is only one position where they completely overlap:\n\n >>> np.convolve([1,2,3],[0,1,0.5], 'valid')\n array([2.5])\n\n """\n a, v = array(a, copy=None, ndmin=1), array(v, copy=None, ndmin=1)\n if (len(v) > len(a)):\n a, v = v, a\n if len(a) == 0:\n raise ValueError('a cannot be empty')\n if len(v) == 0:\n raise ValueError('v cannot be empty')\n return multiarray.correlate(a, v[::-1], mode)\n\n\ndef _outer_dispatcher(a, b, out=None):\n return (a, b, out)\n\n\n@array_function_dispatch(_outer_dispatcher)\ndef outer(a, b, out=None):\n """\n Compute the outer product of two vectors.\n\n Given two vectors `a` and `b` of length ``M`` and ``N``, respectively,\n the outer product [1]_ is::\n\n [[a_0*b_0 a_0*b_1 ... a_0*b_{N-1} ]\n [a_1*b_0 .\n [ ... .\n [a_{M-1}*b_0 a_{M-1}*b_{N-1} ]]\n\n Parameters\n ----------\n a : (M,) array_like\n First input vector. Input is flattened if\n not already 1-dimensional.\n b : (N,) array_like\n Second input vector. Input is flattened if\n not already 1-dimensional.\n out : (M, N) ndarray, optional\n A location where the result is stored\n\n Returns\n -------\n out : (M, N) ndarray\n ``out[i, j] = a[i] * b[j]``\n\n See also\n --------\n inner\n einsum : ``einsum('i,j->ij', a.ravel(), b.ravel())`` is the equivalent.\n ufunc.outer : A generalization to dimensions other than 1D and other\n operations. ``np.multiply.outer(a.ravel(), b.ravel())``\n is the equivalent.\n linalg.outer : An Array API compatible variation of ``np.outer``,\n which accepts 1-dimensional inputs only.\n tensordot : ``np.tensordot(a.ravel(), b.ravel(), axes=((), ()))``\n is the equivalent.\n\n References\n ----------\n .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*, 3rd\n ed., Baltimore, MD, Johns Hopkins University Press, 1996,\n pg. 
8.\n\n Examples\n --------\n Make a (*very* coarse) grid for computing a Mandelbrot set:\n\n >>> import numpy as np\n >>> rl = np.outer(np.ones((5,)), np.linspace(-2, 2, 5))\n >>> rl\n array([[-2., -1., 0., 1., 2.],\n [-2., -1., 0., 1., 2.],\n [-2., -1., 0., 1., 2.],\n [-2., -1., 0., 1., 2.],\n [-2., -1., 0., 1., 2.]])\n >>> im = np.outer(1j*np.linspace(2, -2, 5), np.ones((5,)))\n >>> im\n array([[0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j],\n [0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j],\n [0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],\n [0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j],\n [0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j]])\n >>> grid = rl + im\n >>> grid\n array([[-2.+2.j, -1.+2.j, 0.+2.j, 1.+2.j, 2.+2.j],\n [-2.+1.j, -1.+1.j, 0.+1.j, 1.+1.j, 2.+1.j],\n [-2.+0.j, -1.+0.j, 0.+0.j, 1.+0.j, 2.+0.j],\n [-2.-1.j, -1.-1.j, 0.-1.j, 1.-1.j, 2.-1.j],\n [-2.-2.j, -1.-2.j, 0.-2.j, 1.-2.j, 2.-2.j]])\n\n An example using a "vector" of letters:\n\n >>> x = np.array(['a', 'b', 'c'], dtype=object)\n >>> np.outer(x, [1, 2, 3])\n array([['a', 'aa', 'aaa'],\n ['b', 'bb', 'bbb'],\n ['c', 'cc', 'ccc']], dtype=object)\n\n """\n a = asarray(a)\n b = asarray(b)\n return multiply(a.ravel()[:, newaxis], b.ravel()[newaxis, :], out)\n\n\ndef _tensordot_dispatcher(a, b, axes=None):\n return (a, b)\n\n\n@array_function_dispatch(_tensordot_dispatcher)\ndef tensordot(a, b, axes=2):\n """\n Compute tensor dot product along specified axes.\n\n Given two tensors, `a` and `b`, and an array_like object containing\n two array_like objects, ``(a_axes, b_axes)``, sum the products of\n `a`'s and `b`'s elements (components) over the axes specified by\n ``a_axes`` and ``b_axes``. 
The third argument can be a single non-negative\n integer_like scalar, ``N``; if it is such, then the last ``N`` dimensions\n of `a` and the first ``N`` dimensions of `b` are summed over.\n\n Parameters\n ----------\n a, b : array_like\n Tensors to "dot".\n\n axes : int or (2,) array_like\n * integer_like\n If an int N, sum over the last N axes of `a` and the first N axes\n of `b` in order. The sizes of the corresponding axes must match.\n * (2,) array_like\n Or, a list of axes to be summed over, first sequence applying to `a`,\n second to `b`. Both elements array_like must be of the same length.\n\n Returns\n -------\n output : ndarray\n The tensor dot product of the input.\n\n See Also\n --------\n dot, einsum\n\n Notes\n -----\n Three common use cases are:\n * ``axes = 0`` : tensor product :math:`a\\otimes b`\n * ``axes = 1`` : tensor dot product :math:`a\\cdot b`\n * ``axes = 2`` : (default) tensor double contraction :math:`a:b`\n\n When `axes` is integer_like, the sequence of axes for evaluation\n will be: from the -Nth axis to the -1th axis in `a`,\n and from the 0th axis to (N-1)th axis in `b`.\n For example, ``axes = 2`` is the equal to\n ``axes = [[-2, -1], [0, 1]]``.\n When N-1 is smaller than 0, or when -N is larger than -1,\n the element of `a` and `b` are defined as the `axes`.\n\n When there is more than one axis to sum over - and they are not the last\n (first) axes of `a` (`b`) - the argument `axes` should consist of\n two sequences of the same length, with the first axis to sum over given\n first in both sequences, the second axis second, and so forth.\n The calculation can be referred to ``numpy.einsum``.\n\n The shape of the result consists of the non-contracted axes of the\n first tensor, followed by the non-contracted axes of the second.\n\n Examples\n --------\n An example on integer_like:\n\n >>> a_0 = np.array([[1, 2], [3, 4]])\n >>> b_0 = np.array([[5, 6], [7, 8]])\n >>> c_0 = np.tensordot(a_0, b_0, axes=0)\n >>> c_0.shape\n (2, 2, 2, 2)\n 
>>> c_0\n array([[[[ 5, 6],\n [ 7, 8]],\n [[10, 12],\n [14, 16]]],\n [[[15, 18],\n [21, 24]],\n [[20, 24],\n [28, 32]]]])\n\n An example on array_like:\n\n >>> a = np.arange(60.).reshape(3,4,5)\n >>> b = np.arange(24.).reshape(4,3,2)\n >>> c = np.tensordot(a,b, axes=([1,0],[0,1]))\n >>> c.shape\n (5, 2)\n >>> c\n array([[4400., 4730.],\n [4532., 4874.],\n [4664., 5018.],\n [4796., 5162.],\n [4928., 5306.]])\n\n A slower but equivalent way of computing the same...\n\n >>> d = np.zeros((5,2))\n >>> for i in range(5):\n ... for j in range(2):\n ... for k in range(3):\n ... for n in range(4):\n ... d[i,j] += a[k,n,i] * b[n,k,j]\n >>> c == d\n array([[ True, True],\n [ True, True],\n [ True, True],\n [ True, True],\n [ True, True]])\n\n An extended example taking advantage of the overloading of + and \\*:\n\n >>> a = np.array(range(1, 9))\n >>> a.shape = (2, 2, 2)\n >>> A = np.array(('a', 'b', 'c', 'd'), dtype=object)\n >>> A.shape = (2, 2)\n >>> a; A\n array([[[1, 2],\n [3, 4]],\n [[5, 6],\n [7, 8]]])\n array([['a', 'b'],\n ['c', 'd']], dtype=object)\n\n >>> np.tensordot(a, A) # third argument default is 2 for double-contraction\n array(['abbcccdddd', 'aaaaabbbbbbcccccccdddddddd'], dtype=object)\n\n >>> np.tensordot(a, A, 1)\n array([[['acc', 'bdd'],\n ['aaacccc', 'bbbdddd']],\n [['aaaaacccccc', 'bbbbbdddddd'],\n ['aaaaaaacccccccc', 'bbbbbbbdddddddd']]], dtype=object)\n\n >>> np.tensordot(a, A, 0) # tensor product (result too long to incl.)\n array([[[[['a', 'b'],\n ['c', 'd']],\n ...\n\n >>> np.tensordot(a, A, (0, 1))\n array([[['abbbbb', 'cddddd'],\n ['aabbbbbb', 'ccdddddd']],\n [['aaabbbbbbb', 'cccddddddd'],\n ['aaaabbbbbbbb', 'ccccdddddddd']]], dtype=object)\n\n >>> np.tensordot(a, A, (2, 1))\n array([[['abb', 'cdd'],\n ['aaabbbb', 'cccdddd']],\n [['aaaaabbbbbb', 'cccccdddddd'],\n ['aaaaaaabbbbbbbb', 'cccccccdddddddd']]], dtype=object)\n\n >>> np.tensordot(a, A, ((0, 1), (0, 1)))\n array(['abbbcccccddddddd', 'aabbbbccccccdddddddd'], dtype=object)\n\n >>> 
np.tensordot(a, A, ((2, 1), (1, 0)))\n array(['acccbbdddd', 'aaaaacccccccbbbbbbdddddddd'], dtype=object)\n\n """\n try:\n iter(axes)\n except Exception:\n axes_a = list(range(-axes, 0))\n axes_b = list(range(axes))\n else:\n axes_a, axes_b = axes\n try:\n na = len(axes_a)\n axes_a = list(axes_a)\n except TypeError:\n axes_a = [axes_a]\n na = 1\n try:\n nb = len(axes_b)\n axes_b = list(axes_b)\n except TypeError:\n axes_b = [axes_b]\n nb = 1\n\n a, b = asarray(a), asarray(b)\n as_ = a.shape\n nda = a.ndim\n bs = b.shape\n ndb = b.ndim\n equal = True\n if na != nb:\n equal = False\n else:\n for k in range(na):\n if as_[axes_a[k]] != bs[axes_b[k]]:\n equal = False\n break\n if axes_a[k] < 0:\n axes_a[k] += nda\n if axes_b[k] < 0:\n axes_b[k] += ndb\n if not equal:\n raise ValueError("shape-mismatch for sum")\n\n # Move the axes to sum over to the end of "a"\n # and to the front of "b"\n notin = [k for k in range(nda) if k not in axes_a]\n newaxes_a = notin + axes_a\n N2 = math.prod(as_[axis] for axis in axes_a)\n newshape_a = (math.prod(as_[ax] for ax in notin), N2)\n olda = [as_[axis] for axis in notin]\n\n notin = [k for k in range(ndb) if k not in axes_b]\n newaxes_b = axes_b + notin\n N2 = math.prod(bs[axis] for axis in axes_b)\n newshape_b = (N2, math.prod(bs[ax] for ax in notin))\n oldb = [bs[axis] for axis in notin]\n\n at = a.transpose(newaxes_a).reshape(newshape_a)\n bt = b.transpose(newaxes_b).reshape(newshape_b)\n res = dot(at, bt)\n return res.reshape(olda + oldb)\n\n\ndef _roll_dispatcher(a, shift, axis=None):\n return (a,)\n\n\n@array_function_dispatch(_roll_dispatcher)\ndef roll(a, shift, axis=None):\n """\n Roll array elements along a given axis.\n\n Elements that roll beyond the last position are re-introduced at\n the first.\n\n Parameters\n ----------\n a : array_like\n Input array.\n shift : int or tuple of ints\n The number of places by which elements are shifted. 
If a tuple,\n then `axis` must be a tuple of the same size, and each of the\n given axes is shifted by the corresponding number. If an int\n while `axis` is a tuple of ints, then the same value is used for\n all given axes.\n axis : int or tuple of ints, optional\n Axis or axes along which elements are shifted. By default, the\n array is flattened before shifting, after which the original\n shape is restored.\n\n Returns\n -------\n res : ndarray\n Output array, with the same shape as `a`.\n\n See Also\n --------\n rollaxis : Roll the specified axis backwards, until it lies in a\n given position.\n\n Notes\n -----\n Supports rolling over multiple dimensions simultaneously.\n\n Examples\n --------\n >>> import numpy as np\n >>> x = np.arange(10)\n >>> np.roll(x, 2)\n array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7])\n >>> np.roll(x, -2)\n array([2, 3, 4, 5, 6, 7, 8, 9, 0, 1])\n\n >>> x2 = np.reshape(x, (2, 5))\n >>> x2\n array([[0, 1, 2, 3, 4],\n [5, 6, 7, 8, 9]])\n >>> np.roll(x2, 1)\n array([[9, 0, 1, 2, 3],\n [4, 5, 6, 7, 8]])\n >>> np.roll(x2, -1)\n array([[1, 2, 3, 4, 5],\n [6, 7, 8, 9, 0]])\n >>> np.roll(x2, 1, axis=0)\n array([[5, 6, 7, 8, 9],\n [0, 1, 2, 3, 4]])\n >>> np.roll(x2, -1, axis=0)\n array([[5, 6, 7, 8, 9],\n [0, 1, 2, 3, 4]])\n >>> np.roll(x2, 1, axis=1)\n array([[4, 0, 1, 2, 3],\n [9, 5, 6, 7, 8]])\n >>> np.roll(x2, -1, axis=1)\n array([[1, 2, 3, 4, 0],\n [6, 7, 8, 9, 5]])\n >>> np.roll(x2, (1, 1), axis=(1, 0))\n array([[9, 5, 6, 7, 8],\n [4, 0, 1, 2, 3]])\n >>> np.roll(x2, (2, 1), axis=(1, 0))\n array([[8, 9, 5, 6, 7],\n [3, 4, 0, 1, 2]])\n\n """\n a = asanyarray(a)\n if axis is None:\n return roll(a.ravel(), shift, 0).reshape(a.shape)\n\n else:\n axis = normalize_axis_tuple(axis, a.ndim, allow_duplicate=True)\n broadcasted = broadcast(shift, axis)\n if broadcasted.ndim > 1:\n raise ValueError(\n "'shift' and 'axis' should be scalars or 1D sequences")\n shifts = dict.fromkeys(range(a.ndim), 0)\n for sh, ax in broadcasted:\n shifts[ax] += int(sh)\n\n rolls = 
[((slice(None), slice(None)),)] * a.ndim\n for ax, offset in shifts.items():\n offset %= a.shape[ax] or 1 # If `a` is empty, nothing matters.\n if offset:\n # (original, result), (original, result)\n rolls[ax] = ((slice(None, -offset), slice(offset, None)),\n (slice(-offset, None), slice(None, offset)))\n\n result = empty_like(a)\n for indices in itertools.product(*rolls):\n arr_index, res_index = zip(*indices)\n result[res_index] = a[arr_index]\n\n return result\n\n\ndef _rollaxis_dispatcher(a, axis, start=None):\n return (a,)\n\n\n@array_function_dispatch(_rollaxis_dispatcher)\ndef rollaxis(a, axis, start=0):\n """\n Roll the specified axis backwards, until it lies in a given position.\n\n This function continues to be supported for backward compatibility, but you\n should prefer `moveaxis`. The `moveaxis` function was added in NumPy\n 1.11.\n\n Parameters\n ----------\n a : ndarray\n Input array.\n axis : int\n The axis to be rolled. The positions of the other axes do not\n change relative to one another.\n start : int, optional\n When ``start <= axis``, the axis is rolled back until it lies in\n this position. When ``start > axis``, the axis is rolled until it\n lies before this position. The default, 0, results in a "complete"\n roll. The following table describes how negative values of ``start``\n are interpreted:\n\n .. 
table::\n :align: left\n\n +-------------------+----------------------+\n | ``start`` | Normalized ``start`` |\n +===================+======================+\n | ``-(arr.ndim+1)`` | raise ``AxisError`` |\n +-------------------+----------------------+\n | ``-arr.ndim`` | 0 |\n +-------------------+----------------------+\n | |vdots| | |vdots| |\n +-------------------+----------------------+\n | ``-1`` | ``arr.ndim-1`` |\n +-------------------+----------------------+\n | ``0`` | ``0`` |\n +-------------------+----------------------+\n | |vdots| | |vdots| |\n +-------------------+----------------------+\n | ``arr.ndim`` | ``arr.ndim`` |\n +-------------------+----------------------+\n | ``arr.ndim + 1`` | raise ``AxisError`` |\n +-------------------+----------------------+\n\n .. |vdots| unicode:: U+22EE .. Vertical Ellipsis\n\n Returns\n -------\n res : ndarray\n For NumPy >= 1.10.0 a view of `a` is always returned. For earlier\n NumPy versions a view of `a` is returned only if the order of the\n axes is changed, otherwise the input array is returned.\n\n See Also\n --------\n moveaxis : Move array axes to new positions.\n roll : Roll the elements of an array by a number of positions along a\n given axis.\n\n Examples\n --------\n >>> import numpy as np\n >>> a = np.ones((3,4,5,6))\n >>> np.rollaxis(a, 3, 1).shape\n (3, 6, 4, 5)\n >>> np.rollaxis(a, 2).shape\n (5, 3, 4, 6)\n >>> np.rollaxis(a, 1, 4).shape\n (3, 5, 6, 4)\n\n """\n n = a.ndim\n axis = normalize_axis_index(axis, n)\n if start < 0:\n start += n\n msg = "'%s' arg requires %d <= %s < %d, but %d was passed in"\n if not (0 <= start < n + 1):\n raise AxisError(msg % ('start', -n, 'start', n + 1, start))\n if axis < start:\n # it's been removed\n start -= 1\n if axis == start:\n return a[...]\n axes = list(range(n))\n axes.remove(axis)\n axes.insert(start, axis)\n return a.transpose(axes)\n\n\n@set_module("numpy.lib.array_utils")\ndef normalize_axis_tuple(axis, ndim, argname=None, allow_duplicate=False):\n 
"""\n Normalizes an axis argument into a tuple of non-negative integer axes.\n\n This handles shorthands such as ``1`` and converts them to ``(1,)``,\n as well as performing the handling of negative indices covered by\n `normalize_axis_index`.\n\n By default, this forbids axes from being specified multiple times.\n\n Used internally by multi-axis-checking logic.\n\n Parameters\n ----------\n axis : int, iterable of int\n The un-normalized index or indices of the axis.\n ndim : int\n The number of dimensions of the array that `axis` should be normalized\n against.\n argname : str, optional\n A prefix to put before the error message, typically the name of the\n argument.\n allow_duplicate : bool, optional\n If False, the default, disallow an axis from being specified twice.\n\n Returns\n -------\n normalized_axes : tuple of int\n The normalized axis index, such that `0 <= normalized_axis < ndim`\n\n Raises\n ------\n AxisError\n If any axis provided is out of range\n ValueError\n If an axis is repeated\n\n See also\n --------\n normalize_axis_index : normalizing a single scalar axis\n """\n # Optimization to speed-up the most common cases.\n if not isinstance(axis, (tuple, list)):\n try:\n axis = [operator.index(axis)]\n except TypeError:\n pass\n # Going via an iterator directly is slower than via list comprehension.\n axis = tuple(normalize_axis_index(ax, ndim, argname) for ax in axis)\n if not allow_duplicate and len(set(axis)) != len(axis):\n if argname:\n raise ValueError(f'repeated axis in `{argname}` argument')\n else:\n raise ValueError('repeated axis')\n return axis\n\n\ndef _moveaxis_dispatcher(a, source, destination):\n return (a,)\n\n\n@array_function_dispatch(_moveaxis_dispatcher)\ndef moveaxis(a, source, destination):\n """\n Move axes of an array to new positions.\n\n Other axes remain in their original order.\n\n Parameters\n ----------\n a : np.ndarray\n The array whose axes should be reordered.\n source : int or sequence of int\n Original positions 
of the axes to move. These must be unique.\n destination : int or sequence of int\n Destination positions for each of the original axes. These must also be\n unique.\n\n Returns\n -------\n result : np.ndarray\n Array with moved axes. This array is a view of the input array.\n\n See Also\n --------\n transpose : Permute the dimensions of an array.\n swapaxes : Interchange two axes of an array.\n\n Examples\n --------\n >>> import numpy as np\n >>> x = np.zeros((3, 4, 5))\n >>> np.moveaxis(x, 0, -1).shape\n (4, 5, 3)\n >>> np.moveaxis(x, -1, 0).shape\n (5, 3, 4)\n\n These all achieve the same result:\n\n >>> np.transpose(x).shape\n (5, 4, 3)\n >>> np.swapaxes(x, 0, -1).shape\n (5, 4, 3)\n >>> np.moveaxis(x, [0, 1], [-1, -2]).shape\n (5, 4, 3)\n >>> np.moveaxis(x, [0, 1, 2], [-1, -2, -3]).shape\n (5, 4, 3)\n\n """\n try:\n # allow duck-array types if they define transpose\n transpose = a.transpose\n except AttributeError:\n a = asarray(a)\n transpose = a.transpose\n\n source = normalize_axis_tuple(source, a.ndim, 'source')\n destination = normalize_axis_tuple(destination, a.ndim, 'destination')\n if len(source) != len(destination):\n raise ValueError('`source` and `destination` arguments must have '\n 'the same number of elements')\n\n order = [n for n in range(a.ndim) if n not in source]\n\n for dest, src in sorted(zip(destination, source)):\n order.insert(dest, src)\n\n result = transpose(order)\n return result\n\n\ndef _cross_dispatcher(a, b, axisa=None, axisb=None, axisc=None, axis=None):\n return (a, b)\n\n\n@array_function_dispatch(_cross_dispatcher)\ndef cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):\n """\n Return the cross product of two (arrays of) vectors.\n\n The cross product of `a` and `b` in :math:`R^3` is a vector perpendicular\n to both `a` and `b`. If `a` and `b` are arrays of vectors, the vectors\n are defined by the last axis of `a` and `b` by default, and these axes\n can have dimensions 2 or 3. 
Where the dimension of either `a` or `b` is\n 2, the third component of the input vector is assumed to be zero and the\n cross product calculated accordingly. In cases where both input vectors\n have dimension 2, the z-component of the cross product is returned.\n\n Parameters\n ----------\n a : array_like\n Components of the first vector(s).\n b : array_like\n Components of the second vector(s).\n axisa : int, optional\n Axis of `a` that defines the vector(s). By default, the last axis.\n axisb : int, optional\n Axis of `b` that defines the vector(s). By default, the last axis.\n axisc : int, optional\n Axis of `c` containing the cross product vector(s). Ignored if\n both input vectors have dimension 2, as the return is scalar.\n By default, the last axis.\n axis : int, optional\n If defined, the axis of `a`, `b` and `c` that defines the vector(s)\n and cross product(s). Overrides `axisa`, `axisb` and `axisc`.\n\n Returns\n -------\n c : ndarray\n Vector cross product(s).\n\n Raises\n ------\n ValueError\n When the dimension of the vector(s) in `a` and/or `b` does not\n equal 2 or 3.\n\n See Also\n --------\n inner : Inner product\n outer : Outer product.\n linalg.cross : An Array API compatible variation of ``np.cross``,\n which accepts (arrays of) 3-element vectors only.\n ix_ : Construct index arrays.\n\n Notes\n -----\n Supports full broadcasting of the inputs.\n\n Dimension-2 input arrays were deprecated in 2.0.0. 
If you do need this\n functionality, you can use::\n\n def cross2d(x, y):\n return x[..., 0] * y[..., 1] - x[..., 1] * y[..., 0]\n\n Examples\n --------\n Vector cross-product.\n\n >>> import numpy as np\n >>> x = [1, 2, 3]\n >>> y = [4, 5, 6]\n >>> np.cross(x, y)\n array([-3, 6, -3])\n\n One vector with dimension 2.\n\n >>> x = [1, 2]\n >>> y = [4, 5, 6]\n >>> np.cross(x, y)\n array([12, -6, -3])\n\n Equivalently:\n\n >>> x = [1, 2, 0]\n >>> y = [4, 5, 6]\n >>> np.cross(x, y)\n array([12, -6, -3])\n\n Both vectors with dimension 2.\n\n >>> x = [1,2]\n >>> y = [4,5]\n >>> np.cross(x, y)\n array(-3)\n\n Multiple vector cross-products. Note that the direction of the cross\n product vector is defined by the *right-hand rule*.\n\n >>> x = np.array([[1,2,3], [4,5,6]])\n >>> y = np.array([[4,5,6], [1,2,3]])\n >>> np.cross(x, y)\n array([[-3, 6, -3],\n [ 3, -6, 3]])\n\n The orientation of `c` can be changed using the `axisc` keyword.\n\n >>> np.cross(x, y, axisc=0)\n array([[-3, 3],\n [ 6, -6],\n [-3, 3]])\n\n Change the vector definition of `x` and `y` using `axisa` and `axisb`.\n\n >>> x = np.array([[1,2,3], [4,5,6], [7, 8, 9]])\n >>> y = np.array([[7, 8, 9], [4,5,6], [1,2,3]])\n >>> np.cross(x, y)\n array([[ -6, 12, -6],\n [ 0, 0, 0],\n [ 6, -12, 6]])\n >>> np.cross(x, y, axisa=0, axisb=0)\n array([[-24, 48, -24],\n [-30, 60, -30],\n [-36, 72, -36]])\n\n """\n if axis is not None:\n axisa, axisb, axisc = (axis,) * 3\n a = asarray(a)\n b = asarray(b)\n\n if (a.ndim < 1) or (b.ndim < 1):\n raise ValueError("At least one array has zero dimension")\n\n # Check axisa and axisb are within bounds\n axisa = normalize_axis_index(axisa, a.ndim, msg_prefix='axisa')\n axisb = normalize_axis_index(axisb, b.ndim, msg_prefix='axisb')\n\n # Move working axis to the end of the shape\n a = moveaxis(a, axisa, -1)\n b = moveaxis(b, axisb, -1)\n msg = ("incompatible dimensions for cross product\n"\n "(dimension must be 2 or 3)")\n if a.shape[-1] not in (2, 3) or b.shape[-1] not in (2, 
3):\n raise ValueError(msg)\n if a.shape[-1] == 2 or b.shape[-1] == 2:\n # Deprecated in NumPy 2.0, 2023-09-26\n warnings.warn(\n "Arrays of 2-dimensional vectors are deprecated. Use arrays of "\n "3-dimensional vectors instead. (deprecated in NumPy 2.0)",\n DeprecationWarning, stacklevel=2\n )\n\n # Create the output array\n shape = broadcast(a[..., 0], b[..., 0]).shape\n if a.shape[-1] == 3 or b.shape[-1] == 3:\n shape += (3,)\n # Check axisc is within bounds\n axisc = normalize_axis_index(axisc, len(shape), msg_prefix='axisc')\n dtype = promote_types(a.dtype, b.dtype)\n cp = empty(shape, dtype)\n\n # recast arrays as dtype\n a = a.astype(dtype)\n b = b.astype(dtype)\n\n # create local aliases for readability\n a0 = a[..., 0]\n a1 = a[..., 1]\n if a.shape[-1] == 3:\n a2 = a[..., 2]\n b0 = b[..., 0]\n b1 = b[..., 1]\n if b.shape[-1] == 3:\n b2 = b[..., 2]\n if cp.ndim != 0 and cp.shape[-1] == 3:\n cp0 = cp[..., 0]\n cp1 = cp[..., 1]\n cp2 = cp[..., 2]\n\n if a.shape[-1] == 2:\n if b.shape[-1] == 2:\n # a0 * b1 - a1 * b0\n multiply(a0, b1, out=cp)\n cp -= a1 * b0\n return cp\n else:\n assert b.shape[-1] == 3\n # cp0 = a1 * b2 - 0 (a2 = 0)\n # cp1 = 0 - a0 * b2 (a2 = 0)\n # cp2 = a0 * b1 - a1 * b0\n multiply(a1, b2, out=cp0)\n multiply(a0, b2, out=cp1)\n negative(cp1, out=cp1)\n multiply(a0, b1, out=cp2)\n cp2 -= a1 * b0\n else:\n assert a.shape[-1] == 3\n if b.shape[-1] == 3:\n # cp0 = a1 * b2 - a2 * b1\n # cp1 = a2 * b0 - a0 * b2\n # cp2 = a0 * b1 - a1 * b0\n multiply(a1, b2, out=cp0)\n tmp = np.multiply(a2, b1, out=...)\n cp0 -= tmp\n multiply(a2, b0, out=cp1)\n multiply(a0, b2, out=tmp)\n cp1 -= tmp\n multiply(a0, b1, out=cp2)\n multiply(a1, b0, out=tmp)\n cp2 -= tmp\n else:\n assert b.shape[-1] == 2\n # cp0 = 0 - a2 * b1 (b2 = 0)\n # cp1 = a2 * b0 - 0 (b2 = 0)\n # cp2 = a0 * b1 - a1 * b0\n multiply(a2, b1, out=cp0)\n negative(cp0, out=cp0)\n multiply(a2, b0, out=cp1)\n multiply(a0, b1, out=cp2)\n cp2 -= a1 * b0\n\n return moveaxis(cp, -1, 
axisc)\n\n\nlittle_endian = (sys.byteorder == 'little')\n\n\n@set_module('numpy')\ndef indices(dimensions, dtype=int, sparse=False):\n """\n Return an array representing the indices of a grid.\n\n Compute an array where the subarrays contain index values 0, 1, ...\n varying only along the corresponding axis.\n\n Parameters\n ----------\n dimensions : sequence of ints\n The shape of the grid.\n dtype : dtype, optional\n Data type of the result.\n sparse : boolean, optional\n Return a sparse representation of the grid instead of a dense\n representation. Default is False.\n\n Returns\n -------\n grid : one ndarray or tuple of ndarrays\n If sparse is False:\n Returns one array of grid indices,\n ``grid.shape = (len(dimensions),) + tuple(dimensions)``.\n If sparse is True:\n Returns a tuple of arrays, with\n ``grid[i].shape = (1, ..., 1, dimensions[i], 1, ..., 1)`` with\n dimensions[i] in the ith place\n\n See Also\n --------\n mgrid, ogrid, meshgrid\n\n Notes\n -----\n The output shape in the dense case is obtained by prepending the number\n of dimensions in front of the tuple of dimensions, i.e. if `dimensions`\n is a tuple ``(r0, ..., rN-1)`` of length ``N``, the output shape is\n ``(N, r0, ..., rN-1)``.\n\n The subarrays ``grid[k]`` contains the N-D array of indices along the\n ``k-th`` axis. 
Explicitly::\n\n grid[k, i0, i1, ..., iN-1] = ik\n\n Examples\n --------\n >>> import numpy as np\n >>> grid = np.indices((2, 3))\n >>> grid.shape\n (2, 2, 3)\n >>> grid[0] # row indices\n array([[0, 0, 0],\n [1, 1, 1]])\n >>> grid[1] # column indices\n array([[0, 1, 2],\n [0, 1, 2]])\n\n The indices can be used as an index into an array.\n\n >>> x = np.arange(20).reshape(5, 4)\n >>> row, col = np.indices((2, 3))\n >>> x[row, col]\n array([[0, 1, 2],\n [4, 5, 6]])\n\n Note that it would be more straightforward in the above example to\n extract the required elements directly with ``x[:2, :3]``.\n\n If sparse is set to true, the grid will be returned in a sparse\n representation.\n\n >>> i, j = np.indices((2, 3), sparse=True)\n >>> i.shape\n (2, 1)\n >>> j.shape\n (1, 3)\n >>> i # row indices\n array([[0],\n [1]])\n >>> j # column indices\n array([[0, 1, 2]])\n\n """\n dimensions = tuple(dimensions)\n N = len(dimensions)\n shape = (1,) * N\n if sparse:\n res = ()\n else:\n res = empty((N,) + dimensions, dtype=dtype)\n for i, dim in enumerate(dimensions):\n idx = arange(dim, dtype=dtype).reshape(\n shape[:i] + (dim,) + shape[i + 1:]\n )\n if sparse:\n res = res + (idx,)\n else:\n res[i] = idx\n return res\n\n\n@finalize_array_function_like\n@set_module('numpy')\ndef fromfunction(function, shape, *, dtype=float, like=None, **kwargs):\n """\n Construct an array by executing a function over each coordinate.\n\n The resulting array therefore has a value ``fn(x, y, z)`` at\n coordinate ``(x, y, z)``.\n\n Parameters\n ----------\n function : callable\n The function is called with N parameters, where N is the rank of\n `shape`. Each parameter represents the coordinates of the array\n varying along a specific axis. 
For example, if `shape`\n were ``(2, 2)``, then the parameters would be\n ``array([[0, 0], [1, 1]])`` and ``array([[0, 1], [0, 1]])``\n shape : (N,) tuple of ints\n Shape of the output array, which also determines the shape of\n the coordinate arrays passed to `function`.\n dtype : data-type, optional\n Data-type of the coordinate arrays passed to `function`.\n By default, `dtype` is float.\n ${ARRAY_FUNCTION_LIKE}\n\n .. versionadded:: 1.20.0\n\n Returns\n -------\n fromfunction : any\n The result of the call to `function` is passed back directly.\n Therefore the shape of `fromfunction` is completely determined by\n `function`. If `function` returns a scalar value, the shape of\n `fromfunction` would not match the `shape` parameter.\n\n See Also\n --------\n indices, meshgrid\n\n Notes\n -----\n Keywords other than `dtype` and `like` are passed to `function`.\n\n Examples\n --------\n >>> import numpy as np\n >>> np.fromfunction(lambda i, j: i, (2, 2), dtype=float)\n array([[0., 0.],\n [1., 1.]])\n\n >>> np.fromfunction(lambda i, j: j, (2, 2), dtype=float)\n array([[0., 1.],\n [0., 1.]])\n\n >>> np.fromfunction(lambda i, j: i == j, (3, 3), dtype=int)\n array([[ True, False, False],\n [False, True, False],\n [False, False, True]])\n\n >>> np.fromfunction(lambda i, j: i + j, (3, 3), dtype=int)\n array([[0, 1, 2],\n [1, 2, 3],\n [2, 3, 4]])\n\n """\n if like is not None:\n return _fromfunction_with_like(\n like, function, shape, dtype=dtype, **kwargs)\n\n args = indices(shape, dtype=dtype)\n return function(*args, **kwargs)\n\n\n_fromfunction_with_like = array_function_dispatch()(fromfunction)\n\n\ndef _frombuffer(buf, dtype, shape, order, axis_order=None):\n array = frombuffer(buf, dtype=dtype)\n if order == 'K' and axis_order is not None:\n return array.reshape(shape, order='C').transpose(axis_order)\n return array.reshape(shape, order=order)\n\n\n@set_module('numpy')\ndef isscalar(element):\n """\n Returns True if the type of `element` is a scalar type.\n\n 
Parameters\n ----------\n element : any\n Input argument, can be of any type and shape.\n\n Returns\n -------\n val : bool\n True if `element` is a scalar type, False if it is not.\n\n See Also\n --------\n ndim : Get the number of dimensions of an array\n\n Notes\n -----\n If you need a stricter way to identify a *numerical* scalar, use\n ``isinstance(x, numbers.Number)``, as that returns ``False`` for most\n non-numerical elements such as strings.\n\n In most cases ``np.ndim(x) == 0`` should be used instead of this function,\n as that will also return true for 0d arrays. This is how numpy overloads\n functions in the style of the ``dx`` arguments to `gradient` and\n the ``bins`` argument to `histogram`. Some key differences:\n\n +------------------------------------+---------------+-------------------+\n | x |``isscalar(x)``|``np.ndim(x) == 0``|\n +====================================+===============+===================+\n | PEP 3141 numeric objects | ``True`` | ``True`` |\n | (including builtins) | | |\n +------------------------------------+---------------+-------------------+\n | builtin string and buffer objects | ``True`` | ``True`` |\n +------------------------------------+---------------+-------------------+\n | other builtin objects, like | ``False`` | ``True`` |\n | `pathlib.Path`, `Exception`, | | |\n | the result of `re.compile` | | |\n +------------------------------------+---------------+-------------------+\n | third-party objects like | ``False`` | ``True`` |\n | `matplotlib.figure.Figure` | | |\n +------------------------------------+---------------+-------------------+\n | zero-dimensional numpy arrays | ``False`` | ``True`` |\n +------------------------------------+---------------+-------------------+\n | other numpy arrays | ``False`` | ``False`` |\n +------------------------------------+---------------+-------------------+\n | `list`, `tuple`, and other | ``False`` | ``False`` |\n | sequence objects | | |\n 
+------------------------------------+---------------+-------------------+\n\n Examples\n --------\n >>> import numpy as np\n\n >>> np.isscalar(3.1)\n True\n\n >>> np.isscalar(np.array(3.1))\n False\n\n >>> np.isscalar([3.1])\n False\n\n >>> np.isscalar(False)\n True\n\n >>> np.isscalar('numpy')\n True\n\n NumPy supports PEP 3141 numbers:\n\n >>> from fractions import Fraction\n >>> np.isscalar(Fraction(5, 17))\n True\n >>> from numbers import Number\n >>> np.isscalar(Number())\n True\n\n """\n return (isinstance(element, generic)\n or type(element) in ScalarType\n or isinstance(element, numbers.Number))\n\n\n@set_module('numpy')\ndef binary_repr(num, width=None):\n """\n Return the binary representation of the input number as a string.\n\n For negative numbers, if width is not given, a minus sign is added to the\n front. If width is given, the two's complement of the number is\n returned, with respect to that width.\n\n In a two's-complement system negative numbers are represented by the two's\n complement of the absolute value. This is the most common method of\n representing signed integers on computers [1]_. A N-bit two's-complement\n system can represent every integer in the range\n :math:`-2^{N-1}` to :math:`+2^{N-1}-1`.\n\n Parameters\n ----------\n num : int\n Only an integer decimal number can be used.\n width : int, optional\n The length of the returned string if `num` is positive, or the length\n of the two's complement if `num` is negative, provided that `width` is\n at least a sufficient number of bits for `num` to be represented in\n the designated form. 
If the `width` value is insufficient, an error is\n raised.\n\n Returns\n -------\n bin : str\n Binary representation of `num` or two's complement of `num`.\n\n See Also\n --------\n base_repr: Return a string representation of a number in the given base\n system.\n bin: Python's built-in binary representation generator of an integer.\n\n Notes\n -----\n `binary_repr` is equivalent to using `base_repr` with base 2, but about 25x\n faster.\n\n References\n ----------\n .. [1] Wikipedia, "Two's complement",\n https://en.wikipedia.org/wiki/Two's_complement\n\n Examples\n --------\n >>> import numpy as np\n >>> np.binary_repr(3)\n '11'\n >>> np.binary_repr(-3)\n '-11'\n >>> np.binary_repr(3, width=4)\n '0011'\n\n The two's complement is returned when the input number is negative and\n width is specified:\n\n >>> np.binary_repr(-3, width=3)\n '101'\n >>> np.binary_repr(-3, width=5)\n '11101'\n\n """\n def err_if_insufficient(width, binwidth):\n if width is not None and width < binwidth:\n raise ValueError(\n f"Insufficient bit {width=} provided for {binwidth=}"\n )\n\n # Ensure that num is a Python integer to avoid overflow or unwanted\n # casts to floating point.\n num = operator.index(num)\n\n if num == 0:\n return '0' * (width or 1)\n\n elif num > 0:\n binary = f'{num:b}'\n binwidth = len(binary)\n outwidth = (binwidth if width is None\n else builtins.max(binwidth, width))\n err_if_insufficient(width, binwidth)\n return binary.zfill(outwidth)\n\n elif width is None:\n return f'-{-num:b}'\n\n else:\n poswidth = len(f'{-num:b}')\n\n # See gh-8679: remove extra digit\n # for numbers at boundaries.\n if 2**(poswidth - 1) == -num:\n poswidth -= 1\n\n twocomp = 2**(poswidth + 1) + num\n binary = f'{twocomp:b}'\n binwidth = len(binary)\n\n outwidth = builtins.max(binwidth, width)\n err_if_insufficient(width, binwidth)\n return '1' * (outwidth - binwidth) + binary\n\n\n@set_module('numpy')\ndef base_repr(number, base=2, padding=0):\n """\n Return a string representation of a 
number in the given base system.\n\n Parameters\n ----------\n number : int\n The value to convert. Positive and negative values are handled.\n base : int, optional\n Convert `number` to the `base` number system. The valid range is 2-36,\n the default value is 2.\n padding : int, optional\n Number of zeros padded on the left. Default is 0 (no padding).\n\n Returns\n -------\n out : str\n String representation of `number` in `base` system.\n\n See Also\n --------\n binary_repr : Faster version of `base_repr` for base 2.\n\n Examples\n --------\n >>> import numpy as np\n >>> np.base_repr(5)\n '101'\n >>> np.base_repr(6, 5)\n '11'\n >>> np.base_repr(7, base=5, padding=3)\n '00012'\n\n >>> np.base_repr(10, base=16)\n 'A'\n >>> np.base_repr(32, base=16)\n '20'\n\n """\n digits = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n if base > len(digits):\n raise ValueError("Bases greater than 36 not handled in base_repr.")\n elif base < 2:\n raise ValueError("Bases less than 2 not handled in base_repr.")\n\n num = abs(int(number))\n res = []\n while num:\n res.append(digits[num % base])\n num //= base\n if padding:\n res.append('0' * padding)\n if number < 0:\n res.append('-')\n return ''.join(reversed(res or '0'))\n\n\n# These are all essentially abbreviations\n# These might wind up in a special abbreviations module\n\n\ndef _maketup(descr, val):\n dt = dtype(descr)\n # Place val in all scalar tuples:\n fields = dt.fields\n if fields is None:\n return val\n else:\n res = [_maketup(fields[name][0], val) for name in dt.names]\n return tuple(res)\n\n\n@finalize_array_function_like\n@set_module('numpy')\ndef identity(n, dtype=None, *, like=None):\n """\n Return the identity array.\n\n The identity array is a square array with ones on\n the main diagonal.\n\n Parameters\n ----------\n n : int\n Number of rows (and columns) in `n` x `n` output.\n dtype : data-type, optional\n Data-type of the output. Defaults to ``float``.\n ${ARRAY_FUNCTION_LIKE}\n\n .. 
versionadded:: 1.20.0\n\n Returns\n -------\n out : ndarray\n `n` x `n` array with its main diagonal set to one,\n and all other elements 0.\n\n Examples\n --------\n >>> import numpy as np\n >>> np.identity(3)\n array([[1., 0., 0.],\n [0., 1., 0.],\n [0., 0., 1.]])\n\n """\n if like is not None:\n return _identity_with_like(like, n, dtype=dtype)\n\n from numpy import eye\n return eye(n, dtype=dtype, like=like)\n\n\n_identity_with_like = array_function_dispatch()(identity)\n\n\ndef _allclose_dispatcher(a, b, rtol=None, atol=None, equal_nan=None):\n return (a, b, rtol, atol)\n\n\n@array_function_dispatch(_allclose_dispatcher)\ndef allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):\n """\n Returns True if two arrays are element-wise equal within a tolerance.\n\n The tolerance values are positive, typically very small numbers. The\n relative difference (`rtol` * abs(`b`)) and the absolute difference\n `atol` are added together to compare against the absolute difference\n between `a` and `b`.\n\n .. warning:: The default `atol` is not appropriate for comparing numbers\n with magnitudes much smaller than one (see Notes).\n\n NaNs are treated as equal if they are in the same place and if\n ``equal_nan=True``. Infs are treated as equal if they are in the same\n place and of the same sign in both arrays.\n\n Parameters\n ----------\n a, b : array_like\n Input arrays to compare.\n rtol : array_like\n The relative tolerance parameter (see Notes).\n atol : array_like\n The absolute tolerance parameter (see Notes).\n equal_nan : bool\n Whether to compare NaN's as equal. 
If True, NaN's in `a` will be\n considered equal to NaN's in `b` in the output array.\n\n Returns\n -------\n allclose : bool\n Returns True if the two arrays are equal within the given\n tolerance; False otherwise.\n\n See Also\n --------\n isclose, all, any, equal\n\n Notes\n -----\n If the following equation is element-wise True, then allclose returns\n True.::\n\n absolute(a - b) <= (atol + rtol * absolute(b))\n\n The above equation is not symmetric in `a` and `b`, so that\n ``allclose(a, b)`` might be different from ``allclose(b, a)`` in\n some rare cases.\n\n The default value of `atol` is not appropriate when the reference value\n `b` has magnitude smaller than one. For example, it is unlikely that\n ``a = 1e-9`` and ``b = 2e-9`` should be considered "close", yet\n ``allclose(1e-9, 2e-9)`` is ``True`` with default settings. Be sure\n to select `atol` for the use case at hand, especially for defining the\n threshold below which a non-zero value in `a` will be considered "close"\n to a very small or zero value in `b`.\n\n The comparison of `a` and `b` uses standard broadcasting, which\n means that `a` and `b` need not have the same shape in order for\n ``allclose(a, b)`` to evaluate to True. 
The same is true for\n `equal` but not `array_equal`.\n\n `allclose` is not defined for non-numeric data types.\n `bool` is considered a numeric data-type for this purpose.\n\n Examples\n --------\n >>> import numpy as np\n >>> np.allclose([1e10,1e-7], [1.00001e10,1e-8])\n False\n\n >>> np.allclose([1e10,1e-8], [1.00001e10,1e-9])\n True\n\n >>> np.allclose([1e10,1e-8], [1.0001e10,1e-9])\n False\n\n >>> np.allclose([1.0, np.nan], [1.0, np.nan])\n False\n\n >>> np.allclose([1.0, np.nan], [1.0, np.nan], equal_nan=True)\n True\n\n\n """\n res = all(isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan))\n return builtins.bool(res)\n\n\ndef _isclose_dispatcher(a, b, rtol=None, atol=None, equal_nan=None):\n return (a, b, rtol, atol)\n\n\n@array_function_dispatch(_isclose_dispatcher)\ndef isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):\n """\n Returns a boolean array where two arrays are element-wise equal within a\n tolerance.\n\n The tolerance values are positive, typically very small numbers. The\n relative difference (`rtol` * abs(`b`)) and the absolute difference\n `atol` are added together to compare against the absolute difference\n between `a` and `b`.\n\n .. warning:: The default `atol` is not appropriate for comparing numbers\n with magnitudes much smaller than one (see Notes).\n\n Parameters\n ----------\n a, b : array_like\n Input arrays to compare.\n rtol : array_like\n The relative tolerance parameter (see Notes).\n atol : array_like\n The absolute tolerance parameter (see Notes).\n equal_nan : bool\n Whether to compare NaN's as equal. If True, NaN's in `a` will be\n considered equal to NaN's in `b` in the output array.\n\n Returns\n -------\n y : array_like\n Returns a boolean array of where `a` and `b` are equal within the\n given tolerance. 
If both `a` and `b` are scalars, returns a single\n boolean value.\n\n See Also\n --------\n allclose\n math.isclose\n\n Notes\n -----\n For finite values, isclose uses the following equation to test whether\n two floating point values are equivalent.::\n\n absolute(a - b) <= (atol + rtol * absolute(b))\n\n Unlike the built-in `math.isclose`, the above equation is not symmetric\n in `a` and `b` -- it assumes `b` is the reference value -- so that\n `isclose(a, b)` might be different from `isclose(b, a)`.\n\n The default value of `atol` is not appropriate when the reference value\n `b` has magnitude smaller than one. For example, it is unlikely that\n ``a = 1e-9`` and ``b = 2e-9`` should be considered "close", yet\n ``isclose(1e-9, 2e-9)`` is ``True`` with default settings. Be sure\n to select `atol` for the use case at hand, especially for defining the\n threshold below which a non-zero value in `a` will be considered "close"\n to a very small or zero value in `b`.\n\n `isclose` is not defined for non-numeric data types.\n :class:`bool` is considered a numeric data-type for this purpose.\n\n Examples\n --------\n >>> import numpy as np\n >>> np.isclose([1e10,1e-7], [1.00001e10,1e-8])\n array([ True, False])\n\n >>> np.isclose([1e10,1e-8], [1.00001e10,1e-9])\n array([ True, True])\n\n >>> np.isclose([1e10,1e-8], [1.0001e10,1e-9])\n array([False, True])\n\n >>> np.isclose([1.0, np.nan], [1.0, np.nan])\n array([ True, False])\n\n >>> np.isclose([1.0, np.nan], [1.0, np.nan], equal_nan=True)\n array([ True, True])\n\n >>> np.isclose([1e-8, 1e-7], [0.0, 0.0])\n array([ True, False])\n\n >>> np.isclose([1e-100, 1e-7], [0.0, 0.0], atol=0.0)\n array([False, False])\n\n >>> np.isclose([1e-10, 1e-10], [1e-20, 0.0])\n array([ True, True])\n\n >>> np.isclose([1e-10, 1e-10], [1e-20, 0.999999e-10], atol=0.0)\n array([False, True])\n\n """\n # Turn all but python scalars into arrays.\n x, y, atol, rtol = (\n a if isinstance(a, (int, float, complex)) else asanyarray(a)\n for a in 
(a, b, atol, rtol))\n\n # Make sure y is an inexact type to avoid bad behavior on abs(MIN_INT).\n # This will cause casting of x later. Also, make sure to allow subclasses\n # (e.g., for numpy.ma).\n # NOTE: We explicitly allow timedelta, which used to work. This could\n # possibly be deprecated. See also gh-18286.\n # timedelta works if `atol` is an integer or also a timedelta.\n # Although, the default tolerances are unlikely to be useful\n if (dtype := getattr(y, "dtype", None)) is not None and dtype.kind != "m":\n dt = multiarray.result_type(y, 1.)\n y = asanyarray(y, dtype=dt)\n elif isinstance(y, int):\n y = float(y)\n\n # atol and rtol can be arrays\n if not (np.all(np.isfinite(atol)) and np.all(np.isfinite(rtol))):\n err_s = np.geterr()["invalid"]\n err_msg = f"One of rtol or atol is not valid, atol: {atol}, rtol: {rtol}"\n\n if err_s == "warn":\n warnings.warn(err_msg, RuntimeWarning, stacklevel=2)\n elif err_s == "raise":\n raise FloatingPointError(err_msg)\n elif err_s == "print":\n print(err_msg)\n\n with errstate(invalid='ignore'):\n\n result = (less_equal(abs(x - y), atol + rtol * abs(y))\n & isfinite(y)\n | (x == y))\n if equal_nan:\n result |= isnan(x) & isnan(y)\n\n return result[()] # Flatten 0d arrays to scalars\n\n\ndef _array_equal_dispatcher(a1, a2, equal_nan=None):\n return (a1, a2)\n\n\n_no_nan_types = {\n # should use np.dtype.BoolDType, but as of writing\n # that fails the reloading test.\n type(dtype(nt.bool)),\n type(dtype(nt.int8)),\n type(dtype(nt.int16)),\n type(dtype(nt.int32)),\n type(dtype(nt.int64)),\n}\n\n\ndef _dtype_cannot_hold_nan(dtype):\n return type(dtype) in _no_nan_types\n\n\n@array_function_dispatch(_array_equal_dispatcher)\ndef array_equal(a1, a2, equal_nan=False):\n """\n True if two arrays have the same shape and elements, False otherwise.\n\n Parameters\n ----------\n a1, a2 : array_like\n Input arrays.\n equal_nan : bool\n Whether to compare NaN's as equal. 
If the dtype of a1 and a2 is\n complex, values will be considered equal if either the real or the\n imaginary component of a given value is ``nan``.\n\n Returns\n -------\n b : bool\n Returns True if the arrays are equal.\n\n See Also\n --------\n allclose: Returns True if two arrays are element-wise equal within a\n tolerance.\n array_equiv: Returns True if input arrays are shape consistent and all\n elements equal.\n\n Examples\n --------\n >>> import numpy as np\n\n >>> np.array_equal([1, 2], [1, 2])\n True\n\n >>> np.array_equal(np.array([1, 2]), np.array([1, 2]))\n True\n\n >>> np.array_equal([1, 2], [1, 2, 3])\n False\n\n >>> np.array_equal([1, 2], [1, 4])\n False\n\n >>> a = np.array([1, np.nan])\n >>> np.array_equal(a, a)\n False\n\n >>> np.array_equal(a, a, equal_nan=True)\n True\n\n When ``equal_nan`` is True, complex values with nan components are\n considered equal if either the real *or* the imaginary components are nan.\n\n >>> a = np.array([1 + 1j])\n >>> b = a.copy()\n >>> a.real = np.nan\n >>> b.imag = np.nan\n >>> np.array_equal(a, b, equal_nan=True)\n True\n """\n try:\n a1, a2 = asarray(a1), asarray(a2)\n except Exception:\n return False\n if a1.shape != a2.shape:\n return False\n if not equal_nan:\n return builtins.bool((asanyarray(a1 == a2)).all())\n\n if a1 is a2:\n # nan will compare equal so an array will compare equal to itself.\n return True\n\n cannot_have_nan = (_dtype_cannot_hold_nan(a1.dtype)\n and _dtype_cannot_hold_nan(a2.dtype))\n if cannot_have_nan:\n return builtins.bool(asarray(a1 == a2).all())\n\n # Handling NaN values if equal_nan is True\n a1nan, a2nan = isnan(a1), isnan(a2)\n # NaN's occur at different locations\n if not (a1nan == a2nan).all():\n return False\n # Shapes of a1, a2 and masks are guaranteed to be consistent by this point\n return builtins.bool((a1[~a1nan] == a2[~a1nan]).all())\n\n\ndef _array_equiv_dispatcher(a1, a2):\n return (a1, a2)\n\n\n@array_function_dispatch(_array_equiv_dispatcher)\ndef array_equiv(a1, 
a2):\n """\n Returns True if input arrays are shape consistent and all elements equal.\n\n Shape consistent means they are either the same shape, or one input array\n can be broadcasted to create the same shape as the other one.\n\n Parameters\n ----------\n a1, a2 : array_like\n Input arrays.\n\n Returns\n -------\n out : bool\n True if equivalent, False otherwise.\n\n Examples\n --------\n >>> import numpy as np\n >>> np.array_equiv([1, 2], [1, 2])\n True\n >>> np.array_equiv([1, 2], [1, 3])\n False\n\n Showing the shape equivalence:\n\n >>> np.array_equiv([1, 2], [[1, 2], [1, 2]])\n True\n >>> np.array_equiv([1, 2], [[1, 2, 1, 2], [1, 2, 1, 2]])\n False\n\n >>> np.array_equiv([1, 2], [[1, 2], [1, 3]])\n False\n\n """\n try:\n a1, a2 = asarray(a1), asarray(a2)\n except Exception:\n return False\n try:\n multiarray.broadcast(a1, a2)\n except Exception:\n return False\n\n return builtins.bool(asanyarray(a1 == a2).all())\n\n\ndef _astype_dispatcher(x, dtype, /, *, copy=None, device=None):\n return (x, dtype)\n\n\n@array_function_dispatch(_astype_dispatcher)\ndef astype(x, dtype, /, *, copy=True, device=None):\n """\n Copies an array to a specified data type.\n\n This function is an Array API compatible alternative to\n `numpy.ndarray.astype`.\n\n Parameters\n ----------\n x : ndarray\n Input NumPy array to cast. ``array_likes`` are explicitly not\n supported here.\n dtype : dtype\n Data type of the result.\n copy : bool, optional\n Specifies whether to copy an array when the specified dtype matches\n the data type of the input array ``x``. If ``True``, a newly allocated\n array must always be returned. If ``False`` and the specified dtype\n matches the data type of the input array, the input array must be\n returned; otherwise, a newly allocated array must be returned.\n Defaults to ``True``.\n device : str, optional\n The device on which to place the returned array. Default: None.\n For Array-API interoperability only, so must be ``"cpu"`` if passed.\n\n .. 
versionadded:: 2.1.0\n\n Returns\n -------\n out : ndarray\n An array having the specified data type.\n\n See Also\n --------\n ndarray.astype\n\n Examples\n --------\n >>> import numpy as np\n >>> arr = np.array([1, 2, 3]); arr\n array([1, 2, 3])\n >>> np.astype(arr, np.float64)\n array([1., 2., 3.])\n\n Non-copy case:\n\n >>> arr = np.array([1, 2, 3])\n >>> arr_noncpy = np.astype(arr, arr.dtype, copy=False)\n >>> np.shares_memory(arr, arr_noncpy)\n True\n\n """\n if not (isinstance(x, np.ndarray) or isscalar(x)):\n raise TypeError(\n "Input should be a NumPy array or scalar. "\n f"It is a {type(x)} instead."\n )\n if device is not None and device != "cpu":\n raise ValueError(\n 'Device not understood. Only "cpu" is allowed, but received:'\n f' {device}'\n )\n return x.astype(dtype, copy=copy)\n\n\ninf = PINF\nnan = NAN\nFalse_ = nt.bool(False)\nTrue_ = nt.bool(True)\n\n\ndef extend_all(module):\n existing = set(__all__)\n mall = module.__all__\n for a in mall:\n if a not in existing:\n __all__.append(a)\n\n\nfrom . import _asarray, _ufunc_config, arrayprint, fromnumeric\nfrom ._asarray import *\nfrom ._ufunc_config import *\nfrom .arrayprint import *\nfrom .fromnumeric import *\nfrom .numerictypes import *\nfrom .umath import *\n\nextend_all(fromnumeric)\nextend_all(umath)\nextend_all(numerictypes)\nextend_all(arrayprint)\nextend_all(_asarray)\nextend_all(_ufunc_config)\n
.venv\Lib\site-packages\numpy\_core\numeric.py
numeric.py
Python
85,082
0.75
0.094928
0.0253
node-utils
612
2024-04-13T01:00:36.368215
Apache-2.0
false
acbc378071b06ce7ea6cc0f74b5b5fa2
from collections.abc import Callable, Sequence\nfrom typing import (\n Any,\n Final,\n Never,\n NoReturn,\n SupportsAbs,\n SupportsIndex,\n TypeAlias,\n TypeGuard,\n TypeVar,\n Unpack,\n overload,\n)\nfrom typing import Literal as L\n\nimport numpy as np\nfrom numpy import (\n False_,\n True_,\n _OrderCF,\n _OrderKACF,\n # re-exports\n bitwise_not,\n broadcast,\n complexfloating,\n dtype,\n flatiter,\n float64,\n floating,\n from_dlpack,\n # other\n generic,\n inf,\n int_,\n intp,\n little_endian,\n matmul,\n nan,\n ndarray,\n nditer,\n newaxis,\n object_,\n signedinteger,\n timedelta64,\n ufunc,\n unsignedinteger,\n vecdot,\n)\nfrom numpy._typing import (\n ArrayLike,\n DTypeLike,\n NDArray,\n _ArrayLike,\n _ArrayLikeBool_co,\n _ArrayLikeComplex_co,\n _ArrayLikeFloat_co,\n _ArrayLikeInt_co,\n _ArrayLikeObject_co,\n _ArrayLikeTD64_co,\n _ArrayLikeUInt_co,\n _DTypeLike,\n _NestedSequence,\n _ScalarLike_co,\n _Shape,\n _ShapeLike,\n _SupportsArrayFunc,\n _SupportsDType,\n)\n\nfrom .fromnumeric import all as all\nfrom .fromnumeric import any as any\nfrom .fromnumeric import argpartition as argpartition\nfrom .fromnumeric import matrix_transpose as matrix_transpose\nfrom .fromnumeric import mean as mean\nfrom .multiarray import (\n # other\n _Array,\n _ConstructorEmpty,\n _KwargsEmpty,\n # re-exports\n arange,\n array,\n asanyarray,\n asarray,\n ascontiguousarray,\n asfortranarray,\n can_cast,\n concatenate,\n copyto,\n dot,\n empty,\n empty_like,\n frombuffer,\n fromfile,\n fromiter,\n fromstring,\n inner,\n lexsort,\n may_share_memory,\n min_scalar_type,\n nested_iters,\n promote_types,\n putmask,\n result_type,\n shares_memory,\n vdot,\n where,\n zeros,\n)\n\n__all__ = [\n "newaxis",\n "ndarray",\n "flatiter",\n "nditer",\n "nested_iters",\n "ufunc",\n "arange",\n "array",\n "asarray",\n "asanyarray",\n "ascontiguousarray",\n "asfortranarray",\n "zeros",\n "count_nonzero",\n "empty",\n "broadcast",\n "dtype",\n "fromstring",\n "fromfile",\n "frombuffer",\n 
"from_dlpack",\n "where",\n "argwhere",\n "copyto",\n "concatenate",\n "lexsort",\n "astype",\n "can_cast",\n "promote_types",\n "min_scalar_type",\n "result_type",\n "isfortran",\n "empty_like",\n "zeros_like",\n "ones_like",\n "correlate",\n "convolve",\n "inner",\n "dot",\n "outer",\n "vdot",\n "roll",\n "rollaxis",\n "moveaxis",\n "cross",\n "tensordot",\n "little_endian",\n "fromiter",\n "array_equal",\n "array_equiv",\n "indices",\n "fromfunction",\n "isclose",\n "isscalar",\n "binary_repr",\n "base_repr",\n "ones",\n "identity",\n "allclose",\n "putmask",\n "flatnonzero",\n "inf",\n "nan",\n "False_",\n "True_",\n "bitwise_not",\n "full",\n "full_like",\n "matmul",\n "vecdot",\n "shares_memory",\n "may_share_memory",\n]\n\n_T = TypeVar("_T")\n_ScalarT = TypeVar("_ScalarT", bound=generic)\n_DTypeT = TypeVar("_DTypeT", bound=np.dtype)\n_ArrayT = TypeVar("_ArrayT", bound=np.ndarray[Any, Any])\n_ShapeT = TypeVar("_ShapeT", bound=_Shape)\n_AnyShapeT = TypeVar(\n "_AnyShapeT",\n tuple[()],\n tuple[int],\n tuple[int, int],\n tuple[int, int, int],\n tuple[int, int, int, int],\n tuple[int, ...],\n)\n\n_CorrelateMode: TypeAlias = L["valid", "same", "full"]\n\n@overload\ndef zeros_like(\n a: _ArrayT,\n dtype: None = ...,\n order: _OrderKACF = ...,\n subok: L[True] = ...,\n shape: None = ...,\n *,\n device: L["cpu"] | None = ...,\n) -> _ArrayT: ...\n@overload\ndef zeros_like(\n a: _ArrayLike[_ScalarT],\n dtype: None = ...,\n order: _OrderKACF = ...,\n subok: bool = ...,\n shape: _ShapeLike | None = ...,\n *,\n device: L["cpu"] | None = ...,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef zeros_like(\n a: Any,\n dtype: _DTypeLike[_ScalarT],\n order: _OrderKACF = ...,\n subok: bool = ...,\n shape: _ShapeLike | None = ...,\n *,\n device: L["cpu"] | None = ...,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef zeros_like(\n a: Any,\n dtype: DTypeLike | None = ...,\n order: _OrderKACF = ...,\n subok: bool = ...,\n shape: _ShapeLike | None = ...,\n *,\n device: L["cpu"] | None = 
...,\n) -> NDArray[Any]: ...\n\nones: Final[_ConstructorEmpty]\n\n@overload\ndef ones_like(\n a: _ArrayT,\n dtype: None = ...,\n order: _OrderKACF = ...,\n subok: L[True] = ...,\n shape: None = ...,\n *,\n device: L["cpu"] | None = ...,\n) -> _ArrayT: ...\n@overload\ndef ones_like(\n a: _ArrayLike[_ScalarT],\n dtype: None = ...,\n order: _OrderKACF = ...,\n subok: bool = ...,\n shape: _ShapeLike | None = ...,\n *,\n device: L["cpu"] | None = ...,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef ones_like(\n a: Any,\n dtype: _DTypeLike[_ScalarT],\n order: _OrderKACF = ...,\n subok: bool = ...,\n shape: _ShapeLike | None = ...,\n *,\n device: L["cpu"] | None = ...,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef ones_like(\n a: Any,\n dtype: DTypeLike | None = ...,\n order: _OrderKACF = ...,\n subok: bool = ...,\n shape: _ShapeLike | None = ...,\n *,\n device: L["cpu"] | None = ...,\n) -> NDArray[Any]: ...\n\n# TODO: Add overloads for bool, int, float, complex, str, bytes, and memoryview\n# 1-D shape\n@overload\ndef full(\n shape: SupportsIndex,\n fill_value: _ScalarT,\n dtype: None = ...,\n order: _OrderCF = ...,\n **kwargs: Unpack[_KwargsEmpty],\n) -> _Array[tuple[int], _ScalarT]: ...\n@overload\ndef full(\n shape: SupportsIndex,\n fill_value: Any,\n dtype: _DTypeT | _SupportsDType[_DTypeT],\n order: _OrderCF = ...,\n **kwargs: Unpack[_KwargsEmpty],\n) -> np.ndarray[tuple[int], _DTypeT]: ...\n@overload\ndef full(\n shape: SupportsIndex,\n fill_value: Any,\n dtype: type[_ScalarT],\n order: _OrderCF = ...,\n **kwargs: Unpack[_KwargsEmpty],\n) -> _Array[tuple[int], _ScalarT]: ...\n@overload\ndef full(\n shape: SupportsIndex,\n fill_value: Any,\n dtype: DTypeLike | None = ...,\n order: _OrderCF = ...,\n **kwargs: Unpack[_KwargsEmpty],\n) -> _Array[tuple[int], Any]: ...\n# known shape\n@overload\ndef full(\n shape: _AnyShapeT,\n fill_value: _ScalarT,\n dtype: None = ...,\n order: _OrderCF = ...,\n **kwargs: Unpack[_KwargsEmpty],\n) -> _Array[_AnyShapeT, _ScalarT]: 
...\n@overload\ndef full(\n shape: _AnyShapeT,\n fill_value: Any,\n dtype: _DTypeT | _SupportsDType[_DTypeT],\n order: _OrderCF = ...,\n **kwargs: Unpack[_KwargsEmpty],\n) -> np.ndarray[_AnyShapeT, _DTypeT]: ...\n@overload\ndef full(\n shape: _AnyShapeT,\n fill_value: Any,\n dtype: type[_ScalarT],\n order: _OrderCF = ...,\n **kwargs: Unpack[_KwargsEmpty],\n) -> _Array[_AnyShapeT, _ScalarT]: ...\n@overload\ndef full(\n shape: _AnyShapeT,\n fill_value: Any,\n dtype: DTypeLike | None = ...,\n order: _OrderCF = ...,\n **kwargs: Unpack[_KwargsEmpty],\n) -> _Array[_AnyShapeT, Any]: ...\n# unknown shape\n@overload\ndef full(\n shape: _ShapeLike,\n fill_value: _ScalarT,\n dtype: None = ...,\n order: _OrderCF = ...,\n **kwargs: Unpack[_KwargsEmpty],\n) -> NDArray[_ScalarT]: ...\n@overload\ndef full(\n shape: _ShapeLike,\n fill_value: Any,\n dtype: _DTypeT | _SupportsDType[_DTypeT],\n order: _OrderCF = ...,\n **kwargs: Unpack[_KwargsEmpty],\n) -> np.ndarray[Any, _DTypeT]: ...\n@overload\ndef full(\n shape: _ShapeLike,\n fill_value: Any,\n dtype: type[_ScalarT],\n order: _OrderCF = ...,\n **kwargs: Unpack[_KwargsEmpty],\n) -> NDArray[_ScalarT]: ...\n@overload\ndef full(\n shape: _ShapeLike,\n fill_value: Any,\n dtype: DTypeLike | None = ...,\n order: _OrderCF = ...,\n **kwargs: Unpack[_KwargsEmpty],\n) -> NDArray[Any]: ...\n\n@overload\ndef full_like(\n a: _ArrayT,\n fill_value: Any,\n dtype: None = ...,\n order: _OrderKACF = ...,\n subok: L[True] = ...,\n shape: None = ...,\n *,\n device: L["cpu"] | None = ...,\n) -> _ArrayT: ...\n@overload\ndef full_like(\n a: _ArrayLike[_ScalarT],\n fill_value: Any,\n dtype: None = ...,\n order: _OrderKACF = ...,\n subok: bool = ...,\n shape: _ShapeLike | None = ...,\n *,\n device: L["cpu"] | None = ...,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef full_like(\n a: Any,\n fill_value: Any,\n dtype: _DTypeLike[_ScalarT],\n order: _OrderKACF = ...,\n subok: bool = ...,\n shape: _ShapeLike | None = ...,\n *,\n device: L["cpu"] | None = ...,\n) 
-> NDArray[_ScalarT]: ...\n@overload\ndef full_like(\n a: Any,\n fill_value: Any,\n dtype: DTypeLike | None = ...,\n order: _OrderKACF = ...,\n subok: bool = ...,\n shape: _ShapeLike | None = ...,\n *,\n device: L["cpu"] | None = ...,\n) -> NDArray[Any]: ...\n\n#\n@overload\ndef count_nonzero(a: ArrayLike, axis: None = None, *, keepdims: L[False] = False) -> np.intp: ...\n@overload\ndef count_nonzero(a: _ScalarLike_co, axis: _ShapeLike | None = None, *, keepdims: L[True]) -> np.intp: ...\n@overload\ndef count_nonzero(\n a: NDArray[Any] | _NestedSequence[ArrayLike], axis: _ShapeLike | None = None, *, keepdims: L[True]\n) -> NDArray[np.intp]: ...\n@overload\ndef count_nonzero(a: ArrayLike, axis: _ShapeLike | None = None, *, keepdims: bool = False) -> Any: ...\n\n#\ndef isfortran(a: NDArray[Any] | generic) -> bool: ...\n\ndef argwhere(a: ArrayLike) -> NDArray[intp]: ...\n\ndef flatnonzero(a: ArrayLike) -> NDArray[intp]: ...\n\n@overload\ndef correlate(\n a: _ArrayLike[Never],\n v: _ArrayLike[Never],\n mode: _CorrelateMode = ...,\n) -> NDArray[Any]: ...\n@overload\ndef correlate(\n a: _ArrayLikeBool_co,\n v: _ArrayLikeBool_co,\n mode: _CorrelateMode = ...,\n) -> NDArray[np.bool]: ...\n@overload\ndef correlate(\n a: _ArrayLikeUInt_co,\n v: _ArrayLikeUInt_co,\n mode: _CorrelateMode = ...,\n) -> NDArray[unsignedinteger]: ...\n@overload\ndef correlate(\n a: _ArrayLikeInt_co,\n v: _ArrayLikeInt_co,\n mode: _CorrelateMode = ...,\n) -> NDArray[signedinteger]: ...\n@overload\ndef correlate(\n a: _ArrayLikeFloat_co,\n v: _ArrayLikeFloat_co,\n mode: _CorrelateMode = ...,\n) -> NDArray[floating]: ...\n@overload\ndef correlate(\n a: _ArrayLikeComplex_co,\n v: _ArrayLikeComplex_co,\n mode: _CorrelateMode = ...,\n) -> NDArray[complexfloating]: ...\n@overload\ndef correlate(\n a: _ArrayLikeTD64_co,\n v: _ArrayLikeTD64_co,\n mode: _CorrelateMode = ...,\n) -> NDArray[timedelta64]: ...\n@overload\ndef correlate(\n a: _ArrayLikeObject_co,\n v: _ArrayLikeObject_co,\n mode: _CorrelateMode 
= ...,\n) -> NDArray[object_]: ...\n\n@overload\ndef convolve(\n a: _ArrayLike[Never],\n v: _ArrayLike[Never],\n mode: _CorrelateMode = ...,\n) -> NDArray[Any]: ...\n@overload\ndef convolve(\n a: _ArrayLikeBool_co,\n v: _ArrayLikeBool_co,\n mode: _CorrelateMode = ...,\n) -> NDArray[np.bool]: ...\n@overload\ndef convolve(\n a: _ArrayLikeUInt_co,\n v: _ArrayLikeUInt_co,\n mode: _CorrelateMode = ...,\n) -> NDArray[unsignedinteger]: ...\n@overload\ndef convolve(\n a: _ArrayLikeInt_co,\n v: _ArrayLikeInt_co,\n mode: _CorrelateMode = ...,\n) -> NDArray[signedinteger]: ...\n@overload\ndef convolve(\n a: _ArrayLikeFloat_co,\n v: _ArrayLikeFloat_co,\n mode: _CorrelateMode = ...,\n) -> NDArray[floating]: ...\n@overload\ndef convolve(\n a: _ArrayLikeComplex_co,\n v: _ArrayLikeComplex_co,\n mode: _CorrelateMode = ...,\n) -> NDArray[complexfloating]: ...\n@overload\ndef convolve(\n a: _ArrayLikeTD64_co,\n v: _ArrayLikeTD64_co,\n mode: _CorrelateMode = ...,\n) -> NDArray[timedelta64]: ...\n@overload\ndef convolve(\n a: _ArrayLikeObject_co,\n v: _ArrayLikeObject_co,\n mode: _CorrelateMode = ...,\n) -> NDArray[object_]: ...\n\n@overload\ndef outer(\n a: _ArrayLike[Never],\n b: _ArrayLike[Never],\n out: None = ...,\n) -> NDArray[Any]: ...\n@overload\ndef outer(\n a: _ArrayLikeBool_co,\n b: _ArrayLikeBool_co,\n out: None = ...,\n) -> NDArray[np.bool]: ...\n@overload\ndef outer(\n a: _ArrayLikeUInt_co,\n b: _ArrayLikeUInt_co,\n out: None = ...,\n) -> NDArray[unsignedinteger]: ...\n@overload\ndef outer(\n a: _ArrayLikeInt_co,\n b: _ArrayLikeInt_co,\n out: None = ...,\n) -> NDArray[signedinteger]: ...\n@overload\ndef outer(\n a: _ArrayLikeFloat_co,\n b: _ArrayLikeFloat_co,\n out: None = ...,\n) -> NDArray[floating]: ...\n@overload\ndef outer(\n a: _ArrayLikeComplex_co,\n b: _ArrayLikeComplex_co,\n out: None = ...,\n) -> NDArray[complexfloating]: ...\n@overload\ndef outer(\n a: _ArrayLikeTD64_co,\n b: _ArrayLikeTD64_co,\n out: None = ...,\n) -> NDArray[timedelta64]: ...\n@overload\ndef 
outer(\n a: _ArrayLikeObject_co,\n b: _ArrayLikeObject_co,\n out: None = ...,\n) -> NDArray[object_]: ...\n@overload\ndef outer(\n a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,\n b: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,\n out: _ArrayT,\n) -> _ArrayT: ...\n\n@overload\ndef tensordot(\n a: _ArrayLike[Never],\n b: _ArrayLike[Never],\n axes: int | tuple[_ShapeLike, _ShapeLike] = ...,\n) -> NDArray[Any]: ...\n@overload\ndef tensordot(\n a: _ArrayLikeBool_co,\n b: _ArrayLikeBool_co,\n axes: int | tuple[_ShapeLike, _ShapeLike] = ...,\n) -> NDArray[np.bool]: ...\n@overload\ndef tensordot(\n a: _ArrayLikeUInt_co,\n b: _ArrayLikeUInt_co,\n axes: int | tuple[_ShapeLike, _ShapeLike] = ...,\n) -> NDArray[unsignedinteger]: ...\n@overload\ndef tensordot(\n a: _ArrayLikeInt_co,\n b: _ArrayLikeInt_co,\n axes: int | tuple[_ShapeLike, _ShapeLike] = ...,\n) -> NDArray[signedinteger]: ...\n@overload\ndef tensordot(\n a: _ArrayLikeFloat_co,\n b: _ArrayLikeFloat_co,\n axes: int | tuple[_ShapeLike, _ShapeLike] = ...,\n) -> NDArray[floating]: ...\n@overload\ndef tensordot(\n a: _ArrayLikeComplex_co,\n b: _ArrayLikeComplex_co,\n axes: int | tuple[_ShapeLike, _ShapeLike] = ...,\n) -> NDArray[complexfloating]: ...\n@overload\ndef tensordot(\n a: _ArrayLikeTD64_co,\n b: _ArrayLikeTD64_co,\n axes: int | tuple[_ShapeLike, _ShapeLike] = ...,\n) -> NDArray[timedelta64]: ...\n@overload\ndef tensordot(\n a: _ArrayLikeObject_co,\n b: _ArrayLikeObject_co,\n axes: int | tuple[_ShapeLike, _ShapeLike] = ...,\n) -> NDArray[object_]: ...\n\n@overload\ndef roll(\n a: _ArrayLike[_ScalarT],\n shift: _ShapeLike,\n axis: _ShapeLike | None = ...,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef roll(\n a: ArrayLike,\n shift: _ShapeLike,\n axis: _ShapeLike | None = ...,\n) -> NDArray[Any]: ...\n\ndef rollaxis(\n a: NDArray[_ScalarT],\n axis: int,\n start: int = ...,\n) -> NDArray[_ScalarT]: ...\n\ndef moveaxis(\n a: NDArray[_ScalarT],\n source: _ShapeLike,\n destination: 
_ShapeLike,\n) -> NDArray[_ScalarT]: ...\n\n@overload\ndef cross(\n a: _ArrayLike[Never],\n b: _ArrayLike[Never],\n axisa: int = ...,\n axisb: int = ...,\n axisc: int = ...,\n axis: int | None = ...,\n) -> NDArray[Any]: ...\n@overload\ndef cross(\n a: _ArrayLikeBool_co,\n b: _ArrayLikeBool_co,\n axisa: int = ...,\n axisb: int = ...,\n axisc: int = ...,\n axis: int | None = ...,\n) -> NoReturn: ...\n@overload\ndef cross(\n a: _ArrayLikeUInt_co,\n b: _ArrayLikeUInt_co,\n axisa: int = ...,\n axisb: int = ...,\n axisc: int = ...,\n axis: int | None = ...,\n) -> NDArray[unsignedinteger]: ...\n@overload\ndef cross(\n a: _ArrayLikeInt_co,\n b: _ArrayLikeInt_co,\n axisa: int = ...,\n axisb: int = ...,\n axisc: int = ...,\n axis: int | None = ...,\n) -> NDArray[signedinteger]: ...\n@overload\ndef cross(\n a: _ArrayLikeFloat_co,\n b: _ArrayLikeFloat_co,\n axisa: int = ...,\n axisb: int = ...,\n axisc: int = ...,\n axis: int | None = ...,\n) -> NDArray[floating]: ...\n@overload\ndef cross(\n a: _ArrayLikeComplex_co,\n b: _ArrayLikeComplex_co,\n axisa: int = ...,\n axisb: int = ...,\n axisc: int = ...,\n axis: int | None = ...,\n) -> NDArray[complexfloating]: ...\n@overload\ndef cross(\n a: _ArrayLikeObject_co,\n b: _ArrayLikeObject_co,\n axisa: int = ...,\n axisb: int = ...,\n axisc: int = ...,\n axis: int | None = ...,\n) -> NDArray[object_]: ...\n\n@overload\ndef indices(\n dimensions: Sequence[int],\n dtype: type[int] = ...,\n sparse: L[False] = ...,\n) -> NDArray[int_]: ...\n@overload\ndef indices(\n dimensions: Sequence[int],\n dtype: type[int],\n sparse: L[True],\n) -> tuple[NDArray[int_], ...]: ...\n@overload\ndef indices(\n dimensions: Sequence[int],\n dtype: type[int] = ...,\n *,\n sparse: L[True],\n) -> tuple[NDArray[int_], ...]: ...\n@overload\ndef indices(\n dimensions: Sequence[int],\n dtype: _DTypeLike[_ScalarT],\n sparse: L[False] = ...,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef indices(\n dimensions: Sequence[int],\n dtype: _DTypeLike[_ScalarT],\n sparse: 
L[True],\n) -> tuple[NDArray[_ScalarT], ...]: ...\n@overload\ndef indices(\n dimensions: Sequence[int],\n dtype: DTypeLike = ...,\n sparse: L[False] = ...,\n) -> NDArray[Any]: ...\n@overload\ndef indices(\n dimensions: Sequence[int],\n dtype: DTypeLike,\n sparse: L[True],\n) -> tuple[NDArray[Any], ...]: ...\n@overload\ndef indices(\n dimensions: Sequence[int],\n dtype: DTypeLike = ...,\n *,\n sparse: L[True],\n) -> tuple[NDArray[Any], ...]: ...\n\ndef fromfunction(\n function: Callable[..., _T],\n shape: Sequence[int],\n *,\n dtype: DTypeLike = ...,\n like: _SupportsArrayFunc | None = ...,\n **kwargs: Any,\n) -> _T: ...\n\ndef isscalar(element: object) -> TypeGuard[generic | complex | str | bytes | memoryview]: ...\n\ndef binary_repr(num: SupportsIndex, width: int | None = ...) -> str: ...\n\ndef base_repr(\n number: SupportsAbs[float],\n base: float = ...,\n padding: SupportsIndex | None = ...,\n) -> str: ...\n\n@overload\ndef identity(\n n: int,\n dtype: None = ...,\n *,\n like: _SupportsArrayFunc | None = ...,\n) -> NDArray[float64]: ...\n@overload\ndef identity(\n n: int,\n dtype: _DTypeLike[_ScalarT],\n *,\n like: _SupportsArrayFunc | None = ...,\n) -> NDArray[_ScalarT]: ...\n@overload\ndef identity(\n n: int,\n dtype: DTypeLike | None = ...,\n *,\n like: _SupportsArrayFunc | None = ...,\n) -> NDArray[Any]: ...\n\ndef allclose(\n a: ArrayLike,\n b: ArrayLike,\n rtol: ArrayLike = ...,\n atol: ArrayLike = ...,\n equal_nan: bool = ...,\n) -> bool: ...\n\n@overload\ndef isclose(\n a: _ScalarLike_co,\n b: _ScalarLike_co,\n rtol: ArrayLike = ...,\n atol: ArrayLike = ...,\n equal_nan: bool = ...,\n) -> np.bool: ...\n@overload\ndef isclose(\n a: ArrayLike,\n b: ArrayLike,\n rtol: ArrayLike = ...,\n atol: ArrayLike = ...,\n equal_nan: bool = ...,\n) -> NDArray[np.bool]: ...\n\ndef array_equal(a1: ArrayLike, a2: ArrayLike, equal_nan: bool = ...) 
-> bool: ...\n\ndef array_equiv(a1: ArrayLike, a2: ArrayLike) -> bool: ...\n\n@overload\ndef astype(\n x: ndarray[_ShapeT, dtype],\n dtype: _DTypeLike[_ScalarT],\n /,\n *,\n copy: bool = ...,\n device: L["cpu"] | None = ...,\n) -> ndarray[_ShapeT, dtype[_ScalarT]]: ...\n@overload\ndef astype(\n x: ndarray[_ShapeT, dtype],\n dtype: DTypeLike,\n /,\n *,\n copy: bool = ...,\n device: L["cpu"] | None = ...,\n) -> ndarray[_ShapeT, dtype]: ...\n
.venv\Lib\site-packages\numpy\_core\numeric.pyi
numeric.pyi
Other
19,924
0.95
0.112245
0.050648
node-utils
615
2024-11-10T06:23:16.048547
GPL-3.0
false
2b32b84ca80263ef54a76be84833a0ae
"""\nnumerictypes: Define the numeric type objects\n\nThis module is designed so "from numerictypes import \\*" is safe.\nExported symbols include:\n\n Dictionary with all registered number types (including aliases):\n sctypeDict\n\n Type objects (not all will be available, depends on platform):\n see variable sctypes for which ones you have\n\n Bit-width names\n\n int8 int16 int32 int64\n uint8 uint16 uint32 uint64\n float16 float32 float64 float96 float128\n complex64 complex128 complex192 complex256\n datetime64 timedelta64\n\n c-based names\n\n bool\n\n object_\n\n void, str_\n\n byte, ubyte,\n short, ushort\n intc, uintc,\n intp, uintp,\n int_, uint,\n longlong, ulonglong,\n\n single, csingle,\n double, cdouble,\n longdouble, clongdouble,\n\n As part of the type-hierarchy: xx -- is bit-width\n\n generic\n +-> bool (kind=b)\n +-> number\n | +-> integer\n | | +-> signedinteger (intxx) (kind=i)\n | | | byte\n | | | short\n | | | intc\n | | | intp\n | | | int_\n | | | longlong\n | | \\-> unsignedinteger (uintxx) (kind=u)\n | | ubyte\n | | ushort\n | | uintc\n | | uintp\n | | uint\n | | ulonglong\n | +-> inexact\n | +-> floating (floatxx) (kind=f)\n | | half\n | | single\n | | double\n | | longdouble\n | \\-> complexfloating (complexxx) (kind=c)\n | csingle\n | cdouble\n | clongdouble\n +-> flexible\n | +-> character\n | | bytes_ (kind=S)\n | | str_ (kind=U)\n | |\n | \\-> void (kind=V)\n \\-> object_ (not used much) (kind=O)\n\n"""\nimport numbers\nimport warnings\n\nfrom numpy._utils import set_module\n\nfrom . 
import multiarray as ma\nfrom .multiarray import (\n busday_count,\n busday_offset,\n busdaycalendar,\n datetime_as_string,\n datetime_data,\n dtype,\n is_busday,\n ndarray,\n)\n\n# we add more at the bottom\n__all__ = [\n 'ScalarType', 'typecodes', 'issubdtype', 'datetime_data',\n 'datetime_as_string', 'busday_offset', 'busday_count',\n 'is_busday', 'busdaycalendar', 'isdtype'\n]\n\n# we don't need all these imports, but we need to keep them for compatibility\n# for users using np._core.numerictypes.UPPER_TABLE\n# we don't export these for import *, but we do want them accessible\n# as numerictypes.bool, etc.\nfrom builtins import bool, bytes, complex, float, int, object, str # noqa: F401, UP029\n\nfrom ._dtype import _kind_name\nfrom ._string_helpers import ( # noqa: F401\n LOWER_TABLE,\n UPPER_TABLE,\n english_capitalize,\n english_lower,\n english_upper,\n)\nfrom ._type_aliases import allTypes, sctypeDict, sctypes\n\n# We use this later\ngeneric = allTypes['generic']\n\ngenericTypeRank = ['bool', 'int8', 'uint8', 'int16', 'uint16',\n 'int32', 'uint32', 'int64', 'uint64',\n 'float16', 'float32', 'float64', 'float96', 'float128',\n 'complex64', 'complex128', 'complex192', 'complex256',\n 'object']\n\n@set_module('numpy')\ndef maximum_sctype(t):\n """\n Return the scalar type of highest precision of the same kind as the input.\n\n .. deprecated:: 2.0\n Use an explicit dtype like int64 or float64 instead.\n\n Parameters\n ----------\n t : dtype or dtype specifier\n The input data type. 
This can be a `dtype` object or an object that\n is convertible to a `dtype`.\n\n Returns\n -------\n out : dtype\n The highest precision data type of the same kind (`dtype.kind`) as `t`.\n\n See Also\n --------\n obj2sctype, mintypecode, sctype2char\n dtype\n\n Examples\n --------\n >>> from numpy._core.numerictypes import maximum_sctype\n >>> maximum_sctype(int)\n <class 'numpy.int64'>\n >>> maximum_sctype(np.uint8)\n <class 'numpy.uint64'>\n >>> maximum_sctype(complex)\n <class 'numpy.complex256'> # may vary\n\n >>> maximum_sctype(str)\n <class 'numpy.str_'>\n\n >>> maximum_sctype('i2')\n <class 'numpy.int64'>\n >>> maximum_sctype('f4')\n <class 'numpy.float128'> # may vary\n\n """\n\n # Deprecated in NumPy 2.0, 2023-07-11\n warnings.warn(\n "`maximum_sctype` is deprecated. Use an explicit dtype like int64 "\n "or float64 instead. (deprecated in NumPy 2.0)",\n DeprecationWarning,\n stacklevel=2\n )\n\n g = obj2sctype(t)\n if g is None:\n return t\n t = g\n base = _kind_name(dtype(t))\n if base in sctypes:\n return sctypes[base][-1]\n else:\n return t\n\n\n@set_module('numpy')\ndef issctype(rep):\n """\n Determines whether the given object represents a scalar data-type.\n\n Parameters\n ----------\n rep : any\n If `rep` is an instance of a scalar dtype, True is returned. 
If not,\n False is returned.\n\n Returns\n -------\n out : bool\n Boolean result of check whether `rep` is a scalar dtype.\n\n See Also\n --------\n issubsctype, issubdtype, obj2sctype, sctype2char\n\n Examples\n --------\n >>> from numpy._core.numerictypes import issctype\n >>> issctype(np.int32)\n True\n >>> issctype(list)\n False\n >>> issctype(1.1)\n False\n\n Strings are also a scalar type:\n\n >>> issctype(np.dtype('str'))\n True\n\n """\n if not isinstance(rep, (type, dtype)):\n return False\n try:\n res = obj2sctype(rep)\n if res and res != object_:\n return True\n else:\n return False\n except Exception:\n return False\n\n\ndef obj2sctype(rep, default=None):\n """\n Return the scalar dtype or NumPy equivalent of Python type of an object.\n\n Parameters\n ----------\n rep : any\n The object of which the type is returned.\n default : any, optional\n If given, this is returned for objects whose types can not be\n determined. If not given, None is returned for those objects.\n\n Returns\n -------\n dtype : dtype or Python type\n The data type of `rep`.\n\n See Also\n --------\n sctype2char, issctype, issubsctype, issubdtype\n\n Examples\n --------\n >>> from numpy._core.numerictypes import obj2sctype\n >>> obj2sctype(np.int32)\n <class 'numpy.int32'>\n >>> obj2sctype(np.array([1., 2.]))\n <class 'numpy.float64'>\n >>> obj2sctype(np.array([1.j]))\n <class 'numpy.complex128'>\n\n >>> obj2sctype(dict)\n <class 'numpy.object_'>\n >>> obj2sctype('string')\n\n >>> obj2sctype(1, default=list)\n <class 'list'>\n\n """\n # prevent abstract classes being upcast\n if isinstance(rep, type) and issubclass(rep, generic):\n return rep\n # extract dtype from arrays\n if isinstance(rep, ndarray):\n return rep.dtype.type\n # fall back on dtype to convert\n try:\n res = dtype(rep)\n except Exception:\n return default\n else:\n return res.type\n\n\n@set_module('numpy')\ndef issubclass_(arg1, arg2):\n """\n Determine if a class is a subclass of a second class.\n\n `issubclass_` is 
equivalent to the Python built-in ``issubclass``,\n except that it returns False instead of raising a TypeError if one\n of the arguments is not a class.\n\n Parameters\n ----------\n arg1 : class\n Input class. True is returned if `arg1` is a subclass of `arg2`.\n arg2 : class or tuple of classes.\n Input class. If a tuple of classes, True is returned if `arg1` is a\n subclass of any of the tuple elements.\n\n Returns\n -------\n out : bool\n Whether `arg1` is a subclass of `arg2` or not.\n\n See Also\n --------\n issubsctype, issubdtype, issctype\n\n Examples\n --------\n >>> np.issubclass_(np.int32, int)\n False\n >>> np.issubclass_(np.int32, float)\n False\n >>> np.issubclass_(np.float64, float)\n True\n\n """\n try:\n return issubclass(arg1, arg2)\n except TypeError:\n return False\n\n\n@set_module('numpy')\ndef issubsctype(arg1, arg2):\n """\n Determine if the first argument is a subclass of the second argument.\n\n Parameters\n ----------\n arg1, arg2 : dtype or dtype specifier\n Data-types.\n\n Returns\n -------\n out : bool\n The result.\n\n See Also\n --------\n issctype, issubdtype, obj2sctype\n\n Examples\n --------\n >>> from numpy._core import issubsctype\n >>> issubsctype('S8', str)\n False\n >>> issubsctype(np.array([1]), int)\n True\n >>> issubsctype(np.array([1]), float)\n False\n\n """\n return issubclass(obj2sctype(arg1), obj2sctype(arg2))\n\n\nclass _PreprocessDTypeError(Exception):\n pass\n\n\ndef _preprocess_dtype(dtype):\n """\n Preprocess dtype argument by:\n 1. fetching type from a data type\n 2. 
verifying that types are built-in NumPy dtypes\n """\n if isinstance(dtype, ma.dtype):\n dtype = dtype.type\n if isinstance(dtype, ndarray) or dtype not in allTypes.values():\n raise _PreprocessDTypeError\n return dtype\n\n\n@set_module('numpy')\ndef isdtype(dtype, kind):\n """\n Determine if a provided dtype is of a specified data type ``kind``.\n\n This function only supports built-in NumPy's data types.\n Third-party dtypes are not yet supported.\n\n Parameters\n ----------\n dtype : dtype\n The input dtype.\n kind : dtype or str or tuple of dtypes/strs.\n dtype or dtype kind. Allowed dtype kinds are:\n * ``'bool'`` : boolean kind\n * ``'signed integer'`` : signed integer data types\n * ``'unsigned integer'`` : unsigned integer data types\n * ``'integral'`` : integer data types\n * ``'real floating'`` : real-valued floating-point data types\n * ``'complex floating'`` : complex floating-point data types\n * ``'numeric'`` : numeric data types\n\n Returns\n -------\n out : bool\n\n See Also\n --------\n issubdtype\n\n Examples\n --------\n >>> import numpy as np\n >>> np.isdtype(np.float32, np.float64)\n False\n >>> np.isdtype(np.float32, "real floating")\n True\n >>> np.isdtype(np.complex128, ("real floating", "complex floating"))\n True\n\n """\n try:\n dtype = _preprocess_dtype(dtype)\n except _PreprocessDTypeError:\n raise TypeError(\n "dtype argument must be a NumPy dtype, "\n f"but it is a {type(dtype)}."\n ) from None\n\n input_kinds = kind if isinstance(kind, tuple) else (kind,)\n\n processed_kinds = set()\n\n for kind in input_kinds:\n if kind == "bool":\n processed_kinds.add(allTypes["bool"])\n elif kind == "signed integer":\n processed_kinds.update(sctypes["int"])\n elif kind == "unsigned integer":\n processed_kinds.update(sctypes["uint"])\n elif kind == "integral":\n processed_kinds.update(sctypes["int"] + sctypes["uint"])\n elif kind == "real floating":\n processed_kinds.update(sctypes["float"])\n elif kind == "complex floating":\n 
processed_kinds.update(sctypes["complex"])\n elif kind == "numeric":\n processed_kinds.update(\n sctypes["int"] + sctypes["uint"] +\n sctypes["float"] + sctypes["complex"]\n )\n elif isinstance(kind, str):\n raise ValueError(\n "kind argument is a string, but"\n f" {kind!r} is not a known kind name."\n )\n else:\n try:\n kind = _preprocess_dtype(kind)\n except _PreprocessDTypeError:\n raise TypeError(\n "kind argument must be comprised of "\n "NumPy dtypes or strings only, "\n f"but is a {type(kind)}."\n ) from None\n processed_kinds.add(kind)\n\n return dtype in processed_kinds\n\n\n@set_module('numpy')\ndef issubdtype(arg1, arg2):\n r"""\n Returns True if first argument is a typecode lower/equal in type hierarchy.\n\n This is like the builtin :func:`issubclass`, but for `dtype`\ s.\n\n Parameters\n ----------\n arg1, arg2 : dtype_like\n `dtype` or object coercible to one\n\n Returns\n -------\n out : bool\n\n See Also\n --------\n :ref:`arrays.scalars` : Overview of the numpy type hierarchy.\n\n Examples\n --------\n `issubdtype` can be used to check the type of arrays:\n\n >>> ints = np.array([1, 2, 3], dtype=np.int32)\n >>> np.issubdtype(ints.dtype, np.integer)\n True\n >>> np.issubdtype(ints.dtype, np.floating)\n False\n\n >>> floats = np.array([1, 2, 3], dtype=np.float32)\n >>> np.issubdtype(floats.dtype, np.integer)\n False\n >>> np.issubdtype(floats.dtype, np.floating)\n True\n\n Similar types of different sizes are not subdtypes of each other:\n\n >>> np.issubdtype(np.float64, np.float32)\n False\n >>> np.issubdtype(np.float32, np.float64)\n False\n\n but both are subtypes of `floating`:\n\n >>> np.issubdtype(np.float64, np.floating)\n True\n >>> np.issubdtype(np.float32, np.floating)\n True\n\n For convenience, dtype-like objects are allowed too:\n\n >>> np.issubdtype('S1', np.bytes_)\n True\n >>> np.issubdtype('i4', np.signedinteger)\n True\n\n """\n if not issubclass_(arg1, generic):\n arg1 = dtype(arg1).type\n if not issubclass_(arg2, generic):\n arg2 
= dtype(arg2).type\n\n return issubclass(arg1, arg2)\n\n\n@set_module('numpy')\ndef sctype2char(sctype):\n """\n Return the string representation of a scalar dtype.\n\n Parameters\n ----------\n sctype : scalar dtype or object\n If a scalar dtype, the corresponding string character is\n returned. If an object, `sctype2char` tries to infer its scalar type\n and then return the corresponding string character.\n\n Returns\n -------\n typechar : str\n The string character corresponding to the scalar type.\n\n Raises\n ------\n ValueError\n If `sctype` is an object for which the type can not be inferred.\n\n See Also\n --------\n obj2sctype, issctype, issubsctype, mintypecode\n\n Examples\n --------\n >>> from numpy._core.numerictypes import sctype2char\n >>> for sctype in [np.int32, np.double, np.cdouble, np.bytes_, np.ndarray]:\n ... print(sctype2char(sctype))\n l # may vary\n d\n D\n S\n O\n\n >>> x = np.array([1., 2-1.j])\n >>> sctype2char(x)\n 'D'\n >>> sctype2char(list)\n 'O'\n\n """\n sctype = obj2sctype(sctype)\n if sctype is None:\n raise ValueError("unrecognized type")\n if sctype not in sctypeDict.values():\n # for compatibility\n raise KeyError(sctype)\n return dtype(sctype).char\n\n\ndef _scalar_type_key(typ):\n """A ``key`` function for `sorted`."""\n dt = dtype(typ)\n return (dt.kind.lower(), dt.itemsize)\n\n\nScalarType = [int, float, complex, bool, bytes, str, memoryview]\nScalarType += sorted(set(sctypeDict.values()), key=_scalar_type_key)\nScalarType = tuple(ScalarType)\n\n\n# Now add the types we've determined to this module\nfor key in allTypes:\n globals()[key] = allTypes[key]\n __all__.append(key)\n\ndel key\n\ntypecodes = {'Character': 'c',\n 'Integer': 'bhilqnp',\n 'UnsignedInteger': 'BHILQNP',\n 'Float': 'efdg',\n 'Complex': 'FDG',\n 'AllInteger': 'bBhHiIlLqQnNpP',\n 'AllFloat': 'efdgFDG',\n 'Datetime': 'Mm',\n 'All': '?bhilqnpBHILQNPefdgFDGSUVOMm'}\n\n# backwards compatibility --- deprecated name\n# Formal deprecation: Numpy 1.20.0, 2020-10-19 
(see numpy/__init__.py)\ntypeDict = sctypeDict\n\ndef _register_types():\n numbers.Integral.register(integer)\n numbers.Complex.register(inexact)\n numbers.Real.register(floating)\n numbers.Number.register(number)\n\n\n_register_types()\n
.venv\Lib\site-packages\numpy\_core\numerictypes.py
numerictypes.py
Python
16,590
0.95
0.112164
0.040856
react-lib
868
2024-04-02T13:29:29.349529
GPL-3.0
false
2e4da94b83bea76b0fa28870fb524478
import builtins\nfrom typing import Any, TypedDict, type_check_only\nfrom typing import Literal as L\n\nimport numpy as np\nfrom numpy import (\n bool,\n bool_,\n byte,\n bytes_,\n cdouble,\n character,\n clongdouble,\n complex64,\n complex128,\n complexfloating,\n csingle,\n datetime64,\n double,\n dtype,\n flexible,\n float16,\n float32,\n float64,\n floating,\n generic,\n half,\n inexact,\n int8,\n int16,\n int32,\n int64,\n int_,\n intc,\n integer,\n intp,\n long,\n longdouble,\n longlong,\n number,\n object_,\n short,\n signedinteger,\n single,\n str_,\n timedelta64,\n ubyte,\n uint,\n uint8,\n uint16,\n uint32,\n uint64,\n uintc,\n uintp,\n ulong,\n ulonglong,\n unsignedinteger,\n ushort,\n void,\n)\nfrom numpy._typing import DTypeLike\nfrom numpy._typing._extended_precision import complex192, complex256, float96, float128\n\nfrom ._type_aliases import sctypeDict # noqa: F401\nfrom .multiarray import (\n busday_count,\n busday_offset,\n busdaycalendar,\n datetime_as_string,\n datetime_data,\n is_busday,\n)\n\n__all__ = [\n "ScalarType",\n "typecodes",\n "issubdtype",\n "datetime_data",\n "datetime_as_string",\n "busday_offset",\n "busday_count",\n "is_busday",\n "busdaycalendar",\n "isdtype",\n "generic",\n "unsignedinteger",\n "character",\n "inexact",\n "number",\n "integer",\n "flexible",\n "complexfloating",\n "signedinteger",\n "floating",\n "bool",\n "float16",\n "float32",\n "float64",\n "longdouble",\n "complex64",\n "complex128",\n "clongdouble",\n "bytes_",\n "str_",\n "void",\n "object_",\n "datetime64",\n "timedelta64",\n "int8",\n "byte",\n "uint8",\n "ubyte",\n "int16",\n "short",\n "uint16",\n "ushort",\n "int32",\n "intc",\n "uint32",\n "uintc",\n "int64",\n "long",\n "uint64",\n "ulong",\n "longlong",\n "ulonglong",\n "intp",\n "uintp",\n "double",\n "cdouble",\n "single",\n "csingle",\n "half",\n "bool_",\n "int_",\n "uint",\n "float96",\n "float128",\n "complex192",\n "complex256",\n]\n\n@type_check_only\nclass _TypeCodes(TypedDict):\n 
Character: L['c']\n Integer: L['bhilqnp']\n UnsignedInteger: L['BHILQNP']\n Float: L['efdg']\n Complex: L['FDG']\n AllInteger: L['bBhHiIlLqQnNpP']\n AllFloat: L['efdgFDG']\n Datetime: L['Mm']\n All: L['?bhilqnpBHILQNPefdgFDGSUVOMm']\n\ndef isdtype(dtype: dtype | type[Any], kind: DTypeLike | tuple[DTypeLike, ...]) -> builtins.bool: ...\n\ndef issubdtype(arg1: DTypeLike, arg2: DTypeLike) -> builtins.bool: ...\n\ntypecodes: _TypeCodes\nScalarType: tuple[\n type[int],\n type[float],\n type[complex],\n type[builtins.bool],\n type[bytes],\n type[str],\n type[memoryview],\n type[np.bool],\n type[csingle],\n type[cdouble],\n type[clongdouble],\n type[half],\n type[single],\n type[double],\n type[longdouble],\n type[byte],\n type[short],\n type[intc],\n type[long],\n type[longlong],\n type[timedelta64],\n type[datetime64],\n type[object_],\n type[bytes_],\n type[str_],\n type[ubyte],\n type[ushort],\n type[uintc],\n type[ulong],\n type[ulonglong],\n type[void],\n]\n
.venv\Lib\site-packages\numpy\_core\numerictypes.pyi
numerictypes.pyi
Other
3,462
0.95
0.015625
0
vue-tools
258
2023-09-23T02:20:27.622676
GPL-3.0
false
6a1148f1875bfdc4566169a1ff523093
"""Implementation of __array_function__ overrides from NEP-18."""\nimport collections\nimport functools\n\nfrom numpy._core._multiarray_umath import (\n _ArrayFunctionDispatcher,\n _get_implementing_args,\n add_docstring,\n)\nfrom numpy._utils import set_module # noqa: F401\nfrom numpy._utils._inspect import getargspec\n\nARRAY_FUNCTIONS = set()\n\narray_function_like_doc = (\n """like : array_like, optional\n Reference object to allow the creation of arrays which are not\n NumPy arrays. If an array-like passed in as ``like`` supports\n the ``__array_function__`` protocol, the result will be defined\n by it. In this case, it ensures the creation of an array object\n compatible with that passed in via this argument."""\n)\n\ndef get_array_function_like_doc(public_api, docstring_template=""):\n ARRAY_FUNCTIONS.add(public_api)\n docstring = public_api.__doc__ or docstring_template\n return docstring.replace("${ARRAY_FUNCTION_LIKE}", array_function_like_doc)\n\ndef finalize_array_function_like(public_api):\n public_api.__doc__ = get_array_function_like_doc(public_api)\n return public_api\n\n\nadd_docstring(\n _ArrayFunctionDispatcher,\n """\n Class to wrap functions with checks for __array_function__ overrides.\n\n All arguments are required, and can only be passed by position.\n\n Parameters\n ----------\n dispatcher : function or None\n The dispatcher function that returns a single sequence-like object\n of all arguments relevant. It must have the same signature (except\n the default values) as the actual implementation.\n If ``None``, this is a ``like=`` dispatcher and the\n ``_ArrayFunctionDispatcher`` must be called with ``like`` as the\n first (additional and positional) argument.\n implementation : function\n Function that implements the operation on NumPy arrays without\n overrides. 
Arguments passed calling the ``_ArrayFunctionDispatcher``\n will be forwarded to this (and the ``dispatcher``) as if using\n ``*args, **kwargs``.\n\n Attributes\n ----------\n _implementation : function\n The original implementation passed in.\n """)\n\n\n# exposed for testing purposes; used internally by _ArrayFunctionDispatcher\nadd_docstring(\n _get_implementing_args,\n """\n Collect arguments on which to call __array_function__.\n\n Parameters\n ----------\n relevant_args : iterable of array-like\n Iterable of possibly array-like arguments to check for\n __array_function__ methods.\n\n Returns\n -------\n Sequence of arguments with __array_function__ methods, in the order in\n which they should be called.\n """)\n\n\nArgSpec = collections.namedtuple('ArgSpec', 'args varargs keywords defaults')\n\n\ndef verify_matching_signatures(implementation, dispatcher):\n """Verify that a dispatcher function has the right signature."""\n implementation_spec = ArgSpec(*getargspec(implementation))\n dispatcher_spec = ArgSpec(*getargspec(dispatcher))\n\n if (implementation_spec.args != dispatcher_spec.args or\n implementation_spec.varargs != dispatcher_spec.varargs or\n implementation_spec.keywords != dispatcher_spec.keywords or\n (bool(implementation_spec.defaults) !=\n bool(dispatcher_spec.defaults)) or\n (implementation_spec.defaults is not None and\n len(implementation_spec.defaults) !=\n len(dispatcher_spec.defaults))):\n raise RuntimeError('implementation and dispatcher for %s have '\n 'different function signatures' % implementation)\n\n if implementation_spec.defaults is not None:\n if dispatcher_spec.defaults != (None,) * len(dispatcher_spec.defaults):\n raise RuntimeError('dispatcher functions can only use None for '\n 'default argument values')\n\n\ndef array_function_dispatch(dispatcher=None, module=None, verify=True,\n docs_from_dispatcher=False):\n """Decorator for adding dispatch with the __array_function__ protocol.\n\n See NEP-18 for example usage.\n\n 
Parameters\n ----------\n dispatcher : callable or None\n Function that when called like ``dispatcher(*args, **kwargs)`` with\n arguments from the NumPy function call returns an iterable of\n array-like arguments to check for ``__array_function__``.\n\n If `None`, the first argument is used as the single `like=` argument\n and not passed on. A function implementing `like=` must call its\n dispatcher with `like` as the first non-keyword argument.\n module : str, optional\n __module__ attribute to set on new function, e.g., ``module='numpy'``.\n By default, module is copied from the decorated function.\n verify : bool, optional\n If True, verify the that the signature of the dispatcher and decorated\n function signatures match exactly: all required and optional arguments\n should appear in order with the same names, but the default values for\n all optional arguments should be ``None``. Only disable verification\n if the dispatcher's signature needs to deviate for some particular\n reason, e.g., because the function has a signature like\n ``func(*args, **kwargs)``.\n docs_from_dispatcher : bool, optional\n If True, copy docs from the dispatcher function onto the dispatched\n function, rather than from the implementation. This is useful for\n functions defined in C, which otherwise don't have docstrings.\n\n Returns\n -------\n Function suitable for decorating the implementation of a NumPy function.\n\n """\n def decorator(implementation):\n if verify:\n if dispatcher is not None:\n verify_matching_signatures(implementation, dispatcher)\n else:\n # Using __code__ directly similar to verify_matching_signature\n co = implementation.__code__\n last_arg = co.co_argcount + co.co_kwonlyargcount - 1\n last_arg = co.co_varnames[last_arg]\n if last_arg != "like" or co.co_kwonlyargcount == 0:\n raise RuntimeError(\n "__array_function__ expects `like=` to be the last "\n "argument and a keyword-only argument. 
"\n f"{implementation} does not seem to comply.")\n\n if docs_from_dispatcher:\n add_docstring(implementation, dispatcher.__doc__)\n\n public_api = _ArrayFunctionDispatcher(dispatcher, implementation)\n public_api = functools.wraps(implementation)(public_api)\n\n if module is not None:\n public_api.__module__ = module\n\n ARRAY_FUNCTIONS.add(public_api)\n\n return public_api\n\n return decorator\n\n\ndef array_function_from_dispatcher(\n implementation, module=None, verify=True, docs_from_dispatcher=True):\n """Like array_function_dispatcher, but with function arguments flipped."""\n\n def decorator(dispatcher):\n return array_function_dispatch(\n dispatcher, module, verify=verify,\n docs_from_dispatcher=docs_from_dispatcher)(implementation)\n return decorator\n
.venv\Lib\site-packages\numpy\_core\overrides.py
overrides.py
Python
7,424
0.95
0.245902
0.013605
vue-tools
127
2024-03-22T00:09:31.761842
BSD-3-Clause
false
abcf7baa90078b1864c18cc7170aea96
from collections.abc import Callable, Iterable\nfrom typing import Any, Final, NamedTuple, ParamSpec, TypeVar\n\nfrom numpy._typing import _SupportsArrayFunc\n\n_T = TypeVar("_T")\n_Tss = ParamSpec("_Tss")\n_FuncT = TypeVar("_FuncT", bound=Callable[..., object])\n\n###\n\nARRAY_FUNCTIONS: set[Callable[..., Any]] = ...\narray_function_like_doc: Final[str] = ...\n\nclass ArgSpec(NamedTuple):\n args: list[str]\n varargs: str | None\n keywords: str | None\n defaults: tuple[Any, ...]\n\ndef get_array_function_like_doc(public_api: Callable[..., Any], docstring_template: str = "") -> str: ...\ndef finalize_array_function_like(public_api: _FuncT) -> _FuncT: ...\n\n#\ndef verify_matching_signatures(\n implementation: Callable[_Tss, object],\n dispatcher: Callable[_Tss, Iterable[_SupportsArrayFunc]],\n) -> None: ...\n\n# NOTE: This actually returns a `_ArrayFunctionDispatcher` callable wrapper object, with\n# the original wrapped callable stored in the `._implementation` attribute. It checks\n# for any `__array_function__` of the values of specific arguments that the dispatcher\n# specifies. Since the dispatcher only returns an iterable of passed array-like args,\n# this overridable behaviour is impossible to annotate.\ndef array_function_dispatch(\n dispatcher: Callable[_Tss, Iterable[_SupportsArrayFunc]] | None = None,\n module: str | None = None,\n verify: bool = True,\n docs_from_dispatcher: bool = False,\n) -> Callable[[_FuncT], _FuncT]: ...\n\n#\ndef array_function_from_dispatcher(\n implementation: Callable[_Tss, _T],\n module: str | None = None,\n verify: bool = True,\n docs_from_dispatcher: bool = True,\n) -> Callable[[Callable[_Tss, Iterable[_SupportsArrayFunc]]], Callable[_Tss, _T]]: ...\n
.venv\Lib\site-packages\numpy\_core\overrides.pyi
overrides.pyi
Other
1,761
0.95
0.145833
0.205128
react-lib
546
2023-10-21T04:24:19.765541
BSD-3-Clause
false
5cc5cb08a0c9ba8a1e34500138adf8cf
"""\nStores and defines the low-level format_options context variable.\n\nThis is defined in its own file outside of the arrayprint module\nso we can import it from C while initializing the multiarray\nC module during import without introducing circular dependencies.\n"""\n\nimport sys\nfrom contextvars import ContextVar\n\n__all__ = ["format_options"]\n\ndefault_format_options_dict = {\n "edgeitems": 3, # repr N leading and trailing items of each dimension\n "threshold": 1000, # total items > triggers array summarization\n "floatmode": "maxprec",\n "precision": 8, # precision of floating point representations\n "suppress": False, # suppress printing small floating values in exp format\n "linewidth": 75,\n "nanstr": "nan",\n "infstr": "inf",\n "sign": "-",\n "formatter": None,\n # Internally stored as an int to simplify comparisons; converted from/to\n # str/False on the way in/out.\n 'legacy': sys.maxsize,\n 'override_repr': None,\n}\n\nformat_options = ContextVar(\n "format_options", default=default_format_options_dict)\n
.venv\Lib\site-packages\numpy\_core\printoptions.py
printoptions.py
Python
1,088
0.95
0.03125
0.074074
react-lib
465
2024-07-30T02:42:07.109882
MIT
false
4d91a211dbc14ffa5b97d3b83af10645
from collections.abc import Callable\nfrom contextvars import ContextVar\nfrom typing import Any, Final, TypedDict\n\nfrom .arrayprint import _FormatDict\n\n__all__ = ["format_options"]\n\n###\n\nclass _FormatOptionsDict(TypedDict):\n edgeitems: int\n threshold: int\n floatmode: str\n precision: int\n suppress: bool\n linewidth: int\n nanstr: str\n infstr: str\n sign: str\n formatter: _FormatDict | None\n legacy: int\n override_repr: Callable[[Any], str] | None\n\n###\n\ndefault_format_options_dict: Final[_FormatOptionsDict] = ...\nformat_options: ContextVar[_FormatOptionsDict]\n
.venv\Lib\site-packages\numpy\_core\printoptions.pyi
printoptions.pyi
Other
622
0.95
0.035714
0.090909
node-utils
411
2024-10-05T14:14:54.492386
BSD-3-Clause
false
3167fea396a6eedeb8b8b706a8867843
"""\nThis module contains a set of functions for record arrays.\n"""\nimport os\nimport warnings\nfrom collections import Counter\nfrom contextlib import nullcontext\n\nfrom numpy._utils import set_module\n\nfrom . import numeric as sb\nfrom . import numerictypes as nt\nfrom .arrayprint import _get_legacy_print_mode\n\n# All of the functions allow formats to be a dtype\n__all__ = [\n 'record', 'recarray', 'format_parser', 'fromarrays', 'fromrecords',\n 'fromstring', 'fromfile', 'array', 'find_duplicate',\n]\n\n\nndarray = sb.ndarray\n\n_byteorderconv = {'b': '>',\n 'l': '<',\n 'n': '=',\n 'B': '>',\n 'L': '<',\n 'N': '=',\n 'S': 's',\n 's': 's',\n '>': '>',\n '<': '<',\n '=': '=',\n '|': '|',\n 'I': '|',\n 'i': '|'}\n\n# formats regular expression\n# allows multidimensional spec with a tuple syntax in front\n# of the letter code '(2,3)f4' and ' ( 2 , 3 ) f4 '\n# are equally allowed\n\nnumfmt = nt.sctypeDict\n\n\n@set_module('numpy.rec')\ndef find_duplicate(list):\n """Find duplication in a list, return a list of duplicated elements"""\n return [\n item\n for item, counts in Counter(list).items()\n if counts > 1\n ]\n\n\n@set_module('numpy.rec')\nclass format_parser:\n """\n Class to convert formats, names, titles description to a dtype.\n\n After constructing the format_parser object, the dtype attribute is\n the converted data-type:\n ``dtype = format_parser(formats, names, titles).dtype``\n\n Attributes\n ----------\n dtype : dtype\n The converted data-type.\n\n Parameters\n ----------\n formats : str or list of str\n The format description, either specified as a string with\n comma-separated format descriptions in the form ``'f8, i4, S5'``, or\n a list of format description strings in the form\n ``['f8', 'i4', 'S5']``.\n names : str or list/tuple of str\n The field names, either specified as a comma-separated string in the\n form ``'col1, col2, col3'``, or as a list or tuple of strings in the\n form ``['col1', 'col2', 'col3']``.\n An empty list can be used, in 
that case default field names\n ('f0', 'f1', ...) are used.\n titles : sequence\n Sequence of title strings. An empty list can be used to leave titles\n out.\n aligned : bool, optional\n If True, align the fields by padding as the C-compiler would.\n Default is False.\n byteorder : str, optional\n If specified, all the fields will be changed to the\n provided byte-order. Otherwise, the default byte-order is\n used. For all available string specifiers, see `dtype.newbyteorder`.\n\n See Also\n --------\n numpy.dtype, numpy.typename\n\n Examples\n --------\n >>> import numpy as np\n >>> np.rec.format_parser(['<f8', '<i4'], ['col1', 'col2'],\n ... ['T1', 'T2']).dtype\n dtype([(('T1', 'col1'), '<f8'), (('T2', 'col2'), '<i4')])\n\n `names` and/or `titles` can be empty lists. If `titles` is an empty list,\n titles will simply not appear. If `names` is empty, default field names\n will be used.\n\n >>> np.rec.format_parser(['f8', 'i4', 'a5'], ['col1', 'col2', 'col3'],\n ... []).dtype\n dtype([('col1', '<f8'), ('col2', '<i4'), ('col3', '<S5')])\n >>> np.rec.format_parser(['<f8', '<i4', '<a5'], [], []).dtype\n dtype([('f0', '<f8'), ('f1', '<i4'), ('f2', 'S5')])\n\n """\n\n def __init__(self, formats, names, titles, aligned=False, byteorder=None):\n self._parseFormats(formats, aligned)\n self._setfieldnames(names, titles)\n self._createdtype(byteorder)\n\n def _parseFormats(self, formats, aligned=False):\n """ Parse the field formats """\n\n if formats is None:\n raise ValueError("Need formats argument")\n if isinstance(formats, list):\n dtype = sb.dtype(\n [\n (f'f{i}', format_)\n for i, format_ in enumerate(formats)\n ],\n aligned,\n )\n else:\n dtype = sb.dtype(formats, aligned)\n fields = dtype.fields\n if fields is None:\n dtype = sb.dtype([('f1', dtype)], aligned)\n fields = dtype.fields\n keys = dtype.names\n self._f_formats = [fields[key][0] for key in keys]\n self._offsets = [fields[key][1] for key in keys]\n self._nfields = len(keys)\n\n def _setfieldnames(self, 
names, titles):\n """convert input field names into a list and assign to the _names\n attribute """\n\n if names:\n if type(names) in [list, tuple]:\n pass\n elif isinstance(names, str):\n names = names.split(',')\n else:\n raise NameError(f"illegal input names {repr(names)}")\n\n self._names = [n.strip() for n in names[:self._nfields]]\n else:\n self._names = []\n\n # if the names are not specified, they will be assigned as\n # "f0, f1, f2,..."\n # if not enough names are specified, they will be assigned as "f[n],\n # f[n+1],..." etc. where n is the number of specified names..."\n self._names += ['f%d' % i for i in range(len(self._names),\n self._nfields)]\n # check for redundant names\n _dup = find_duplicate(self._names)\n if _dup:\n raise ValueError(f"Duplicate field names: {_dup}")\n\n if titles:\n self._titles = [n.strip() for n in titles[:self._nfields]]\n else:\n self._titles = []\n titles = []\n\n if self._nfields > len(titles):\n self._titles += [None] * (self._nfields - len(titles))\n\n def _createdtype(self, byteorder):\n dtype = sb.dtype({\n 'names': self._names,\n 'formats': self._f_formats,\n 'offsets': self._offsets,\n 'titles': self._titles,\n })\n if byteorder is not None:\n byteorder = _byteorderconv[byteorder[0]]\n dtype = dtype.newbyteorder(byteorder)\n\n self.dtype = dtype\n\n\nclass record(nt.void):\n """A data-type scalar that allows field access as attribute lookup.\n """\n\n # manually set name and module so that this class's type shows up\n # as numpy.record when printed\n __name__ = 'record'\n __module__ = 'numpy'\n\n def __repr__(self):\n if _get_legacy_print_mode() <= 113:\n return self.__str__()\n return super().__repr__()\n\n def __str__(self):\n if _get_legacy_print_mode() <= 113:\n return str(self.item())\n return super().__str__()\n\n def __getattribute__(self, attr):\n if attr in ('setfield', 'getfield', 'dtype'):\n return nt.void.__getattribute__(self, attr)\n try:\n return nt.void.__getattribute__(self, attr)\n except 
AttributeError:\n pass\n fielddict = nt.void.__getattribute__(self, 'dtype').fields\n res = fielddict.get(attr, None)\n if res:\n obj = self.getfield(*res[:2])\n # if it has fields return a record,\n # otherwise return the object\n try:\n dt = obj.dtype\n except AttributeError:\n # happens if field is Object type\n return obj\n if dt.names is not None:\n return obj.view((self.__class__, obj.dtype))\n return obj\n else:\n raise AttributeError(f"'record' object has no attribute '{attr}'")\n\n def __setattr__(self, attr, val):\n if attr in ('setfield', 'getfield', 'dtype'):\n raise AttributeError(f"Cannot set '{attr}' attribute")\n fielddict = nt.void.__getattribute__(self, 'dtype').fields\n res = fielddict.get(attr, None)\n if res:\n return self.setfield(val, *res[:2])\n elif getattr(self, attr, None):\n return nt.void.__setattr__(self, attr, val)\n else:\n raise AttributeError(f"'record' object has no attribute '{attr}'")\n\n def __getitem__(self, indx):\n obj = nt.void.__getitem__(self, indx)\n\n # copy behavior of record.__getattribute__,\n if isinstance(obj, nt.void) and obj.dtype.names is not None:\n return obj.view((self.__class__, obj.dtype))\n else:\n # return a single element\n return obj\n\n def pprint(self):\n """Pretty-print all fields."""\n # pretty-print all fields\n names = self.dtype.names\n maxlen = max(len(name) for name in names)\n fmt = '%% %ds: %%s' % maxlen\n rows = [fmt % (name, getattr(self, name)) for name in names]\n return "\n".join(rows)\n\n# The recarray is almost identical to a standard array (which supports\n# named fields already) The biggest difference is that it can use\n# attribute-lookup to find the fields and it is constructed using\n# a record.\n\n# If byteorder is given it forces a particular byteorder on all\n# the fields (and any subfields)\n\n\n@set_module("numpy.rec")\nclass recarray(ndarray):\n """Construct an ndarray that allows field access using attributes.\n\n Arrays may have a data-types containing fields, analogous\n 
to columns in a spread sheet. An example is ``[(x, int), (y, float)]``,\n where each entry in the array is a pair of ``(int, float)``. Normally,\n these attributes are accessed using dictionary lookups such as ``arr['x']``\n and ``arr['y']``. Record arrays allow the fields to be accessed as members\n of the array, using ``arr.x`` and ``arr.y``.\n\n Parameters\n ----------\n shape : tuple\n Shape of output array.\n dtype : data-type, optional\n The desired data-type. By default, the data-type is determined\n from `formats`, `names`, `titles`, `aligned` and `byteorder`.\n formats : list of data-types, optional\n A list containing the data-types for the different columns, e.g.\n ``['i4', 'f8', 'i4']``. `formats` does *not* support the new\n convention of using types directly, i.e. ``(int, float, int)``.\n Note that `formats` must be a list, not a tuple.\n Given that `formats` is somewhat limited, we recommend specifying\n `dtype` instead.\n names : tuple of str, optional\n The name of each column, e.g. ``('x', 'y', 'z')``.\n buf : buffer, optional\n By default, a new array is created of the given shape and data-type.\n If `buf` is specified and is an object exposing the buffer interface,\n the array will use the memory from the existing buffer. In this case,\n the `offset` and `strides` keywords are available.\n\n Other Parameters\n ----------------\n titles : tuple of str, optional\n Aliases for column names. 
For example, if `names` were\n ``('x', 'y', 'z')`` and `titles` is\n ``('x_coordinate', 'y_coordinate', 'z_coordinate')``, then\n ``arr['x']`` is equivalent to both ``arr.x`` and ``arr.x_coordinate``.\n byteorder : {'<', '>', '='}, optional\n Byte-order for all fields.\n aligned : bool, optional\n Align the fields in memory as the C-compiler would.\n strides : tuple of ints, optional\n Buffer (`buf`) is interpreted according to these strides (strides\n define how many bytes each array element, row, column, etc.\n occupy in memory).\n offset : int, optional\n Start reading buffer (`buf`) from this offset onwards.\n order : {'C', 'F'}, optional\n Row-major (C-style) or column-major (Fortran-style) order.\n\n Returns\n -------\n rec : recarray\n Empty array of the given shape and type.\n\n See Also\n --------\n numpy.rec.fromrecords : Construct a record array from data.\n numpy.record : fundamental data-type for `recarray`.\n numpy.rec.format_parser : determine data-type from formats, names, titles.\n\n Notes\n -----\n This constructor can be compared to ``empty``: it creates a new record\n array but does not fill it with data. To create a record array from data,\n use one of the following methods:\n\n 1. Create a standard ndarray and convert it to a record array,\n using ``arr.view(np.recarray)``\n 2. Use the `buf` keyword.\n 3. Use `np.rec.fromrecords`.\n\n Examples\n --------\n Create an array with two fields, ``x`` and ``y``:\n\n >>> import numpy as np\n >>> x = np.array([(1.0, 2), (3.0, 4)], dtype=[('x', '<f8'), ('y', '<i8')])\n >>> x\n array([(1., 2), (3., 4)], dtype=[('x', '<f8'), ('y', '<i8')])\n\n >>> x['x']\n array([1., 3.])\n\n View the array as a record array:\n\n >>> x = x.view(np.recarray)\n\n >>> x.x\n array([1., 3.])\n\n >>> x.y\n array([2, 4])\n\n Create a new, empty record array:\n\n >>> np.recarray((2,),\n ... 
dtype=[('x', int), ('y', float), ('z', int)]) #doctest: +SKIP\n rec.array([(-1073741821, 1.2249118382103472e-301, 24547520),\n (3471280, 1.2134086255804012e-316, 0)],\n dtype=[('x', '<i4'), ('y', '<f8'), ('z', '<i4')])\n\n """\n\n def __new__(subtype, shape, dtype=None, buf=None, offset=0, strides=None,\n formats=None, names=None, titles=None,\n byteorder=None, aligned=False, order='C'):\n\n if dtype is not None:\n descr = sb.dtype(dtype)\n else:\n descr = format_parser(\n formats, names, titles, aligned, byteorder\n ).dtype\n\n if buf is None:\n self = ndarray.__new__(\n subtype, shape, (record, descr), order=order\n )\n else:\n self = ndarray.__new__(\n subtype, shape, (record, descr), buffer=buf,\n offset=offset, strides=strides, order=order\n )\n return self\n\n def __array_finalize__(self, obj):\n if self.dtype.type is not record and self.dtype.names is not None:\n # if self.dtype is not np.record, invoke __setattr__ which will\n # convert it to a record if it is a void dtype.\n self.dtype = self.dtype\n\n def __getattribute__(self, attr):\n # See if ndarray has this attr, and return it if so. (note that this\n # means a field with the same name as an ndarray attr cannot be\n # accessed by attribute).\n try:\n return object.__getattribute__(self, attr)\n except AttributeError: # attr must be a fieldname\n pass\n\n # look for a field with this name\n fielddict = ndarray.__getattribute__(self, 'dtype').fields\n try:\n res = fielddict[attr][:2]\n except (TypeError, KeyError) as e:\n raise AttributeError(f"recarray has no attribute {attr}") from e\n obj = self.getfield(*res)\n\n # At this point obj will always be a recarray, since (see\n # PyArray_GetField) the type of obj is inherited. Next, if obj.dtype is\n # non-structured, convert it to an ndarray. Then if obj is structured\n # with void type convert it to the same dtype.type (eg to preserve\n # numpy.record type if present), since nested structured fields do not\n # inherit type. 
Don't do this for non-void structures though.\n if obj.dtype.names is not None:\n if issubclass(obj.dtype.type, nt.void):\n return obj.view(dtype=(self.dtype.type, obj.dtype))\n return obj\n else:\n return obj.view(ndarray)\n\n # Save the dictionary.\n # If the attr is a field name and not in the saved dictionary\n # Undo any "setting" of the attribute and do a setfield\n # Thus, you can't create attributes on-the-fly that are field names.\n def __setattr__(self, attr, val):\n\n # Automatically convert (void) structured types to records\n # (but not non-void structures, subarrays, or non-structured voids)\n if (\n attr == 'dtype' and\n issubclass(val.type, nt.void) and\n val.names is not None\n ):\n val = sb.dtype((record, val))\n\n newattr = attr not in self.__dict__\n try:\n ret = object.__setattr__(self, attr, val)\n except Exception:\n fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}\n if attr not in fielddict:\n raise\n else:\n fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}\n if attr not in fielddict:\n return ret\n if newattr:\n # We just added this one or this setattr worked on an\n # internal attribute.\n try:\n object.__delattr__(self, attr)\n except Exception:\n return ret\n try:\n res = fielddict[attr][:2]\n except (TypeError, KeyError) as e:\n raise AttributeError(\n f"record array has no attribute {attr}"\n ) from e\n return self.setfield(val, *res)\n\n def __getitem__(self, indx):\n obj = super().__getitem__(indx)\n\n # copy behavior of getattr, except that here\n # we might also be returning a single element\n if isinstance(obj, ndarray):\n if obj.dtype.names is not None:\n obj = obj.view(type(self))\n if issubclass(obj.dtype.type, nt.void):\n return obj.view(dtype=(self.dtype.type, obj.dtype))\n return obj\n else:\n return obj.view(type=ndarray)\n else:\n # return a single element\n return obj\n\n def __repr__(self):\n\n repr_dtype = self.dtype\n if (\n self.dtype.type is record or\n not issubclass(self.dtype.type, 
nt.void)\n ):\n # If this is a full record array (has numpy.record dtype),\n # or if it has a scalar (non-void) dtype with no records,\n # represent it using the rec.array function. Since rec.array\n # converts dtype to a numpy.record for us, convert back\n # to non-record before printing\n if repr_dtype.type is record:\n repr_dtype = sb.dtype((nt.void, repr_dtype))\n prefix = "rec.array("\n fmt = 'rec.array(%s,%sdtype=%s)'\n else:\n # otherwise represent it using np.array plus a view\n # This should only happen if the user is playing\n # strange games with dtypes.\n prefix = "array("\n fmt = 'array(%s,%sdtype=%s).view(numpy.recarray)'\n\n # get data/shape string. logic taken from numeric.array_repr\n if self.size > 0 or self.shape == (0,):\n lst = sb.array2string(\n self, separator=', ', prefix=prefix, suffix=',')\n else:\n # show zero-length shape unless it is (0,)\n lst = f"[], shape={repr(self.shape)}"\n\n lf = '\n' + ' ' * len(prefix)\n if _get_legacy_print_mode() <= 113:\n lf = ' ' + lf # trailing space\n return fmt % (lst, lf, repr_dtype)\n\n def field(self, attr, val=None):\n if isinstance(attr, int):\n names = ndarray.__getattribute__(self, 'dtype').names\n attr = names[attr]\n\n fielddict = ndarray.__getattribute__(self, 'dtype').fields\n\n res = fielddict[attr][:2]\n\n if val is None:\n obj = self.getfield(*res)\n if obj.dtype.names is not None:\n return obj\n return obj.view(ndarray)\n else:\n return self.setfield(val, *res)\n\n\ndef _deprecate_shape_0_as_None(shape):\n if shape == 0:\n warnings.warn(\n "Passing `shape=0` to have the shape be inferred is deprecated, "\n "and in future will be equivalent to `shape=(0,)`. 
To infer "\n "the shape and suppress this warning, pass `shape=None` instead.",\n FutureWarning, stacklevel=3)\n return None\n else:\n return shape\n\n\n@set_module("numpy.rec")\ndef fromarrays(arrayList, dtype=None, shape=None, formats=None,\n names=None, titles=None, aligned=False, byteorder=None):\n """Create a record array from a (flat) list of arrays\n\n Parameters\n ----------\n arrayList : list or tuple\n List of array-like objects (such as lists, tuples,\n and ndarrays).\n dtype : data-type, optional\n valid dtype for all arrays\n shape : int or tuple of ints, optional\n Shape of the resulting array. If not provided, inferred from\n ``arrayList[0]``.\n formats, names, titles, aligned, byteorder :\n If `dtype` is ``None``, these arguments are passed to\n `numpy.rec.format_parser` to construct a dtype. See that function for\n detailed documentation.\n\n Returns\n -------\n np.recarray\n Record array consisting of given arrayList columns.\n\n Examples\n --------\n >>> x1=np.array([1,2,3,4])\n >>> x2=np.array(['a','dd','xyz','12'])\n >>> x3=np.array([1.1,2,3,4])\n >>> r = np.rec.fromarrays([x1,x2,x3],names='a,b,c')\n >>> print(r[1])\n (2, 'dd', 2.0) # may vary\n >>> x1[1]=34\n >>> r.a\n array([1, 2, 3, 4])\n\n >>> x1 = np.array([1, 2, 3, 4])\n >>> x2 = np.array(['a', 'dd', 'xyz', '12'])\n >>> x3 = np.array([1.1, 2, 3,4])\n >>> r = np.rec.fromarrays(\n ... [x1, x2, x3],\n ... dtype=np.dtype([('a', np.int32), ('b', 'S3'), ('c', np.float32)]))\n >>> r\n rec.array([(1, b'a', 1.1), (2, b'dd', 2. ), (3, b'xyz', 3. ),\n (4, b'12', 4. 
)],\n dtype=[('a', '<i4'), ('b', 'S3'), ('c', '<f4')])\n """\n\n arrayList = [sb.asarray(x) for x in arrayList]\n\n # NumPy 1.19.0, 2020-01-01\n shape = _deprecate_shape_0_as_None(shape)\n\n if shape is None:\n shape = arrayList[0].shape\n elif isinstance(shape, int):\n shape = (shape,)\n\n if formats is None and dtype is None:\n # go through each object in the list to see if it is an ndarray\n # and determine the formats.\n formats = [obj.dtype for obj in arrayList]\n\n if dtype is not None:\n descr = sb.dtype(dtype)\n else:\n descr = format_parser(formats, names, titles, aligned, byteorder).dtype\n _names = descr.names\n\n # Determine shape from data-type.\n if len(descr) != len(arrayList):\n raise ValueError("mismatch between the number of fields "\n "and the number of arrays")\n\n d0 = descr[0].shape\n nn = len(d0)\n if nn > 0:\n shape = shape[:-nn]\n\n _array = recarray(shape, descr)\n\n # populate the record array (makes a copy)\n for k, obj in enumerate(arrayList):\n nn = descr[k].ndim\n testshape = obj.shape[:obj.ndim - nn]\n name = _names[k]\n if testshape != shape:\n raise ValueError(f'array-shape mismatch in array {k} ("{name}")')\n\n _array[name] = obj\n\n return _array\n\n\n@set_module("numpy.rec")\ndef fromrecords(recList, dtype=None, shape=None, formats=None, names=None,\n titles=None, aligned=False, byteorder=None):\n """Create a recarray from a list of records in text form.\n\n Parameters\n ----------\n recList : sequence\n data in the same field may be heterogeneous - they will be promoted\n to the highest data type.\n dtype : data-type, optional\n valid dtype for all arrays\n shape : int or tuple of ints, optional\n shape of each array.\n formats, names, titles, aligned, byteorder :\n If `dtype` is ``None``, these arguments are passed to\n `numpy.format_parser` to construct a dtype. See that function for\n detailed documentation.\n\n If both `formats` and `dtype` are None, then this will auto-detect\n formats. 
Use list of tuples rather than list of lists for faster\n processing.\n\n Returns\n -------\n np.recarray\n record array consisting of given recList rows.\n\n Examples\n --------\n >>> r=np.rec.fromrecords([(456,'dbe',1.2),(2,'de',1.3)],\n ... names='col1,col2,col3')\n >>> print(r[0])\n (456, 'dbe', 1.2)\n >>> r.col1\n array([456, 2])\n >>> r.col2\n array(['dbe', 'de'], dtype='<U3')\n >>> import pickle\n >>> pickle.loads(pickle.dumps(r))\n rec.array([(456, 'dbe', 1.2), ( 2, 'de', 1.3)],\n dtype=[('col1', '<i8'), ('col2', '<U3'), ('col3', '<f8')])\n """\n\n if formats is None and dtype is None: # slower\n obj = sb.array(recList, dtype=object)\n arrlist = [\n sb.array(obj[..., i].tolist()) for i in range(obj.shape[-1])\n ]\n return fromarrays(arrlist, formats=formats, shape=shape, names=names,\n titles=titles, aligned=aligned, byteorder=byteorder)\n\n if dtype is not None:\n descr = sb.dtype((record, dtype))\n else:\n descr = format_parser(\n formats, names, titles, aligned, byteorder\n ).dtype\n\n try:\n retval = sb.array(recList, dtype=descr)\n except (TypeError, ValueError):\n # NumPy 1.19.0, 2020-01-01\n shape = _deprecate_shape_0_as_None(shape)\n if shape is None:\n shape = len(recList)\n if isinstance(shape, int):\n shape = (shape,)\n if len(shape) > 1:\n raise ValueError("Can only deal with 1-d array.")\n _array = recarray(shape, descr)\n for k in range(_array.size):\n _array[k] = tuple(recList[k])\n # list of lists instead of list of tuples ?\n # 2018-02-07, 1.14.1\n warnings.warn(\n "fromrecords expected a list of tuples, may have received a list "\n "of lists instead. 
In the future that will raise an error",\n FutureWarning, stacklevel=2)\n return _array\n else:\n if shape is not None and retval.shape != shape:\n retval.shape = shape\n\n res = retval.view(recarray)\n\n return res\n\n\n@set_module("numpy.rec")\ndef fromstring(datastring, dtype=None, shape=None, offset=0, formats=None,\n names=None, titles=None, aligned=False, byteorder=None):\n r"""Create a record array from binary data\n\n Note that despite the name of this function it does not accept `str`\n instances.\n\n Parameters\n ----------\n datastring : bytes-like\n Buffer of binary data\n dtype : data-type, optional\n Valid dtype for all arrays\n shape : int or tuple of ints, optional\n Shape of each array.\n offset : int, optional\n Position in the buffer to start reading from.\n formats, names, titles, aligned, byteorder :\n If `dtype` is ``None``, these arguments are passed to\n `numpy.format_parser` to construct a dtype. See that function for\n detailed documentation.\n\n\n Returns\n -------\n np.recarray\n Record array view into the data in datastring. This will be readonly\n if `datastring` is readonly.\n\n See Also\n --------\n numpy.frombuffer\n\n Examples\n --------\n >>> a = b'\x01\x02\x03abc'\n >>> np.rec.fromstring(a, dtype='u1,u1,u1,S3')\n rec.array([(1, 2, 3, b'abc')],\n dtype=[('f0', 'u1'), ('f1', 'u1'), ('f2', 'u1'), ('f3', 'S3')])\n\n >>> grades_dtype = [('Name', (np.str_, 10)), ('Marks', np.float64),\n ... ('GradeLevel', np.int32)]\n >>> grades_array = np.array([('Sam', 33.3, 3), ('Mike', 44.4, 5),\n ... 
('Aadi', 66.6, 6)], dtype=grades_dtype)\n >>> np.rec.fromstring(grades_array.tobytes(), dtype=grades_dtype)\n rec.array([('Sam', 33.3, 3), ('Mike', 44.4, 5), ('Aadi', 66.6, 6)],\n dtype=[('Name', '<U10'), ('Marks', '<f8'), ('GradeLevel', '<i4')])\n\n >>> s = '\x01\x02\x03abc'\n >>> np.rec.fromstring(s, dtype='u1,u1,u1,S3')\n Traceback (most recent call last):\n ...\n TypeError: a bytes-like object is required, not 'str'\n """\n\n if dtype is None and formats is None:\n raise TypeError("fromstring() needs a 'dtype' or 'formats' argument")\n\n if dtype is not None:\n descr = sb.dtype(dtype)\n else:\n descr = format_parser(formats, names, titles, aligned, byteorder).dtype\n\n itemsize = descr.itemsize\n\n # NumPy 1.19.0, 2020-01-01\n shape = _deprecate_shape_0_as_None(shape)\n\n if shape in (None, -1):\n shape = (len(datastring) - offset) // itemsize\n\n _array = recarray(shape, descr, buf=datastring, offset=offset)\n return _array\n\ndef get_remaining_size(fd):\n pos = fd.tell()\n try:\n fd.seek(0, 2)\n return fd.tell() - pos\n finally:\n fd.seek(pos, 0)\n\n\n@set_module("numpy.rec")\ndef fromfile(fd, dtype=None, shape=None, offset=0, formats=None,\n names=None, titles=None, aligned=False, byteorder=None):\n """Create an array from binary file data\n\n Parameters\n ----------\n fd : str or file type\n If file is a string or a path-like object then that file is opened,\n else it is assumed to be a file object. The file object must\n support random access (i.e. it must have tell and seek methods).\n dtype : data-type, optional\n valid dtype for all arrays\n shape : int or tuple of ints, optional\n shape of each array.\n offset : int, optional\n Position in the file to start reading from.\n formats, names, titles, aligned, byteorder :\n If `dtype` is ``None``, these arguments are passed to\n `numpy.format_parser` to construct a dtype. 
See that function for\n detailed documentation\n\n Returns\n -------\n np.recarray\n record array consisting of data enclosed in file.\n\n Examples\n --------\n >>> from tempfile import TemporaryFile\n >>> a = np.empty(10,dtype='f8,i4,a5')\n >>> a[5] = (0.5,10,'abcde')\n >>>\n >>> fd=TemporaryFile()\n >>> a = a.view(a.dtype.newbyteorder('<'))\n >>> a.tofile(fd)\n >>>\n >>> _ = fd.seek(0)\n >>> r=np.rec.fromfile(fd, formats='f8,i4,a5', shape=10,\n ... byteorder='<')\n >>> print(r[5])\n (0.5, 10, b'abcde')\n >>> r.shape\n (10,)\n """\n\n if dtype is None and formats is None:\n raise TypeError("fromfile() needs a 'dtype' or 'formats' argument")\n\n # NumPy 1.19.0, 2020-01-01\n shape = _deprecate_shape_0_as_None(shape)\n\n if shape is None:\n shape = (-1,)\n elif isinstance(shape, int):\n shape = (shape,)\n\n if hasattr(fd, 'readinto'):\n # GH issue 2504. fd supports io.RawIOBase or io.BufferedIOBase\n # interface. Example of fd: gzip, BytesIO, BufferedReader\n # file already opened\n ctx = nullcontext(fd)\n else:\n # open file\n ctx = open(os.fspath(fd), 'rb')\n\n with ctx as fd:\n if offset > 0:\n fd.seek(offset, 1)\n size = get_remaining_size(fd)\n\n if dtype is not None:\n descr = sb.dtype(dtype)\n else:\n descr = format_parser(\n formats, names, titles, aligned, byteorder\n ).dtype\n\n itemsize = descr.itemsize\n\n shapeprod = sb.array(shape).prod(dtype=nt.intp)\n shapesize = shapeprod * itemsize\n if shapesize < 0:\n shape = list(shape)\n shape[shape.index(-1)] = size // -shapesize\n shape = tuple(shape)\n shapeprod = sb.array(shape).prod(dtype=nt.intp)\n\n nbytes = shapeprod * itemsize\n\n if nbytes > size:\n raise ValueError(\n "Not enough bytes left in file for specified "\n "shape and type."\n )\n\n # create the array\n _array = recarray(shape, descr)\n nbytesread = fd.readinto(_array.data)\n if nbytesread != nbytes:\n raise OSError("Didn't read as many bytes as expected")\n\n return _array\n\n\n@set_module("numpy.rec")\ndef array(obj, dtype=None, shape=None, 
offset=0, strides=None, formats=None,\n names=None, titles=None, aligned=False, byteorder=None, copy=True):\n """\n Construct a record array from a wide-variety of objects.\n\n A general-purpose record array constructor that dispatches to the\n appropriate `recarray` creation function based on the inputs (see Notes).\n\n Parameters\n ----------\n obj : any\n Input object. See Notes for details on how various input types are\n treated.\n dtype : data-type, optional\n Valid dtype for array.\n shape : int or tuple of ints, optional\n Shape of each array.\n offset : int, optional\n Position in the file or buffer to start reading from.\n strides : tuple of ints, optional\n Buffer (`buf`) is interpreted according to these strides (strides\n define how many bytes each array element, row, column, etc.\n occupy in memory).\n formats, names, titles, aligned, byteorder :\n If `dtype` is ``None``, these arguments are passed to\n `numpy.format_parser` to construct a dtype. See that function for\n detailed documentation.\n copy : bool, optional\n Whether to copy the input object (True), or to use a reference instead.\n This option only applies when the input is an ndarray or recarray.\n Defaults to True.\n\n Returns\n -------\n np.recarray\n Record array created from the specified object.\n\n Notes\n -----\n If `obj` is ``None``, then call the `~numpy.recarray` constructor. If\n `obj` is a string, then call the `fromstring` constructor. If `obj` is a\n list or a tuple, then if the first object is an `~numpy.ndarray`, call\n `fromarrays`, otherwise call `fromrecords`. If `obj` is a\n `~numpy.recarray`, then make a copy of the data in the recarray\n (if ``copy=True``) and use the new formats, names, and titles. If `obj`\n is a file, then call `fromfile`. 
Finally, if obj is an `ndarray`, then\n return ``obj.view(recarray)``, making a copy of the data if ``copy=True``.\n\n Examples\n --------\n >>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n >>> a\n array([[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]])\n\n >>> np.rec.array(a)\n rec.array([[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]],\n dtype=int64)\n\n >>> b = [(1, 1), (2, 4), (3, 9)]\n >>> c = np.rec.array(b, formats = ['i2', 'f2'], names = ('x', 'y'))\n >>> c\n rec.array([(1, 1.), (2, 4.), (3, 9.)],\n dtype=[('x', '<i2'), ('y', '<f2')])\n\n >>> c.x\n array([1, 2, 3], dtype=int16)\n\n >>> c.y\n array([1., 4., 9.], dtype=float16)\n\n >>> r = np.rec.array(['abc','def'], names=['col1','col2'])\n >>> print(r.col1)\n abc\n\n >>> r.col1\n array('abc', dtype='<U3')\n\n >>> r.col2\n array('def', dtype='<U3')\n """\n\n if ((isinstance(obj, (type(None), str)) or hasattr(obj, 'readinto')) and\n formats is None and dtype is None):\n raise ValueError("Must define formats (or dtype) if object is "\n "None, string, or an open file")\n\n kwds = {}\n if dtype is not None:\n dtype = sb.dtype(dtype)\n elif formats is not None:\n dtype = format_parser(formats, names, titles,\n aligned, byteorder).dtype\n else:\n kwds = {'formats': formats,\n 'names': names,\n 'titles': titles,\n 'aligned': aligned,\n 'byteorder': byteorder\n }\n\n if obj is None:\n if shape is None:\n raise ValueError("Must define a shape if obj is None")\n return recarray(shape, dtype, buf=obj, offset=offset, strides=strides)\n\n elif isinstance(obj, bytes):\n return fromstring(obj, dtype, shape=shape, offset=offset, **kwds)\n\n elif isinstance(obj, (list, tuple)):\n if isinstance(obj[0], (tuple, list)):\n return fromrecords(obj, dtype=dtype, shape=shape, **kwds)\n else:\n return fromarrays(obj, dtype=dtype, shape=shape, **kwds)\n\n elif isinstance(obj, recarray):\n if dtype is not None and (obj.dtype != dtype):\n new = obj.view(dtype)\n else:\n new = obj\n if copy:\n new = new.copy()\n return new\n\n elif hasattr(obj, 
'readinto'):\n return fromfile(obj, dtype=dtype, shape=shape, offset=offset)\n\n elif isinstance(obj, ndarray):\n if dtype is not None and (obj.dtype != dtype):\n new = obj.view(dtype)\n else:\n new = obj\n if copy:\n new = new.copy()\n return new.view(recarray)\n\n else:\n interface = getattr(obj, "__array_interface__", None)\n if interface is None or not isinstance(interface, dict):\n raise ValueError("Unknown input type")\n obj = sb.array(obj)\n if dtype is not None and (obj.dtype != dtype):\n obj = obj.view(dtype)\n return obj.view(recarray)\n
.venv\Lib\site-packages\numpy\_core\records.py
records.py
Python
37,856
0.95
0.163453
0.078261
python-kit
972
2024-01-05T13:03:03.670778
GPL-3.0
false
c4fe451f78cc42c1990de6ec0f5b7e89
# ruff: noqa: ANN401\n# pyright: reportSelfClsParameterName=false\nfrom collections.abc import Iterable, Sequence\nfrom typing import (\n Any,\n ClassVar,\n Literal,\n Protocol,\n SupportsIndex,\n TypeAlias,\n overload,\n type_check_only,\n)\n\nfrom _typeshed import StrOrBytesPath\nfrom typing_extensions import TypeVar\n\nimport numpy as np\nfrom numpy import _ByteOrder, _OrderKACF, _SupportsBuffer\nfrom numpy._typing import (\n ArrayLike,\n DTypeLike,\n NDArray,\n _AnyShape,\n _ArrayLikeVoid_co,\n _NestedSequence,\n _Shape,\n _ShapeLike,\n)\n\n__all__ = [\n "array",\n "find_duplicate",\n "format_parser",\n "fromarrays",\n "fromfile",\n "fromrecords",\n "fromstring",\n "recarray",\n "record",\n]\n\n_T = TypeVar("_T")\n_ScalarT = TypeVar("_ScalarT", bound=np.generic)\n_DTypeT_co = TypeVar("_DTypeT_co", bound=np.dtype, default=np.dtype, covariant=True)\n_ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True)\n\n_RecArray: TypeAlias = recarray[_AnyShape, np.dtype[_ScalarT]]\n\n@type_check_only\nclass _SupportsReadInto(Protocol):\n def seek(self, offset: int, whence: int, /) -> object: ...\n def tell(self, /) -> int: ...\n def readinto(self, buffer: memoryview, /) -> int: ...\n\n###\n\n# exported in `numpy.rec`\nclass record(np.void):\n def __getattribute__(self, attr: str) -> Any: ...\n def __setattr__(self, attr: str, val: ArrayLike) -> None: ...\n def pprint(self) -> str: ...\n @overload\n def __getitem__(self, key: str | SupportsIndex) -> Any: ...\n @overload\n def __getitem__(self, key: list[str]) -> record: ...\n\n# exported in `numpy.rec`\nclass recarray(np.ndarray[_ShapeT_co, _DTypeT_co]):\n __name__: ClassVar[Literal["record"]] = "record"\n __module__: Literal["numpy"] = "numpy"\n @overload\n def __new__(\n subtype,\n shape: _ShapeLike,\n dtype: None = None,\n buf: _SupportsBuffer | None = None,\n offset: SupportsIndex = 0,\n strides: _ShapeLike | None = None,\n *,\n formats: DTypeLike,\n names: str | Sequence[str] | None = None,\n 
titles: str | Sequence[str] | None = None,\n byteorder: _ByteOrder | None = None,\n aligned: bool = False,\n order: _OrderKACF = "C",\n ) -> _RecArray[record]: ...\n @overload\n def __new__(\n subtype,\n shape: _ShapeLike,\n dtype: DTypeLike,\n buf: _SupportsBuffer | None = None,\n offset: SupportsIndex = 0,\n strides: _ShapeLike | None = None,\n formats: None = None,\n names: None = None,\n titles: None = None,\n byteorder: None = None,\n aligned: Literal[False] = False,\n order: _OrderKACF = "C",\n ) -> _RecArray[Any]: ...\n def __array_finalize__(self, /, obj: object) -> None: ...\n def __getattribute__(self, attr: str, /) -> Any: ...\n def __setattr__(self, attr: str, val: ArrayLike, /) -> None: ...\n\n #\n @overload\n def field(self, /, attr: int | str, val: ArrayLike) -> None: ...\n @overload\n def field(self, /, attr: int | str, val: None = None) -> Any: ...\n\n# exported in `numpy.rec`\nclass format_parser:\n dtype: np.dtype[np.void]\n def __init__(\n self,\n /,\n formats: DTypeLike,\n names: str | Sequence[str] | None,\n titles: str | Sequence[str] | None,\n aligned: bool = False,\n byteorder: _ByteOrder | None = None,\n ) -> None: ...\n\n# exported in `numpy.rec`\n@overload\ndef fromarrays(\n arrayList: Iterable[ArrayLike],\n dtype: DTypeLike | None = None,\n shape: _ShapeLike | None = None,\n formats: None = None,\n names: None = None,\n titles: None = None,\n aligned: bool = False,\n byteorder: None = None,\n) -> _RecArray[Any]: ...\n@overload\ndef fromarrays(\n arrayList: Iterable[ArrayLike],\n dtype: None = None,\n shape: _ShapeLike | None = None,\n *,\n formats: DTypeLike,\n names: str | Sequence[str] | None = None,\n titles: str | Sequence[str] | None = None,\n aligned: bool = False,\n byteorder: _ByteOrder | None = None,\n) -> _RecArray[record]: ...\n\n@overload\ndef fromrecords(\n recList: _ArrayLikeVoid_co | tuple[object, ...] 
| _NestedSequence[tuple[object, ...]],\n dtype: DTypeLike | None = None,\n shape: _ShapeLike | None = None,\n formats: None = None,\n names: None = None,\n titles: None = None,\n aligned: bool = False,\n byteorder: None = None,\n) -> _RecArray[record]: ...\n@overload\ndef fromrecords(\n recList: _ArrayLikeVoid_co | tuple[object, ...] | _NestedSequence[tuple[object, ...]],\n dtype: None = None,\n shape: _ShapeLike | None = None,\n *,\n formats: DTypeLike,\n names: str | Sequence[str] | None = None,\n titles: str | Sequence[str] | None = None,\n aligned: bool = False,\n byteorder: _ByteOrder | None = None,\n) -> _RecArray[record]: ...\n\n# exported in `numpy.rec`\n@overload\ndef fromstring(\n datastring: _SupportsBuffer,\n dtype: DTypeLike,\n shape: _ShapeLike | None = None,\n offset: int = 0,\n formats: None = None,\n names: None = None,\n titles: None = None,\n aligned: bool = False,\n byteorder: None = None,\n) -> _RecArray[record]: ...\n@overload\ndef fromstring(\n datastring: _SupportsBuffer,\n dtype: None = None,\n shape: _ShapeLike | None = None,\n offset: int = 0,\n *,\n formats: DTypeLike,\n names: str | Sequence[str] | None = None,\n titles: str | Sequence[str] | None = None,\n aligned: bool = False,\n byteorder: _ByteOrder | None = None,\n) -> _RecArray[record]: ...\n\n# exported in `numpy.rec`\n@overload\ndef fromfile(\n fd: StrOrBytesPath | _SupportsReadInto,\n dtype: DTypeLike,\n shape: _ShapeLike | None = None,\n offset: int = 0,\n formats: None = None,\n names: None = None,\n titles: None = None,\n aligned: bool = False,\n byteorder: None = None,\n) -> _RecArray[Any]: ...\n@overload\ndef fromfile(\n fd: StrOrBytesPath | _SupportsReadInto,\n dtype: None = None,\n shape: _ShapeLike | None = None,\n offset: int = 0,\n *,\n formats: DTypeLike,\n names: str | Sequence[str] | None = None,\n titles: str | Sequence[str] | None = None,\n aligned: bool = False,\n byteorder: _ByteOrder | None = None,\n) -> _RecArray[record]: ...\n\n# exported in 
`numpy.rec`\n@overload\ndef array(\n obj: _ScalarT | NDArray[_ScalarT],\n dtype: None = None,\n shape: _ShapeLike | None = None,\n offset: int = 0,\n strides: tuple[int, ...] | None = None,\n formats: None = None,\n names: None = None,\n titles: None = None,\n aligned: bool = False,\n byteorder: None = None,\n copy: bool = True,\n) -> _RecArray[_ScalarT]: ...\n@overload\ndef array(\n obj: ArrayLike,\n dtype: DTypeLike,\n shape: _ShapeLike | None = None,\n offset: int = 0,\n strides: tuple[int, ...] | None = None,\n formats: None = None,\n names: None = None,\n titles: None = None,\n aligned: bool = False,\n byteorder: None = None,\n copy: bool = True,\n) -> _RecArray[Any]: ...\n@overload\ndef array(\n obj: ArrayLike,\n dtype: None = None,\n shape: _ShapeLike | None = None,\n offset: int = 0,\n strides: tuple[int, ...] | None = None,\n *,\n formats: DTypeLike,\n names: str | Sequence[str] | None = None,\n titles: str | Sequence[str] | None = None,\n aligned: bool = False,\n byteorder: _ByteOrder | None = None,\n copy: bool = True,\n) -> _RecArray[record]: ...\n@overload\ndef array(\n obj: None,\n dtype: DTypeLike,\n shape: _ShapeLike,\n offset: int = 0,\n strides: tuple[int, ...] | None = None,\n formats: None = None,\n names: None = None,\n titles: None = None,\n aligned: bool = False,\n byteorder: None = None,\n copy: bool = True,\n) -> _RecArray[Any]: ...\n@overload\ndef array(\n obj: None,\n dtype: None = None,\n *,\n shape: _ShapeLike,\n offset: int = 0,\n strides: tuple[int, ...] | None = None,\n formats: DTypeLike,\n names: str | Sequence[str] | None = None,\n titles: str | Sequence[str] | None = None,\n aligned: bool = False,\n byteorder: _ByteOrder | None = None,\n copy: bool = True,\n) -> _RecArray[record]: ...\n@overload\ndef array(\n obj: _SupportsReadInto,\n dtype: DTypeLike,\n shape: _ShapeLike | None = None,\n offset: int = 0,\n strides: tuple[int, ...] 
| None = None,\n formats: None = None,\n names: None = None,\n titles: None = None,\n aligned: bool = False,\n byteorder: None = None,\n copy: bool = True,\n) -> _RecArray[Any]: ...\n@overload\ndef array(\n obj: _SupportsReadInto,\n dtype: None = None,\n shape: _ShapeLike | None = None,\n offset: int = 0,\n strides: tuple[int, ...] | None = None,\n *,\n formats: DTypeLike,\n names: str | Sequence[str] | None = None,\n titles: str | Sequence[str] | None = None,\n aligned: bool = False,\n byteorder: _ByteOrder | None = None,\n copy: bool = True,\n) -> _RecArray[record]: ...\n\n# exported in `numpy.rec`\ndef find_duplicate(list: Iterable[_T]) -> list[_T]: ...\n
.venv\Lib\site-packages\numpy\_core\records.pyi
records.pyi
Other
9,268
0.95
0.108108
0.063291
awesome-app
314
2024-04-13T13:33:10.437196
BSD-3-Clause
false
bdcc9d3ad9713276da7f13fb9804aed3
__all__ = ['atleast_1d', 'atleast_2d', 'atleast_3d', 'block', 'hstack',\n 'stack', 'unstack', 'vstack']\n\nimport functools\nimport itertools\nimport operator\n\nfrom . import fromnumeric as _from_nx\nfrom . import numeric as _nx\nfrom . import overrides\nfrom .multiarray import array, asanyarray, normalize_axis_index\n\narray_function_dispatch = functools.partial(\n overrides.array_function_dispatch, module='numpy')\n\n\ndef _atleast_1d_dispatcher(*arys):\n return arys\n\n\n@array_function_dispatch(_atleast_1d_dispatcher)\ndef atleast_1d(*arys):\n """\n Convert inputs to arrays with at least one dimension.\n\n Scalar inputs are converted to 1-dimensional arrays, whilst\n higher-dimensional inputs are preserved.\n\n Parameters\n ----------\n arys1, arys2, ... : array_like\n One or more input arrays.\n\n Returns\n -------\n ret : ndarray\n An array, or tuple of arrays, each with ``a.ndim >= 1``.\n Copies are made only if necessary.\n\n See Also\n --------\n atleast_2d, atleast_3d\n\n Examples\n --------\n >>> import numpy as np\n >>> np.atleast_1d(1.0)\n array([1.])\n\n >>> x = np.arange(9.0).reshape(3,3)\n >>> np.atleast_1d(x)\n array([[0., 1., 2.],\n [3., 4., 5.],\n [6., 7., 8.]])\n >>> np.atleast_1d(x) is x\n True\n\n >>> np.atleast_1d(1, [3, 4])\n (array([1]), array([3, 4]))\n\n """\n if len(arys) == 1:\n result = asanyarray(arys[0])\n if result.ndim == 0:\n result = result.reshape(1)\n return result\n res = []\n for ary in arys:\n result = asanyarray(ary)\n if result.ndim == 0:\n result = result.reshape(1)\n res.append(result)\n return tuple(res)\n\n\ndef _atleast_2d_dispatcher(*arys):\n return arys\n\n\n@array_function_dispatch(_atleast_2d_dispatcher)\ndef atleast_2d(*arys):\n """\n View inputs as arrays with at least two dimensions.\n\n Parameters\n ----------\n arys1, arys2, ... : array_like\n One or more array-like sequences. Non-array inputs are converted\n to arrays. 
Arrays that already have two or more dimensions are\n preserved.\n\n Returns\n -------\n res, res2, ... : ndarray\n An array, or tuple of arrays, each with ``a.ndim >= 2``.\n Copies are avoided where possible, and views with two or more\n dimensions are returned.\n\n See Also\n --------\n atleast_1d, atleast_3d\n\n Examples\n --------\n >>> import numpy as np\n >>> np.atleast_2d(3.0)\n array([[3.]])\n\n >>> x = np.arange(3.0)\n >>> np.atleast_2d(x)\n array([[0., 1., 2.]])\n >>> np.atleast_2d(x).base is x\n True\n\n >>> np.atleast_2d(1, [1, 2], [[1, 2]])\n (array([[1]]), array([[1, 2]]), array([[1, 2]]))\n\n """\n res = []\n for ary in arys:\n ary = asanyarray(ary)\n if ary.ndim == 0:\n result = ary.reshape(1, 1)\n elif ary.ndim == 1:\n result = ary[_nx.newaxis, :]\n else:\n result = ary\n res.append(result)\n if len(res) == 1:\n return res[0]\n else:\n return tuple(res)\n\n\ndef _atleast_3d_dispatcher(*arys):\n return arys\n\n\n@array_function_dispatch(_atleast_3d_dispatcher)\ndef atleast_3d(*arys):\n """\n View inputs as arrays with at least three dimensions.\n\n Parameters\n ----------\n arys1, arys2, ... : array_like\n One or more array-like sequences. Non-array inputs are converted to\n arrays. Arrays that already have three or more dimensions are\n preserved.\n\n Returns\n -------\n res1, res2, ... : ndarray\n An array, or tuple of arrays, each with ``a.ndim >= 3``. Copies are\n avoided where possible, and views with three or more dimensions are\n returned. 
For example, a 1-D array of shape ``(N,)`` becomes a view\n of shape ``(1, N, 1)``, and a 2-D array of shape ``(M, N)`` becomes a\n view of shape ``(M, N, 1)``.\n\n See Also\n --------\n atleast_1d, atleast_2d\n\n Examples\n --------\n >>> import numpy as np\n >>> np.atleast_3d(3.0)\n array([[[3.]]])\n\n >>> x = np.arange(3.0)\n >>> np.atleast_3d(x).shape\n (1, 3, 1)\n\n >>> x = np.arange(12.0).reshape(4,3)\n >>> np.atleast_3d(x).shape\n (4, 3, 1)\n >>> np.atleast_3d(x).base is x.base # x is a reshape, so not base itself\n True\n\n >>> for arr in np.atleast_3d([1, 2], [[1, 2]], [[[1, 2]]]):\n ... print(arr, arr.shape) # doctest: +SKIP\n ...\n [[[1]\n [2]]] (1, 2, 1)\n [[[1]\n [2]]] (1, 2, 1)\n [[[1 2]]] (1, 1, 2)\n\n """\n res = []\n for ary in arys:\n ary = asanyarray(ary)\n if ary.ndim == 0:\n result = ary.reshape(1, 1, 1)\n elif ary.ndim == 1:\n result = ary[_nx.newaxis, :, _nx.newaxis]\n elif ary.ndim == 2:\n result = ary[:, :, _nx.newaxis]\n else:\n result = ary\n res.append(result)\n if len(res) == 1:\n return res[0]\n else:\n return tuple(res)\n\n\ndef _arrays_for_stack_dispatcher(arrays):\n if not hasattr(arrays, "__getitem__"):\n raise TypeError('arrays to stack must be passed as a "sequence" type '\n 'such as list or tuple.')\n\n return tuple(arrays)\n\n\ndef _vhstack_dispatcher(tup, *, dtype=None, casting=None):\n return _arrays_for_stack_dispatcher(tup)\n\n\n@array_function_dispatch(_vhstack_dispatcher)\ndef vstack(tup, *, dtype=None, casting="same_kind"):\n """\n Stack arrays in sequence vertically (row wise).\n\n This is equivalent to concatenation along the first axis after 1-D arrays\n of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by\n `vsplit`.\n\n This function makes most sense for arrays with up to 3 dimensions. For\n instance, for pixel-data with a height (first axis), width (second axis),\n and r/g/b channels (third axis). 
The functions `concatenate`, `stack` and\n `block` provide more general stacking and concatenation operations.\n\n Parameters\n ----------\n tup : sequence of ndarrays\n The arrays must have the same shape along all but the first axis.\n 1-D arrays must have the same length. In the case of a single\n array_like input, it will be treated as a sequence of arrays; i.e.,\n each element along the zeroth axis is treated as a separate array.\n\n dtype : str or dtype\n If provided, the destination array will have this dtype. Cannot be\n provided together with `out`.\n\n .. versionadded:: 1.24\n\n casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional\n Controls what kind of data casting may occur. Defaults to 'same_kind'.\n\n .. versionadded:: 1.24\n\n Returns\n -------\n stacked : ndarray\n The array formed by stacking the given arrays, will be at least 2-D.\n\n See Also\n --------\n concatenate : Join a sequence of arrays along an existing axis.\n stack : Join a sequence of arrays along a new axis.\n block : Assemble an nd-array from nested lists of blocks.\n hstack : Stack arrays in sequence horizontally (column wise).\n dstack : Stack arrays in sequence depth wise (along third axis).\n column_stack : Stack 1-D arrays as columns into a 2-D array.\n vsplit : Split an array into multiple sub-arrays vertically (row-wise).\n unstack : Split an array into a tuple of sub-arrays along an axis.\n\n Examples\n --------\n >>> import numpy as np\n >>> a = np.array([1, 2, 3])\n >>> b = np.array([4, 5, 6])\n >>> np.vstack((a,b))\n array([[1, 2, 3],\n [4, 5, 6]])\n\n >>> a = np.array([[1], [2], [3]])\n >>> b = np.array([[4], [5], [6]])\n >>> np.vstack((a,b))\n array([[1],\n [2],\n [3],\n [4],\n [5],\n [6]])\n\n """\n arrs = atleast_2d(*tup)\n if not isinstance(arrs, tuple):\n arrs = (arrs,)\n return _nx.concatenate(arrs, 0, dtype=dtype, casting=casting)\n\n\n@array_function_dispatch(_vhstack_dispatcher)\ndef hstack(tup, *, dtype=None, casting="same_kind"):\n """\n Stack 
arrays in sequence horizontally (column wise).\n\n This is equivalent to concatenation along the second axis, except for 1-D\n arrays where it concatenates along the first axis. Rebuilds arrays divided\n by `hsplit`.\n\n This function makes most sense for arrays with up to 3 dimensions. For\n instance, for pixel-data with a height (first axis), width (second axis),\n and r/g/b channels (third axis). The functions `concatenate`, `stack` and\n `block` provide more general stacking and concatenation operations.\n\n Parameters\n ----------\n tup : sequence of ndarrays\n The arrays must have the same shape along all but the second axis,\n except 1-D arrays which can be any length. In the case of a single\n array_like input, it will be treated as a sequence of arrays; i.e.,\n each element along the zeroth axis is treated as a separate array.\n\n dtype : str or dtype\n If provided, the destination array will have this dtype. Cannot be\n provided together with `out`.\n\n .. versionadded:: 1.24\n\n casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional\n Controls what kind of data casting may occur. Defaults to 'same_kind'.\n\n .. 
versionadded:: 1.24\n\n Returns\n -------\n stacked : ndarray\n The array formed by stacking the given arrays.\n\n See Also\n --------\n concatenate : Join a sequence of arrays along an existing axis.\n stack : Join a sequence of arrays along a new axis.\n block : Assemble an nd-array from nested lists of blocks.\n vstack : Stack arrays in sequence vertically (row wise).\n dstack : Stack arrays in sequence depth wise (along third axis).\n column_stack : Stack 1-D arrays as columns into a 2-D array.\n hsplit : Split an array into multiple sub-arrays\n horizontally (column-wise).\n unstack : Split an array into a tuple of sub-arrays along an axis.\n\n Examples\n --------\n >>> import numpy as np\n >>> a = np.array((1,2,3))\n >>> b = np.array((4,5,6))\n >>> np.hstack((a,b))\n array([1, 2, 3, 4, 5, 6])\n >>> a = np.array([[1],[2],[3]])\n >>> b = np.array([[4],[5],[6]])\n >>> np.hstack((a,b))\n array([[1, 4],\n [2, 5],\n [3, 6]])\n\n """\n arrs = atleast_1d(*tup)\n if not isinstance(arrs, tuple):\n arrs = (arrs,)\n # As a special case, dimension 0 of 1-dimensional arrays is "horizontal"\n if arrs and arrs[0].ndim == 1:\n return _nx.concatenate(arrs, 0, dtype=dtype, casting=casting)\n else:\n return _nx.concatenate(arrs, 1, dtype=dtype, casting=casting)\n\n\ndef _stack_dispatcher(arrays, axis=None, out=None, *,\n dtype=None, casting=None):\n arrays = _arrays_for_stack_dispatcher(arrays)\n if out is not None:\n # optimize for the typical case where only arrays is provided\n arrays = list(arrays)\n arrays.append(out)\n return arrays\n\n\n@array_function_dispatch(_stack_dispatcher)\ndef stack(arrays, axis=0, out=None, *, dtype=None, casting="same_kind"):\n """\n Join a sequence of arrays along a new axis.\n\n The ``axis`` parameter specifies the index of the new axis in the\n dimensions of the result. 
For example, if ``axis=0`` it will be the first\n dimension and if ``axis=-1`` it will be the last dimension.\n\n Parameters\n ----------\n arrays : sequence of ndarrays\n Each array must have the same shape. In the case of a single ndarray\n array_like input, it will be treated as a sequence of arrays; i.e.,\n each element along the zeroth axis is treated as a separate array.\n\n axis : int, optional\n The axis in the result array along which the input arrays are stacked.\n\n out : ndarray, optional\n If provided, the destination to place the result. The shape must be\n correct, matching that of what stack would have returned if no\n out argument were specified.\n\n dtype : str or dtype\n If provided, the destination array will have this dtype. Cannot be\n provided together with `out`.\n\n .. versionadded:: 1.24\n\n casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional\n Controls what kind of data casting may occur. Defaults to 'same_kind'.\n\n .. versionadded:: 1.24\n\n\n Returns\n -------\n stacked : ndarray\n The stacked array has one more dimension than the input arrays.\n\n See Also\n --------\n concatenate : Join a sequence of arrays along an existing axis.\n block : Assemble an nd-array from nested lists of blocks.\n split : Split array into a list of multiple sub-arrays of equal size.\n unstack : Split an array into a tuple of sub-arrays along an axis.\n\n Examples\n --------\n >>> import numpy as np\n >>> rng = np.random.default_rng()\n >>> arrays = [rng.normal(size=(3,4)) for _ in range(10)]\n >>> np.stack(arrays, axis=0).shape\n (10, 3, 4)\n\n >>> np.stack(arrays, axis=1).shape\n (3, 10, 4)\n\n >>> np.stack(arrays, axis=2).shape\n (3, 4, 10)\n\n >>> a = np.array([1, 2, 3])\n >>> b = np.array([4, 5, 6])\n >>> np.stack((a, b))\n array([[1, 2, 3],\n [4, 5, 6]])\n\n >>> np.stack((a, b), axis=-1)\n array([[1, 4],\n [2, 5],\n [3, 6]])\n\n """\n arrays = [asanyarray(arr) for arr in arrays]\n if not arrays:\n raise ValueError('need at least one 
array to stack')\n\n shapes = {arr.shape for arr in arrays}\n if len(shapes) != 1:\n raise ValueError('all input arrays must have the same shape')\n\n result_ndim = arrays[0].ndim + 1\n axis = normalize_axis_index(axis, result_ndim)\n\n sl = (slice(None),) * axis + (_nx.newaxis,)\n expanded_arrays = [arr[sl] for arr in arrays]\n return _nx.concatenate(expanded_arrays, axis=axis, out=out,\n dtype=dtype, casting=casting)\n\ndef _unstack_dispatcher(x, /, *, axis=None):\n return (x,)\n\n@array_function_dispatch(_unstack_dispatcher)\ndef unstack(x, /, *, axis=0):\n """\n Split an array into a sequence of arrays along the given axis.\n\n The ``axis`` parameter specifies the dimension along which the array will\n be split. For example, if ``axis=0`` (the default) it will be the first\n dimension and if ``axis=-1`` it will be the last dimension.\n\n The result is a tuple of arrays split along ``axis``.\n\n .. versionadded:: 2.1.0\n\n Parameters\n ----------\n x : ndarray\n The array to be unstacked.\n axis : int, optional\n Axis along which the array will be split. 
Default: ``0``.\n\n Returns\n -------\n unstacked : tuple of ndarrays\n The unstacked arrays.\n\n See Also\n --------\n stack : Join a sequence of arrays along a new axis.\n concatenate : Join a sequence of arrays along an existing axis.\n block : Assemble an nd-array from nested lists of blocks.\n split : Split array into a list of multiple sub-arrays of equal size.\n\n Notes\n -----\n ``unstack`` serves as the reverse operation of :py:func:`stack`, i.e.,\n ``stack(unstack(x, axis=axis), axis=axis) == x``.\n\n This function is equivalent to ``tuple(np.moveaxis(x, axis, 0))``, since\n iterating on an array iterates along the first axis.\n\n Examples\n --------\n >>> arr = np.arange(24).reshape((2, 3, 4))\n >>> np.unstack(arr)\n (array([[ 0, 1, 2, 3],\n [ 4, 5, 6, 7],\n [ 8, 9, 10, 11]]),\n array([[12, 13, 14, 15],\n [16, 17, 18, 19],\n [20, 21, 22, 23]]))\n >>> np.unstack(arr, axis=1)\n (array([[ 0, 1, 2, 3],\n [12, 13, 14, 15]]),\n array([[ 4, 5, 6, 7],\n [16, 17, 18, 19]]),\n array([[ 8, 9, 10, 11],\n [20, 21, 22, 23]]))\n >>> arr2 = np.stack(np.unstack(arr, axis=1), axis=1)\n >>> arr2.shape\n (2, 3, 4)\n >>> np.all(arr == arr2)\n np.True_\n\n """\n if x.ndim == 0:\n raise ValueError("Input array must be at least 1-d.")\n return tuple(_nx.moveaxis(x, axis, 0))\n\n\n# Internal functions to eliminate the overhead of repeated dispatch in one of\n# the two possible paths inside np.block.\n# Use getattr to protect against __array_function__ being disabled.\n_size = getattr(_from_nx.size, '__wrapped__', _from_nx.size)\n_ndim = getattr(_from_nx.ndim, '__wrapped__', _from_nx.ndim)\n_concatenate = getattr(_from_nx.concatenate,\n '__wrapped__', _from_nx.concatenate)\n\n\ndef _block_format_index(index):\n """\n Convert a list of indices ``[0, 1, 2]`` into ``"arrays[0][1][2]"``.\n """\n idx_str = ''.join(f'[{i}]' for i in index if i is not None)\n return 'arrays' + idx_str\n\n\ndef _block_check_depths_match(arrays, parent_index=[]):\n """\n Recursive function checking that 
the depths of nested lists in `arrays`\n all match. Mismatch raises a ValueError as described in the block\n docstring below.\n\n The entire index (rather than just the depth) needs to be calculated\n for each innermost list, in case an error needs to be raised, so that\n the index of the offending list can be printed as part of the error.\n\n Parameters\n ----------\n arrays : nested list of arrays\n The arrays to check\n parent_index : list of int\n The full index of `arrays` within the nested lists passed to\n `_block_check_depths_match` at the top of the recursion.\n\n Returns\n -------\n first_index : list of int\n The full index of an element from the bottom of the nesting in\n `arrays`. If any element at the bottom is an empty list, this will\n refer to it, and the last index along the empty axis will be None.\n max_arr_ndim : int\n The maximum of the ndims of the arrays nested in `arrays`.\n final_size: int\n The number of elements in the final array. This is used the motivate\n the choice of algorithm used using benchmarking wisdom.\n\n """\n if isinstance(arrays, tuple):\n # not strictly necessary, but saves us from:\n # - more than one way to do things - no point treating tuples like\n # lists\n # - horribly confusing behaviour that results when tuples are\n # treated like ndarray\n raise TypeError(\n f'{_block_format_index(parent_index)} is a tuple. '\n 'Only lists can be used to arrange blocks, and np.block does '\n 'not allow implicit conversion from tuple to ndarray.'\n )\n elif isinstance(arrays, list) and len(arrays) > 0:\n idxs_ndims = (_block_check_depths_match(arr, parent_index + [i])\n for i, arr in enumerate(arrays))\n\n first_index, max_arr_ndim, final_size = next(idxs_ndims)\n for index, ndim, size in idxs_ndims:\n final_size += size\n if ndim > max_arr_ndim:\n max_arr_ndim = ndim\n if len(index) != len(first_index):\n raise ValueError(\n "List depths are mismatched. 
First element was at "\n f"depth {len(first_index)}, but there is an element at "\n f"depth {len(index)} ({_block_format_index(index)})"\n )\n # propagate our flag that indicates an empty list at the bottom\n if index[-1] is None:\n first_index = index\n\n return first_index, max_arr_ndim, final_size\n elif isinstance(arrays, list) and len(arrays) == 0:\n # We've 'bottomed out' on an empty list\n return parent_index + [None], 0, 0\n else:\n # We've 'bottomed out' - arrays is either a scalar or an array\n size = _size(arrays)\n return parent_index, _ndim(arrays), size\n\n\ndef _atleast_nd(a, ndim):\n # Ensures `a` has at least `ndim` dimensions by prepending\n # ones to `a.shape` as necessary\n return array(a, ndmin=ndim, copy=None, subok=True)\n\n\ndef _accumulate(values):\n return list(itertools.accumulate(values))\n\n\ndef _concatenate_shapes(shapes, axis):\n """Given array shapes, return the resulting shape and slices prefixes.\n\n These help in nested concatenation.\n\n Returns\n -------\n shape: tuple of int\n This tuple satisfies::\n\n shape, _ = _concatenate_shapes([arr.shape for shape in arrs], axis)\n shape == concatenate(arrs, axis).shape\n\n slice_prefixes: tuple of (slice(start, end), )\n For a list of arrays being concatenated, this returns the slice\n in the larger array at axis that needs to be sliced into.\n\n For example, the following holds::\n\n ret = concatenate([a, b, c], axis)\n _, (sl_a, sl_b, sl_c) = concatenate_slices([a, b, c], axis)\n\n ret[(slice(None),) * axis + sl_a] == a\n ret[(slice(None),) * axis + sl_b] == b\n ret[(slice(None),) * axis + sl_c] == c\n\n These are called slice prefixes since they are used in the recursive\n blocking algorithm to compute the left-most slices during the\n recursion. 
Therefore, they must be prepended to rest of the slice\n that was computed deeper in the recursion.\n\n These are returned as tuples to ensure that they can quickly be added\n to existing slice tuple without creating a new tuple every time.\n\n """\n # Cache a result that will be reused.\n shape_at_axis = [shape[axis] for shape in shapes]\n\n # Take a shape, any shape\n first_shape = shapes[0]\n first_shape_pre = first_shape[:axis]\n first_shape_post = first_shape[axis + 1:]\n\n if any(shape[:axis] != first_shape_pre or\n shape[axis + 1:] != first_shape_post for shape in shapes):\n raise ValueError(\n f'Mismatched array shapes in block along axis {axis}.')\n\n shape = (first_shape_pre + (sum(shape_at_axis),) + first_shape[axis + 1:])\n\n offsets_at_axis = _accumulate(shape_at_axis)\n slice_prefixes = [(slice(start, end),)\n for start, end in zip([0] + offsets_at_axis,\n offsets_at_axis)]\n return shape, slice_prefixes\n\n\ndef _block_info_recursion(arrays, max_depth, result_ndim, depth=0):\n """\n Returns the shape of the final array, along with a list\n of slices and a list of arrays that can be used for assignment inside the\n new array\n\n Parameters\n ----------\n arrays : nested list of arrays\n The arrays to check\n max_depth : list of int\n The number of nested lists\n result_ndim : int\n The number of dimensions in thefinal array.\n\n Returns\n -------\n shape : tuple of int\n The shape that the final array will take on.\n slices: list of tuple of slices\n The slices into the full array required for assignment. 
These are\n required to be prepended with ``(Ellipsis, )`` to obtain to correct\n final index.\n arrays: list of ndarray\n The data to assign to each slice of the full array\n\n """\n if depth < max_depth:\n shapes, slices, arrays = zip(\n *[_block_info_recursion(arr, max_depth, result_ndim, depth + 1)\n for arr in arrays])\n\n axis = result_ndim - max_depth + depth\n shape, slice_prefixes = _concatenate_shapes(shapes, axis)\n\n # Prepend the slice prefix and flatten the slices\n slices = [slice_prefix + the_slice\n for slice_prefix, inner_slices in zip(slice_prefixes, slices)\n for the_slice in inner_slices]\n\n # Flatten the array list\n arrays = functools.reduce(operator.add, arrays)\n\n return shape, slices, arrays\n else:\n # We've 'bottomed out' - arrays is either a scalar or an array\n # type(arrays) is not list\n # Return the slice and the array inside a list to be consistent with\n # the recursive case.\n arr = _atleast_nd(arrays, result_ndim)\n return arr.shape, [()], [arr]\n\n\ndef _block(arrays, max_depth, result_ndim, depth=0):\n """\n Internal implementation of block based on repeated concatenation.\n `arrays` is the argument passed to\n block. `max_depth` is the depth of nested lists within `arrays` and\n `result_ndim` is the greatest of the dimensions of the arrays in\n `arrays` and the depth of the lists in `arrays` (see block docstring\n for details).\n """\n if depth < max_depth:\n arrs = [_block(arr, max_depth, result_ndim, depth + 1)\n for arr in arrays]\n return _concatenate(arrs, axis=-(max_depth - depth))\n else:\n # We've 'bottomed out' - arrays is either a scalar or an array\n # type(arrays) is not list\n return _atleast_nd(arrays, result_ndim)\n\n\ndef _block_dispatcher(arrays):\n # Use type(...) is list to match the behavior of np.block(), which special\n # cases list specifically rather than allowing for generic iterables or\n # tuple. 
Also, we know that list.__array_function__ will never exist.\n if isinstance(arrays, list):\n for subarrays in arrays:\n yield from _block_dispatcher(subarrays)\n else:\n yield arrays\n\n\n@array_function_dispatch(_block_dispatcher)\ndef block(arrays):\n """\n Assemble an nd-array from nested lists of blocks.\n\n Blocks in the innermost lists are concatenated (see `concatenate`) along\n the last dimension (-1), then these are concatenated along the\n second-last dimension (-2), and so on until the outermost list is reached.\n\n Blocks can be of any dimension, but will not be broadcasted using\n the normal rules. Instead, leading axes of size 1 are inserted,\n to make ``block.ndim`` the same for all blocks. This is primarily useful\n for working with scalars, and means that code like ``np.block([v, 1])``\n is valid, where ``v.ndim == 1``.\n\n When the nested list is two levels deep, this allows block matrices to be\n constructed from their components.\n\n Parameters\n ----------\n arrays : nested list of array_like or scalars (but not tuples)\n If passed a single ndarray or scalar (a nested list of depth 0), this\n is returned unmodified (and not copied).\n\n Elements shapes must match along the appropriate axes (without\n broadcasting), but leading 1s will be prepended to the shape as\n necessary to make the dimensions match.\n\n Returns\n -------\n block_array : ndarray\n The array assembled from the given blocks.\n\n The dimensionality of the output is equal to the greatest of:\n\n * the dimensionality of all the inputs\n * the depth to which the input list is nested\n\n Raises\n ------\n ValueError\n * If list depths are mismatched - for instance, ``[[a, b], c]`` is\n illegal, and should be spelt ``[[a, b], [c]]``\n * If lists are empty - for instance, ``[[a, b], []]``\n\n See Also\n --------\n concatenate : Join a sequence of arrays along an existing axis.\n stack : Join a sequence of arrays along a new axis.\n vstack : Stack arrays in sequence vertically (row 
wise).\n hstack : Stack arrays in sequence horizontally (column wise).\n dstack : Stack arrays in sequence depth wise (along third axis).\n column_stack : Stack 1-D arrays as columns into a 2-D array.\n vsplit : Split an array into multiple sub-arrays vertically (row-wise).\n unstack : Split an array into a tuple of sub-arrays along an axis.\n\n Notes\n -----\n When called with only scalars, ``np.block`` is equivalent to an ndarray\n call. So ``np.block([[1, 2], [3, 4]])`` is equivalent to\n ``np.array([[1, 2], [3, 4]])``.\n\n This function does not enforce that the blocks lie on a fixed grid.\n ``np.block([[a, b], [c, d]])`` is not restricted to arrays of the form::\n\n AAAbb\n AAAbb\n cccDD\n\n But is also allowed to produce, for some ``a, b, c, d``::\n\n AAAbb\n AAAbb\n cDDDD\n\n Since concatenation happens along the last axis first, `block` is *not*\n capable of producing the following directly::\n\n AAAbb\n cccbb\n cccDD\n\n Matlab's "square bracket stacking", ``[A, B, ...; p, q, ...]``, is\n equivalent to ``np.block([[A, B, ...], [p, q, ...]])``.\n\n Examples\n --------\n The most common use of this function is to build a block matrix:\n\n >>> import numpy as np\n >>> A = np.eye(2) * 2\n >>> B = np.eye(3) * 3\n >>> np.block([\n ... [A, np.zeros((2, 3))],\n ... [np.ones((3, 2)), B ]\n ... 
])\n array([[2., 0., 0., 0., 0.],\n [0., 2., 0., 0., 0.],\n [1., 1., 3., 0., 0.],\n [1., 1., 0., 3., 0.],\n [1., 1., 0., 0., 3.]])\n\n With a list of depth 1, `block` can be used as `hstack`:\n\n >>> np.block([1, 2, 3]) # hstack([1, 2, 3])\n array([1, 2, 3])\n\n >>> a = np.array([1, 2, 3])\n >>> b = np.array([4, 5, 6])\n >>> np.block([a, b, 10]) # hstack([a, b, 10])\n array([ 1, 2, 3, 4, 5, 6, 10])\n\n >>> A = np.ones((2, 2), int)\n >>> B = 2 * A\n >>> np.block([A, B]) # hstack([A, B])\n array([[1, 1, 2, 2],\n [1, 1, 2, 2]])\n\n With a list of depth 2, `block` can be used in place of `vstack`:\n\n >>> a = np.array([1, 2, 3])\n >>> b = np.array([4, 5, 6])\n >>> np.block([[a], [b]]) # vstack([a, b])\n array([[1, 2, 3],\n [4, 5, 6]])\n\n >>> A = np.ones((2, 2), int)\n >>> B = 2 * A\n >>> np.block([[A], [B]]) # vstack([A, B])\n array([[1, 1],\n [1, 1],\n [2, 2],\n [2, 2]])\n\n It can also be used in place of `atleast_1d` and `atleast_2d`:\n\n >>> a = np.array(0)\n >>> b = np.array([1])\n >>> np.block([a]) # atleast_1d(a)\n array([0])\n >>> np.block([b]) # atleast_1d(b)\n array([1])\n\n >>> np.block([[a]]) # atleast_2d(a)\n array([[0]])\n >>> np.block([[b]]) # atleast_2d(b)\n array([[1]])\n\n\n """\n arrays, list_ndim, result_ndim, final_size = _block_setup(arrays)\n\n # It was found through benchmarking that making an array of final size\n # around 256x256 was faster by straight concatenation on a\n # i7-7700HQ processor and dual channel ram 2400MHz.\n # It didn't seem to matter heavily on the dtype used.\n #\n # A 2D array using repeated concatenation requires 2 copies of the array.\n #\n # The fastest algorithm will depend on the ratio of CPU power to memory\n # speed.\n # One can monitor the results of the benchmark\n # https://pv.github.io/numpy-bench/#bench_shape_base.Block2D.time_block2d\n # to tune this parameter until a C version of the `_block_info_recursion`\n # algorithm is implemented which would likely be faster than the python\n # version.\n if list_ndim 
* final_size > (2 * 512 * 512):\n return _block_slicing(arrays, list_ndim, result_ndim)\n else:\n return _block_concatenate(arrays, list_ndim, result_ndim)\n\n\n# These helper functions are mostly used for testing.\n# They allow us to write tests that directly call `_block_slicing`\n# or `_block_concatenate` without blocking large arrays to force the wisdom\n# to trigger the desired path.\ndef _block_setup(arrays):\n """\n Returns\n (`arrays`, list_ndim, result_ndim, final_size)\n """\n bottom_index, arr_ndim, final_size = _block_check_depths_match(arrays)\n list_ndim = len(bottom_index)\n if bottom_index and bottom_index[-1] is None:\n raise ValueError(\n f'List at {_block_format_index(bottom_index)} cannot be empty'\n )\n result_ndim = max(arr_ndim, list_ndim)\n return arrays, list_ndim, result_ndim, final_size\n\n\ndef _block_slicing(arrays, list_ndim, result_ndim):\n shape, slices, arrays = _block_info_recursion(\n arrays, list_ndim, result_ndim)\n dtype = _nx.result_type(*[arr.dtype for arr in arrays])\n\n # Test preferring F only in the case that all input arrays are F\n F_order = all(arr.flags['F_CONTIGUOUS'] for arr in arrays)\n C_order = all(arr.flags['C_CONTIGUOUS'] for arr in arrays)\n order = 'F' if F_order and not C_order else 'C'\n result = _nx.empty(shape=shape, dtype=dtype, order=order)\n # Note: In a c implementation, the function\n # PyArray_CreateMultiSortedStridePerm could be used for more advanced\n # guessing of the desired order.\n\n for the_slice, arr in zip(slices, arrays):\n result[(Ellipsis,) + the_slice] = arr\n return result\n\n\ndef _block_concatenate(arrays, list_ndim, result_ndim):\n result = _block(arrays, list_ndim, result_ndim)\n if list_ndim == 0:\n # Catch an edge case where _block returns a view because\n # `arrays` is a single numpy array and not a list of numpy arrays.\n # This might copy scalars or lists twice, but this isn't a likely\n # usecase for those interested in performance\n result = result.copy()\n return result\n
.venv\Lib\site-packages\numpy\_core\shape_base.py
shape_base.py
Python
33,736
0.95
0.11022
0.073383
react-lib
512
2025-05-28T16:10:44.996036
GPL-3.0
false
82fc6b317d7a43d360a4be1cfe8bc1c2
from collections.abc import Sequence\nfrom typing import Any, SupportsIndex, TypeVar, overload\n\nfrom numpy import _CastingKind, generic\nfrom numpy._typing import ArrayLike, DTypeLike, NDArray, _ArrayLike, _DTypeLike\n\n__all__ = [\n "atleast_1d",\n "atleast_2d",\n "atleast_3d",\n "block",\n "hstack",\n "stack",\n "unstack",\n "vstack",\n]\n\n_ScalarT = TypeVar("_ScalarT", bound=generic)\n_ScalarT1 = TypeVar("_ScalarT1", bound=generic)\n_ScalarT2 = TypeVar("_ScalarT2", bound=generic)\n_ArrayT = TypeVar("_ArrayT", bound=NDArray[Any])\n\n###\n\n@overload\ndef atleast_1d(a0: _ArrayLike[_ScalarT], /) -> NDArray[_ScalarT]: ...\n@overload\ndef atleast_1d(a0: _ArrayLike[_ScalarT1], a1: _ArrayLike[_ScalarT2], /) -> tuple[NDArray[_ScalarT1], NDArray[_ScalarT2]]: ...\n@overload\ndef atleast_1d(a0: _ArrayLike[_ScalarT], a1: _ArrayLike[_ScalarT], /, *arys: _ArrayLike[_ScalarT]) -> tuple[NDArray[_ScalarT], ...]: ...\n@overload\ndef atleast_1d(a0: ArrayLike, /) -> NDArray[Any]: ...\n@overload\ndef atleast_1d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[NDArray[Any], NDArray[Any]]: ...\n@overload\ndef atleast_1d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[NDArray[Any], ...]: ...\n\n#\n@overload\ndef atleast_2d(a0: _ArrayLike[_ScalarT], /) -> NDArray[_ScalarT]: ...\n@overload\ndef atleast_2d(a0: _ArrayLike[_ScalarT1], a1: _ArrayLike[_ScalarT2], /) -> tuple[NDArray[_ScalarT1], NDArray[_ScalarT2]]: ...\n@overload\ndef atleast_2d(a0: _ArrayLike[_ScalarT], a1: _ArrayLike[_ScalarT], /, *arys: _ArrayLike[_ScalarT]) -> tuple[NDArray[_ScalarT], ...]: ...\n@overload\ndef atleast_2d(a0: ArrayLike, /) -> NDArray[Any]: ...\n@overload\ndef atleast_2d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[NDArray[Any], NDArray[Any]]: ...\n@overload\ndef atleast_2d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[NDArray[Any], ...]: ...\n\n#\n@overload\ndef atleast_3d(a0: _ArrayLike[_ScalarT], /) -> NDArray[_ScalarT]: ...\n@overload\ndef atleast_3d(a0: _ArrayLike[_ScalarT1], a1: 
_ArrayLike[_ScalarT2], /) -> tuple[NDArray[_ScalarT1], NDArray[_ScalarT2]]: ...\n@overload\ndef atleast_3d(a0: _ArrayLike[_ScalarT], a1: _ArrayLike[_ScalarT], /, *arys: _ArrayLike[_ScalarT]) -> tuple[NDArray[_ScalarT], ...]: ...\n@overload\ndef atleast_3d(a0: ArrayLike, /) -> NDArray[Any]: ...\n@overload\ndef atleast_3d(a0: ArrayLike, a1: ArrayLike, /) -> tuple[NDArray[Any], NDArray[Any]]: ...\n@overload\ndef atleast_3d(a0: ArrayLike, a1: ArrayLike, /, *ai: ArrayLike) -> tuple[NDArray[Any], ...]: ...\n\n#\n@overload\ndef vstack(\n tup: Sequence[_ArrayLike[_ScalarT]],\n *,\n dtype: None = ...,\n casting: _CastingKind = ...\n) -> NDArray[_ScalarT]: ...\n@overload\ndef vstack(\n tup: Sequence[ArrayLike],\n *,\n dtype: _DTypeLike[_ScalarT],\n casting: _CastingKind = ...\n) -> NDArray[_ScalarT]: ...\n@overload\ndef vstack(\n tup: Sequence[ArrayLike],\n *,\n dtype: DTypeLike = ...,\n casting: _CastingKind = ...\n) -> NDArray[Any]: ...\n\n@overload\ndef hstack(\n tup: Sequence[_ArrayLike[_ScalarT]],\n *,\n dtype: None = ...,\n casting: _CastingKind = ...\n) -> NDArray[_ScalarT]: ...\n@overload\ndef hstack(\n tup: Sequence[ArrayLike],\n *,\n dtype: _DTypeLike[_ScalarT],\n casting: _CastingKind = ...\n) -> NDArray[_ScalarT]: ...\n@overload\ndef hstack(\n tup: Sequence[ArrayLike],\n *,\n dtype: DTypeLike = ...,\n casting: _CastingKind = ...\n) -> NDArray[Any]: ...\n\n@overload\ndef stack(\n arrays: Sequence[_ArrayLike[_ScalarT]],\n axis: SupportsIndex = ...,\n out: None = ...,\n *,\n dtype: None = ...,\n casting: _CastingKind = ...\n) -> NDArray[_ScalarT]: ...\n@overload\ndef stack(\n arrays: Sequence[ArrayLike],\n axis: SupportsIndex = ...,\n out: None = ...,\n *,\n dtype: _DTypeLike[_ScalarT],\n casting: _CastingKind = ...\n) -> NDArray[_ScalarT]: ...\n@overload\ndef stack(\n arrays: Sequence[ArrayLike],\n axis: SupportsIndex = ...,\n out: None = ...,\n *,\n dtype: DTypeLike = ...,\n casting: _CastingKind = ...\n) -> NDArray[Any]: ...\n@overload\ndef stack(\n arrays: 
Sequence[ArrayLike],\n axis: SupportsIndex,\n out: _ArrayT,\n *,\n dtype: DTypeLike | None = None,\n casting: _CastingKind = "same_kind",\n) -> _ArrayT: ...\n@overload\ndef stack(\n arrays: Sequence[ArrayLike],\n axis: SupportsIndex = 0,\n *,\n out: _ArrayT,\n dtype: DTypeLike | None = None,\n casting: _CastingKind = "same_kind",\n) -> _ArrayT: ...\n\n@overload\ndef unstack(\n array: _ArrayLike[_ScalarT],\n /,\n *,\n axis: int = ...,\n) -> tuple[NDArray[_ScalarT], ...]: ...\n@overload\ndef unstack(\n array: ArrayLike,\n /,\n *,\n axis: int = ...,\n) -> tuple[NDArray[Any], ...]: ...\n\n@overload\ndef block(arrays: _ArrayLike[_ScalarT]) -> NDArray[_ScalarT]: ...\n@overload\ndef block(arrays: ArrayLike) -> NDArray[Any]: ...\n
.venv\Lib\site-packages\numpy\_core\shape_base.pyi
shape_base.pyi
Other
4,928
0.95
0.188571
0.104294
react-lib
357
2024-08-31T20:00:21.872165
GPL-3.0
false
84b6d1c38797881973993dbdbb1ef57c
"""\nThis module contains a set of functions for vectorized string\noperations.\n"""\n\nimport functools\nimport sys\n\nimport numpy as np\nfrom numpy import (\n add,\n equal,\n greater,\n greater_equal,\n less,\n less_equal,\n not_equal,\n)\nfrom numpy import (\n multiply as _multiply_ufunc,\n)\nfrom numpy._core.multiarray import _vec_string\nfrom numpy._core.overrides import array_function_dispatch, set_module\nfrom numpy._core.umath import (\n _center,\n _expandtabs,\n _expandtabs_length,\n _ljust,\n _lstrip_chars,\n _lstrip_whitespace,\n _partition,\n _partition_index,\n _replace,\n _rjust,\n _rpartition,\n _rpartition_index,\n _rstrip_chars,\n _rstrip_whitespace,\n _slice,\n _strip_chars,\n _strip_whitespace,\n _zfill,\n isalnum,\n isalpha,\n isdecimal,\n isdigit,\n islower,\n isnumeric,\n isspace,\n istitle,\n isupper,\n str_len,\n)\nfrom numpy._core.umath import (\n count as _count_ufunc,\n)\nfrom numpy._core.umath import (\n endswith as _endswith_ufunc,\n)\nfrom numpy._core.umath import (\n find as _find_ufunc,\n)\nfrom numpy._core.umath import (\n index as _index_ufunc,\n)\nfrom numpy._core.umath import (\n rfind as _rfind_ufunc,\n)\nfrom numpy._core.umath import (\n rindex as _rindex_ufunc,\n)\nfrom numpy._core.umath import (\n startswith as _startswith_ufunc,\n)\n\n\ndef _override___module__():\n for ufunc in [\n isalnum, isalpha, isdecimal, isdigit, islower, isnumeric, isspace,\n istitle, isupper, str_len,\n ]:\n ufunc.__module__ = "numpy.strings"\n ufunc.__qualname__ = ufunc.__name__\n\n\n_override___module__()\n\n\n__all__ = [\n # UFuncs\n "equal", "not_equal", "less", "less_equal", "greater", "greater_equal",\n "add", "multiply", "isalpha", "isdigit", "isspace", "isalnum", "islower",\n "isupper", "istitle", "isdecimal", "isnumeric", "str_len", "find",\n "rfind", "index", "rindex", "count", "startswith", "endswith", "lstrip",\n "rstrip", "strip", "replace", "expandtabs", "center", "ljust", "rjust",\n "zfill", "partition", "rpartition", "slice",\n\n # 
_vec_string - Will gradually become ufuncs as well\n "upper", "lower", "swapcase", "capitalize", "title",\n\n # _vec_string - Will probably not become ufuncs\n "mod", "decode", "encode", "translate",\n\n # Removed from namespace until behavior has been crystallized\n # "join", "split", "rsplit", "splitlines",\n]\n\n\nMAX = np.iinfo(np.int64).max\n\narray_function_dispatch = functools.partial(\n array_function_dispatch, module='numpy.strings')\n\n\ndef _get_num_chars(a):\n """\n Helper function that returns the number of characters per field in\n a string or unicode array. This is to abstract out the fact that\n for a unicode array this is itemsize / 4.\n """\n if issubclass(a.dtype.type, np.str_):\n return a.itemsize // 4\n return a.itemsize\n\n\ndef _to_bytes_or_str_array(result, output_dtype_like):\n """\n Helper function to cast a result back into an array\n with the appropriate dtype if an object array must be used\n as an intermediary.\n """\n output_dtype_like = np.asarray(output_dtype_like)\n if result.size == 0:\n # Calling asarray & tolist in an empty array would result\n # in losing shape information\n return result.astype(output_dtype_like.dtype)\n ret = np.asarray(result.tolist())\n if isinstance(output_dtype_like.dtype, np.dtypes.StringDType):\n return ret.astype(type(output_dtype_like.dtype))\n return ret.astype(type(output_dtype_like.dtype)(_get_num_chars(ret)))\n\n\ndef _clean_args(*args):\n """\n Helper function for delegating arguments to Python string\n functions.\n\n Many of the Python string operations that have optional arguments\n do not use 'None' to indicate a default value. 
In these cases,\n we need to remove all None arguments, and those following them.\n """\n newargs = []\n for chk in args:\n if chk is None:\n break\n newargs.append(chk)\n return newargs\n\n\ndef _multiply_dispatcher(a, i):\n return (a,)\n\n\n@set_module("numpy.strings")\n@array_function_dispatch(_multiply_dispatcher)\ndef multiply(a, i):\n """\n Return (a * i), that is string multiple concatenation,\n element-wise.\n\n Values in ``i`` of less than 0 are treated as 0 (which yields an\n empty string).\n\n Parameters\n ----------\n a : array_like, with ``StringDType``, ``bytes_`` or ``str_`` dtype\n\n i : array_like, with any integer dtype\n\n Returns\n -------\n out : ndarray\n Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype,\n depending on input types\n\n Examples\n --------\n >>> import numpy as np\n >>> a = np.array(["a", "b", "c"])\n >>> np.strings.multiply(a, 3)\n array(['aaa', 'bbb', 'ccc'], dtype='<U3')\n >>> i = np.array([1, 2, 3])\n >>> np.strings.multiply(a, i)\n array(['a', 'bb', 'ccc'], dtype='<U3')\n >>> np.strings.multiply(np.array(['a']), i)\n array(['a', 'aa', 'aaa'], dtype='<U3')\n >>> a = np.array(['a', 'b', 'c', 'd', 'e', 'f']).reshape((2, 3))\n >>> np.strings.multiply(a, 3)\n array([['aaa', 'bbb', 'ccc'],\n ['ddd', 'eee', 'fff']], dtype='<U3')\n >>> np.strings.multiply(a, i)\n array([['a', 'bb', 'ccc'],\n ['d', 'ee', 'fff']], dtype='<U3')\n\n """\n a = np.asanyarray(a)\n\n i = np.asanyarray(i)\n if not np.issubdtype(i.dtype, np.integer):\n raise TypeError(f"unsupported type {i.dtype} for operand 'i'")\n i = np.maximum(i, 0)\n\n # delegate to stringdtype loops that also do overflow checking\n if a.dtype.char == "T":\n return a * i\n\n a_len = str_len(a)\n\n # Ensure we can do a_len * i without overflow.\n if np.any(a_len > sys.maxsize / np.maximum(i, 1)):\n raise OverflowError("Overflow encountered in string multiply")\n\n buffersizes = a_len * i\n out_dtype = f"{a.dtype.char}{buffersizes.max()}"\n out = np.empty_like(a, 
shape=buffersizes.shape, dtype=out_dtype)\n return _multiply_ufunc(a, i, out=out)\n\n\ndef _mod_dispatcher(a, values):\n return (a, values)\n\n\n@set_module("numpy.strings")\n@array_function_dispatch(_mod_dispatcher)\ndef mod(a, values):\n """\n Return (a % i), that is pre-Python 2.6 string formatting\n (interpolation), element-wise for a pair of array_likes of str\n or unicode.\n\n Parameters\n ----------\n a : array_like, with `np.bytes_` or `np.str_` dtype\n\n values : array_like of values\n These values will be element-wise interpolated into the string.\n\n Returns\n -------\n out : ndarray\n Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype,\n depending on input types\n\n Examples\n --------\n >>> import numpy as np\n >>> a = np.array(["NumPy is a %s library"])\n >>> np.strings.mod(a, values=["Python"])\n array(['NumPy is a Python library'], dtype='<U25')\n\n >>> a = np.array([b'%d bytes', b'%d bits'])\n >>> values = np.array([8, 64])\n >>> np.strings.mod(a, values)\n array([b'8 bytes', b'64 bits'], dtype='|S7')\n\n """\n return _to_bytes_or_str_array(\n _vec_string(a, np.object_, '__mod__', (values,)), a)\n\n\n@set_module("numpy.strings")\ndef find(a, sub, start=0, end=None):\n """\n For each element, return the lowest index in the string where\n substring ``sub`` is found, such that ``sub`` is contained in the\n range [``start``, ``end``).\n\n Parameters\n ----------\n a : array_like, with ``StringDType``, ``bytes_`` or ``str_`` dtype\n\n sub : array_like, with `np.bytes_` or `np.str_` dtype\n The substring to search for.\n\n start, end : array_like, with any integer dtype\n The range to look in, interpreted as in slice notation.\n\n Returns\n -------\n y : ndarray\n Output array of ints\n\n See Also\n --------\n str.find\n\n Examples\n --------\n >>> import numpy as np\n >>> a = np.array(["NumPy is a Python library"])\n >>> np.strings.find(a, "Python")\n array([11])\n\n """\n end = end if end is not None else MAX\n return _find_ufunc(a, sub, 
start, end)\n\n\n@set_module("numpy.strings")\ndef rfind(a, sub, start=0, end=None):\n """\n For each element, return the highest index in the string where\n substring ``sub`` is found, such that ``sub`` is contained in the\n range [``start``, ``end``).\n\n Parameters\n ----------\n a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype\n\n sub : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype\n The substring to search for.\n\n start, end : array_like, with any integer dtype\n The range to look in, interpreted as in slice notation.\n\n Returns\n -------\n y : ndarray\n Output array of ints\n\n See Also\n --------\n str.rfind\n\n Examples\n --------\n >>> import numpy as np\n >>> a = np.array(["Computer Science"])\n >>> np.strings.rfind(a, "Science", start=0, end=None)\n array([9])\n >>> np.strings.rfind(a, "Science", start=0, end=8)\n array([-1])\n >>> b = np.array(["Computer Science", "Science"])\n >>> np.strings.rfind(b, "Science", start=0, end=None)\n array([9, 0])\n\n """\n end = end if end is not None else MAX\n return _rfind_ufunc(a, sub, start, end)\n\n\n@set_module("numpy.strings")\ndef index(a, sub, start=0, end=None):\n """\n Like `find`, but raises :exc:`ValueError` when the substring is not found.\n\n Parameters\n ----------\n a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype\n\n sub : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype\n\n start, end : array_like, with any integer dtype, optional\n\n Returns\n -------\n out : ndarray\n Output array of ints.\n\n See Also\n --------\n find, str.index\n\n Examples\n --------\n >>> import numpy as np\n >>> a = np.array(["Computer Science"])\n >>> np.strings.index(a, "Science", start=0, end=None)\n array([9])\n\n """\n end = end if end is not None else MAX\n return _index_ufunc(a, sub, start, end)\n\n\n@set_module("numpy.strings")\ndef rindex(a, sub, start=0, end=None):\n """\n Like `rfind`, but raises :exc:`ValueError` when the substring `sub` is\n 
not found.\n\n Parameters\n ----------\n a : array-like, with `np.bytes_` or `np.str_` dtype\n\n sub : array-like, with `np.bytes_` or `np.str_` dtype\n\n start, end : array-like, with any integer dtype, optional\n\n Returns\n -------\n out : ndarray\n Output array of ints.\n\n See Also\n --------\n rfind, str.rindex\n\n Examples\n --------\n >>> a = np.array(["Computer Science"])\n >>> np.strings.rindex(a, "Science", start=0, end=None)\n array([9])\n\n """\n end = end if end is not None else MAX\n return _rindex_ufunc(a, sub, start, end)\n\n\n@set_module("numpy.strings")\ndef count(a, sub, start=0, end=None):\n """\n Returns an array with the number of non-overlapping occurrences of\n substring ``sub`` in the range [``start``, ``end``).\n\n Parameters\n ----------\n a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype\n\n sub : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype\n The substring to search for.\n\n start, end : array_like, with any integer dtype\n The range to look in, interpreted as in slice notation.\n\n Returns\n -------\n y : ndarray\n Output array of ints\n\n See Also\n --------\n str.count\n\n Examples\n --------\n >>> import numpy as np\n >>> c = np.array(['aAaAaA', ' aA ', 'abBABba'])\n >>> c\n array(['aAaAaA', ' aA ', 'abBABba'], dtype='<U7')\n >>> np.strings.count(c, 'A')\n array([3, 1, 1])\n >>> np.strings.count(c, 'aA')\n array([3, 1, 0])\n >>> np.strings.count(c, 'A', start=1, end=4)\n array([2, 1, 1])\n >>> np.strings.count(c, 'A', start=1, end=3)\n array([1, 0, 0])\n\n """\n end = end if end is not None else MAX\n return _count_ufunc(a, sub, start, end)\n\n\n@set_module("numpy.strings")\ndef startswith(a, prefix, start=0, end=None):\n """\n Returns a boolean array which is `True` where the string element\n in ``a`` starts with ``prefix``, otherwise `False`.\n\n Parameters\n ----------\n a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype\n\n prefix : array-like, with ``StringDType``, 
``bytes_``, or ``str_`` dtype\n\n start, end : array_like, with any integer dtype\n With ``start``, test beginning at that position. With ``end``,\n stop comparing at that position.\n\n Returns\n -------\n out : ndarray\n Output array of bools\n\n See Also\n --------\n str.startswith\n\n Examples\n --------\n >>> import numpy as np\n >>> s = np.array(['foo', 'bar'])\n >>> s\n array(['foo', 'bar'], dtype='<U3')\n >>> np.strings.startswith(s, 'fo')\n array([True, False])\n >>> np.strings.startswith(s, 'o', start=1, end=2)\n array([True, False])\n\n """\n end = end if end is not None else MAX\n return _startswith_ufunc(a, prefix, start, end)\n\n\n@set_module("numpy.strings")\ndef endswith(a, suffix, start=0, end=None):\n """\n Returns a boolean array which is `True` where the string element\n in ``a`` ends with ``suffix``, otherwise `False`.\n\n Parameters\n ----------\n a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype\n\n suffix : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype\n\n start, end : array_like, with any integer dtype\n With ``start``, test beginning at that position. With ``end``,\n stop comparing at that position.\n\n Returns\n -------\n out : ndarray\n Output array of bools\n\n See Also\n --------\n str.endswith\n\n Examples\n --------\n >>> import numpy as np\n >>> s = np.array(['foo', 'bar'])\n >>> s\n array(['foo', 'bar'], dtype='<U3')\n >>> np.strings.endswith(s, 'ar')\n array([False, True])\n >>> np.strings.endswith(s, 'a', start=1, end=2)\n array([False, True])\n\n """\n end = end if end is not None else MAX\n return _endswith_ufunc(a, suffix, start, end)\n\n\ndef _code_dispatcher(a, encoding=None, errors=None):\n return (a,)\n\n\n@set_module("numpy.strings")\n@array_function_dispatch(_code_dispatcher)\ndef decode(a, encoding=None, errors=None):\n r"""\n Calls :meth:`bytes.decode` element-wise.\n\n The set of available codecs comes from the Python standard library,\n and may be extended at runtime. 
For more information, see the\n :mod:`codecs` module.\n\n Parameters\n ----------\n a : array_like, with ``bytes_`` dtype\n\n encoding : str, optional\n The name of an encoding\n\n errors : str, optional\n Specifies how to handle encoding errors\n\n Returns\n -------\n out : ndarray\n\n See Also\n --------\n :py:meth:`bytes.decode`\n\n Notes\n -----\n The type of the result will depend on the encoding specified.\n\n Examples\n --------\n >>> import numpy as np\n >>> c = np.array([b'\x81\xc1\x81\xc1\x81\xc1', b'@@\x81\xc1@@',\n ... b'\x81\x82\xc2\xc1\xc2\x82\x81'])\n >>> c\n array([b'\x81\xc1\x81\xc1\x81\xc1', b'@@\x81\xc1@@',\n b'\x81\x82\xc2\xc1\xc2\x82\x81'], dtype='|S7')\n >>> np.strings.decode(c, encoding='cp037')\n array(['aAaAaA', ' aA ', 'abBABba'], dtype='<U7')\n\n """\n return _to_bytes_or_str_array(\n _vec_string(a, np.object_, 'decode', _clean_args(encoding, errors)),\n np.str_(''))\n\n\n@set_module("numpy.strings")\n@array_function_dispatch(_code_dispatcher)\ndef encode(a, encoding=None, errors=None):\n """\n Calls :meth:`str.encode` element-wise.\n\n The set of available codecs comes from the Python standard library,\n and may be extended at runtime. 
For more information, see the\n :mod:`codecs` module.\n\n Parameters\n ----------\n a : array_like, with ``StringDType`` or ``str_`` dtype\n\n encoding : str, optional\n The name of an encoding\n\n errors : str, optional\n Specifies how to handle encoding errors\n\n Returns\n -------\n out : ndarray\n\n See Also\n --------\n str.encode\n\n Notes\n -----\n The type of the result will depend on the encoding specified.\n\n Examples\n --------\n >>> import numpy as np\n >>> a = np.array(['aAaAaA', ' aA ', 'abBABba'])\n >>> np.strings.encode(a, encoding='cp037')\n array([b'\x81\xc1\x81\xc1\x81\xc1', b'@@\x81\xc1@@',\n b'\x81\x82\xc2\xc1\xc2\x82\x81'], dtype='|S7')\n\n """\n return _to_bytes_or_str_array(\n _vec_string(a, np.object_, 'encode', _clean_args(encoding, errors)),\n np.bytes_(b''))\n\n\ndef _expandtabs_dispatcher(a, tabsize=None):\n return (a,)\n\n\n@set_module("numpy.strings")\n@array_function_dispatch(_expandtabs_dispatcher)\ndef expandtabs(a, tabsize=8):\n """\n Return a copy of each string element where all tab characters are\n replaced by one or more spaces.\n\n Calls :meth:`str.expandtabs` element-wise.\n\n Return a copy of each string element where all tab characters are\n replaced by one or more spaces, depending on the current column\n and the given `tabsize`. The column number is reset to zero after\n each newline occurring in the string. This doesn't understand other\n non-printing characters or escape sequences.\n\n Parameters\n ----------\n a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype\n Input array\n tabsize : int, optional\n Replace tabs with `tabsize` number of spaces. 
If not given defaults\n to 8 spaces.\n\n Returns\n -------\n out : ndarray\n Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype,\n depending on input type\n\n See Also\n --------\n str.expandtabs\n\n Examples\n --------\n >>> import numpy as np\n >>> a = np.array(['\t\tHello\tworld'])\n >>> np.strings.expandtabs(a, tabsize=4) # doctest: +SKIP\n array([' Hello world'], dtype='<U21') # doctest: +SKIP\n\n """\n a = np.asanyarray(a)\n tabsize = np.asanyarray(tabsize)\n\n if a.dtype.char == "T":\n return _expandtabs(a, tabsize)\n\n buffersizes = _expandtabs_length(a, tabsize)\n out_dtype = f"{a.dtype.char}{buffersizes.max()}"\n out = np.empty_like(a, shape=buffersizes.shape, dtype=out_dtype)\n return _expandtabs(a, tabsize, out=out)\n\n\ndef _just_dispatcher(a, width, fillchar=None):\n return (a,)\n\n\n@set_module("numpy.strings")\n@array_function_dispatch(_just_dispatcher)\ndef center(a, width, fillchar=' '):\n """\n Return a copy of `a` with its elements centered in a string of\n length `width`.\n\n Parameters\n ----------\n a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype\n\n width : array_like, with any integer dtype\n The length of the resulting strings, unless ``width < str_len(a)``.\n fillchar : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype\n Optional padding character to use (default is space).\n\n Returns\n -------\n out : ndarray\n Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype,\n depending on input types\n\n See Also\n --------\n str.center\n\n Notes\n -----\n While it is possible for ``a`` and ``fillchar`` to have different dtypes,\n passing a non-ASCII character in ``fillchar`` when ``a`` is of dtype "S"\n is not allowed, and a ``ValueError`` is raised.\n\n Examples\n --------\n >>> import numpy as np\n >>> c = np.array(['a1b2','1b2a','b2a1','2a1b']); c\n array(['a1b2', '1b2a', 'b2a1', '2a1b'], dtype='<U4')\n >>> np.strings.center(c, width=9)\n array([' a1b2 ', ' 1b2a ', ' b2a1 ', ' 2a1b '], 
dtype='<U9')\n >>> np.strings.center(c, width=9, fillchar='*')\n array(['***a1b2**', '***1b2a**', '***b2a1**', '***2a1b**'], dtype='<U9')\n >>> np.strings.center(c, width=1)\n array(['a1b2', '1b2a', 'b2a1', '2a1b'], dtype='<U4')\n\n """\n width = np.asanyarray(width)\n\n if not np.issubdtype(width.dtype, np.integer):\n raise TypeError(f"unsupported type {width.dtype} for operand 'width'")\n\n a = np.asanyarray(a)\n fillchar = np.asanyarray(fillchar)\n\n if np.any(str_len(fillchar) != 1):\n raise TypeError(\n "The fill character must be exactly one character long")\n\n if np.result_type(a, fillchar).char == "T":\n return _center(a, width, fillchar)\n\n fillchar = fillchar.astype(a.dtype, copy=False)\n width = np.maximum(str_len(a), width)\n out_dtype = f"{a.dtype.char}{width.max()}"\n shape = np.broadcast_shapes(a.shape, width.shape, fillchar.shape)\n out = np.empty_like(a, shape=shape, dtype=out_dtype)\n\n return _center(a, width, fillchar, out=out)\n\n\n@set_module("numpy.strings")\n@array_function_dispatch(_just_dispatcher)\ndef ljust(a, width, fillchar=' '):\n """\n Return an array with the elements of `a` left-justified in a\n string of length `width`.\n\n Parameters\n ----------\n a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype\n\n width : array_like, with any integer dtype\n The length of the resulting strings, unless ``width < str_len(a)``.\n fillchar : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype\n Optional character to use for padding (default is space).\n\n Returns\n -------\n out : ndarray\n Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype,\n depending on input types\n\n See Also\n --------\n str.ljust\n\n Notes\n -----\n While it is possible for ``a`` and ``fillchar`` to have different dtypes,\n passing a non-ASCII character in ``fillchar`` when ``a`` is of dtype "S"\n is not allowed, and a ``ValueError`` is raised.\n\n Examples\n --------\n >>> import numpy as np\n >>> c = np.array(['aAaAaA', ' aA ', 
'abBABba'])\n >>> np.strings.ljust(c, width=3)\n array(['aAaAaA', ' aA ', 'abBABba'], dtype='<U7')\n >>> np.strings.ljust(c, width=9)\n array(['aAaAaA ', ' aA ', 'abBABba '], dtype='<U9')\n\n """\n width = np.asanyarray(width)\n if not np.issubdtype(width.dtype, np.integer):\n raise TypeError(f"unsupported type {width.dtype} for operand 'width'")\n\n a = np.asanyarray(a)\n fillchar = np.asanyarray(fillchar)\n\n if np.any(str_len(fillchar) != 1):\n raise TypeError(\n "The fill character must be exactly one character long")\n\n if np.result_type(a, fillchar).char == "T":\n return _ljust(a, width, fillchar)\n\n fillchar = fillchar.astype(a.dtype, copy=False)\n width = np.maximum(str_len(a), width)\n shape = np.broadcast_shapes(a.shape, width.shape, fillchar.shape)\n out_dtype = f"{a.dtype.char}{width.max()}"\n out = np.empty_like(a, shape=shape, dtype=out_dtype)\n\n return _ljust(a, width, fillchar, out=out)\n\n\n@set_module("numpy.strings")\n@array_function_dispatch(_just_dispatcher)\ndef rjust(a, width, fillchar=' '):\n """\n Return an array with the elements of `a` right-justified in a\n string of length `width`.\n\n Parameters\n ----------\n a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype\n\n width : array_like, with any integer dtype\n The length of the resulting strings, unless ``width < str_len(a)``.\n fillchar : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype\n Optional padding character to use (default is space).\n\n Returns\n -------\n out : ndarray\n Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype,\n depending on input types\n\n See Also\n --------\n str.rjust\n\n Notes\n -----\n While it is possible for ``a`` and ``fillchar`` to have different dtypes,\n passing a non-ASCII character in ``fillchar`` when ``a`` is of dtype "S"\n is not allowed, and a ``ValueError`` is raised.\n\n Examples\n --------\n >>> import numpy as np\n >>> a = np.array(['aAaAaA', ' aA ', 'abBABba'])\n >>> np.strings.rjust(a, width=3)\n 
array(['aAaAaA', ' aA ', 'abBABba'], dtype='<U7')\n >>> np.strings.rjust(a, width=9)\n array([' aAaAaA', ' aA ', ' abBABba'], dtype='<U9')\n\n """\n width = np.asanyarray(width)\n if not np.issubdtype(width.dtype, np.integer):\n raise TypeError(f"unsupported type {width.dtype} for operand 'width'")\n\n a = np.asanyarray(a)\n fillchar = np.asanyarray(fillchar)\n\n if np.any(str_len(fillchar) != 1):\n raise TypeError(\n "The fill character must be exactly one character long")\n\n if np.result_type(a, fillchar).char == "T":\n return _rjust(a, width, fillchar)\n\n fillchar = fillchar.astype(a.dtype, copy=False)\n width = np.maximum(str_len(a), width)\n shape = np.broadcast_shapes(a.shape, width.shape, fillchar.shape)\n out_dtype = f"{a.dtype.char}{width.max()}"\n out = np.empty_like(a, shape=shape, dtype=out_dtype)\n\n return _rjust(a, width, fillchar, out=out)\n\n\ndef _zfill_dispatcher(a, width):\n return (a,)\n\n\n@set_module("numpy.strings")\n@array_function_dispatch(_zfill_dispatcher)\ndef zfill(a, width):\n """\n Return the numeric string left-filled with zeros. 
A leading\n sign prefix (``+``/``-``) is handled by inserting the padding\n after the sign character rather than before.\n\n Parameters\n ----------\n a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype\n\n width : array_like, with any integer dtype\n Width of string to left-fill elements in `a`.\n\n Returns\n -------\n out : ndarray\n Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype,\n depending on input type\n\n See Also\n --------\n str.zfill\n\n Examples\n --------\n >>> import numpy as np\n >>> np.strings.zfill(['1', '-1', '+1'], 3)\n array(['001', '-01', '+01'], dtype='<U3')\n\n """\n width = np.asanyarray(width)\n if not np.issubdtype(width.dtype, np.integer):\n raise TypeError(f"unsupported type {width.dtype} for operand 'width'")\n\n a = np.asanyarray(a)\n\n if a.dtype.char == "T":\n return _zfill(a, width)\n\n width = np.maximum(str_len(a), width)\n shape = np.broadcast_shapes(a.shape, width.shape)\n out_dtype = f"{a.dtype.char}{width.max()}"\n out = np.empty_like(a, shape=shape, dtype=out_dtype)\n return _zfill(a, width, out=out)\n\n\n@set_module("numpy.strings")\ndef lstrip(a, chars=None):\n """\n For each element in `a`, return a copy with the leading characters\n removed.\n\n Parameters\n ----------\n a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype\n chars : scalar with the same dtype as ``a``, optional\n The ``chars`` argument is a string specifying the set of\n characters to be removed. If ``None``, the ``chars``\n argument defaults to removing whitespace. 
The ``chars`` argument\n is not a prefix or suffix; rather, all combinations of its\n values are stripped.\n\n Returns\n -------\n out : ndarray\n Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype,\n depending on input types\n\n See Also\n --------\n str.lstrip\n\n Examples\n --------\n >>> import numpy as np\n >>> c = np.array(['aAaAaA', ' aA ', 'abBABba'])\n >>> c\n array(['aAaAaA', ' aA ', 'abBABba'], dtype='<U7')\n # The 'a' variable is unstripped from c[1] because of leading whitespace.\n >>> np.strings.lstrip(c, 'a')\n array(['AaAaA', ' aA ', 'bBABba'], dtype='<U7')\n >>> np.strings.lstrip(c, 'A') # leaves c unchanged\n array(['aAaAaA', ' aA ', 'abBABba'], dtype='<U7')\n >>> (np.strings.lstrip(c, ' ') == np.strings.lstrip(c, '')).all()\n np.False_\n >>> (np.strings.lstrip(c, ' ') == np.strings.lstrip(c)).all()\n np.True_\n\n """\n if chars is None:\n return _lstrip_whitespace(a)\n return _lstrip_chars(a, chars)\n\n\n@set_module("numpy.strings")\ndef rstrip(a, chars=None):\n """\n For each element in `a`, return a copy with the trailing characters\n removed.\n\n Parameters\n ----------\n a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype\n chars : scalar with the same dtype as ``a``, optional\n The ``chars`` argument is a string specifying the set of\n characters to be removed. If ``None``, the ``chars``\n argument defaults to removing whitespace. 
The ``chars`` argument\n is not a prefix or suffix; rather, all combinations of its\n values are stripped.\n\n Returns\n -------\n out : ndarray\n Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype,\n depending on input types\n\n See Also\n --------\n str.rstrip\n\n Examples\n --------\n >>> import numpy as np\n >>> c = np.array(['aAaAaA', 'abBABba'])\n >>> c\n array(['aAaAaA', 'abBABba'], dtype='<U7')\n >>> np.strings.rstrip(c, 'a')\n array(['aAaAaA', 'abBABb'], dtype='<U7')\n >>> np.strings.rstrip(c, 'A')\n array(['aAaAa', 'abBABba'], dtype='<U7')\n\n """\n if chars is None:\n return _rstrip_whitespace(a)\n return _rstrip_chars(a, chars)\n\n\n@set_module("numpy.strings")\ndef strip(a, chars=None):\n """\n For each element in `a`, return a copy with the leading and\n trailing characters removed.\n\n Parameters\n ----------\n a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype\n chars : scalar with the same dtype as ``a``, optional\n The ``chars`` argument is a string specifying the set of\n characters to be removed. If ``None``, the ``chars``\n argument defaults to removing whitespace. 
The ``chars`` argument\n is not a prefix or suffix; rather, all combinations of its\n values are stripped.\n\n Returns\n -------\n out : ndarray\n Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype,\n depending on input types\n\n See Also\n --------\n str.strip\n\n Examples\n --------\n >>> import numpy as np\n >>> c = np.array(['aAaAaA', ' aA ', 'abBABba'])\n >>> c\n array(['aAaAaA', ' aA ', 'abBABba'], dtype='<U7')\n >>> np.strings.strip(c)\n array(['aAaAaA', 'aA', 'abBABba'], dtype='<U7')\n # 'a' unstripped from c[1] because of leading whitespace.\n >>> np.strings.strip(c, 'a')\n array(['AaAaA', ' aA ', 'bBABb'], dtype='<U7')\n # 'A' unstripped from c[1] because of trailing whitespace.\n >>> np.strings.strip(c, 'A')\n array(['aAaAa', ' aA ', 'abBABba'], dtype='<U7')\n\n """\n if chars is None:\n return _strip_whitespace(a)\n return _strip_chars(a, chars)\n\n\ndef _unary_op_dispatcher(a):\n return (a,)\n\n\n@set_module("numpy.strings")\n@array_function_dispatch(_unary_op_dispatcher)\ndef upper(a):\n """\n Return an array with the elements converted to uppercase.\n\n Calls :meth:`str.upper` element-wise.\n\n For 8-bit strings, this method is locale-dependent.\n\n Parameters\n ----------\n a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype\n Input array.\n\n Returns\n -------\n out : ndarray\n Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype,\n depending on input types\n\n See Also\n --------\n str.upper\n\n Examples\n --------\n >>> import numpy as np\n >>> c = np.array(['a1b c', '1bca', 'bca1']); c\n array(['a1b c', '1bca', 'bca1'], dtype='<U5')\n >>> np.strings.upper(c)\n array(['A1B C', '1BCA', 'BCA1'], dtype='<U5')\n\n """\n a_arr = np.asarray(a)\n return _vec_string(a_arr, a_arr.dtype, 'upper')\n\n\n@set_module("numpy.strings")\n@array_function_dispatch(_unary_op_dispatcher)\ndef lower(a):\n """\n Return an array with the elements converted to lowercase.\n\n Call :meth:`str.lower` element-wise.\n\n For 8-bit strings, 
this method is locale-dependent.\n\n Parameters\n ----------\n a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype\n Input array.\n\n Returns\n -------\n out : ndarray\n Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype,\n depending on input types\n\n See Also\n --------\n str.lower\n\n Examples\n --------\n >>> import numpy as np\n >>> c = np.array(['A1B C', '1BCA', 'BCA1']); c\n array(['A1B C', '1BCA', 'BCA1'], dtype='<U5')\n >>> np.strings.lower(c)\n array(['a1b c', '1bca', 'bca1'], dtype='<U5')\n\n """\n a_arr = np.asarray(a)\n return _vec_string(a_arr, a_arr.dtype, 'lower')\n\n\n@set_module("numpy.strings")\n@array_function_dispatch(_unary_op_dispatcher)\ndef swapcase(a):\n """\n Return element-wise a copy of the string with\n uppercase characters converted to lowercase and vice versa.\n\n Calls :meth:`str.swapcase` element-wise.\n\n For 8-bit strings, this method is locale-dependent.\n\n Parameters\n ----------\n a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype\n Input array.\n\n Returns\n -------\n out : ndarray\n Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype,\n depending on input types\n\n See Also\n --------\n str.swapcase\n\n Examples\n --------\n >>> import numpy as np\n >>> c=np.array(['a1B c','1b Ca','b Ca1','cA1b'],'S5'); c\n array(['a1B c', '1b Ca', 'b Ca1', 'cA1b'],\n dtype='|S5')\n >>> np.strings.swapcase(c)\n array(['A1b C', '1B cA', 'B cA1', 'Ca1B'],\n dtype='|S5')\n\n """\n a_arr = np.asarray(a)\n return _vec_string(a_arr, a_arr.dtype, 'swapcase')\n\n\n@set_module("numpy.strings")\n@array_function_dispatch(_unary_op_dispatcher)\ndef capitalize(a):\n """\n Return a copy of ``a`` with only the first character of each element\n capitalized.\n\n Calls :meth:`str.capitalize` element-wise.\n\n For byte strings, this method is locale-dependent.\n\n Parameters\n ----------\n a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype\n Input array of strings to capitalize.\n\n 
Returns\n -------\n out : ndarray\n Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype,\n depending on input types\n\n See Also\n --------\n str.capitalize\n\n Examples\n --------\n >>> import numpy as np\n >>> c = np.array(['a1b2','1b2a','b2a1','2a1b'],'S4'); c\n array(['a1b2', '1b2a', 'b2a1', '2a1b'],\n dtype='|S4')\n >>> np.strings.capitalize(c)\n array(['A1b2', '1b2a', 'B2a1', '2a1b'],\n dtype='|S4')\n\n """\n a_arr = np.asarray(a)\n return _vec_string(a_arr, a_arr.dtype, 'capitalize')\n\n\n@set_module("numpy.strings")\n@array_function_dispatch(_unary_op_dispatcher)\ndef title(a):\n """\n Return element-wise title cased version of string or unicode.\n\n Title case words start with uppercase characters, all remaining cased\n characters are lowercase.\n\n Calls :meth:`str.title` element-wise.\n\n For 8-bit strings, this method is locale-dependent.\n\n Parameters\n ----------\n a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype\n Input array.\n\n Returns\n -------\n out : ndarray\n Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype,\n depending on input types\n\n See Also\n --------\n str.title\n\n Examples\n --------\n >>> import numpy as np\n >>> c=np.array(['a1b c','1b ca','b ca1','ca1b'],'S5'); c\n array(['a1b c', '1b ca', 'b ca1', 'ca1b'],\n dtype='|S5')\n >>> np.strings.title(c)\n array(['A1B C', '1B Ca', 'B Ca1', 'Ca1B'],\n dtype='|S5')\n\n """\n a_arr = np.asarray(a)\n return _vec_string(a_arr, a_arr.dtype, 'title')\n\n\ndef _replace_dispatcher(a, old, new, count=None):\n return (a,)\n\n\n@set_module("numpy.strings")\n@array_function_dispatch(_replace_dispatcher)\ndef replace(a, old, new, count=-1):\n """\n For each element in ``a``, return a copy of the string with\n occurrences of substring ``old`` replaced by ``new``.\n\n Parameters\n ----------\n a : array_like, with ``bytes_`` or ``str_`` dtype\n\n old, new : array_like, with ``bytes_`` or ``str_`` dtype\n\n count : array_like, with ``int_`` dtype\n If the 
optional argument ``count`` is given, only the first\n ``count`` occurrences are replaced.\n\n Returns\n -------\n out : ndarray\n Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype,\n depending on input types\n\n See Also\n --------\n str.replace\n\n Examples\n --------\n >>> import numpy as np\n >>> a = np.array(["That is a mango", "Monkeys eat mangos"])\n >>> np.strings.replace(a, 'mango', 'banana')\n array(['That is a banana', 'Monkeys eat bananas'], dtype='<U19')\n\n >>> a = np.array(["The dish is fresh", "This is it"])\n >>> np.strings.replace(a, 'is', 'was')\n array(['The dwash was fresh', 'Thwas was it'], dtype='<U19')\n\n """\n count = np.asanyarray(count)\n if not np.issubdtype(count.dtype, np.integer):\n raise TypeError(f"unsupported type {count.dtype} for operand 'count'")\n\n arr = np.asanyarray(a)\n old_dtype = getattr(old, 'dtype', None)\n old = np.asanyarray(old)\n new_dtype = getattr(new, 'dtype', None)\n new = np.asanyarray(new)\n\n if np.result_type(arr, old, new).char == "T":\n return _replace(arr, old, new, count)\n\n a_dt = arr.dtype\n old = old.astype(old_dtype or a_dt, copy=False)\n new = new.astype(new_dtype or a_dt, copy=False)\n max_int64 = np.iinfo(np.int64).max\n counts = _count_ufunc(arr, old, 0, max_int64)\n counts = np.where(count < 0, counts, np.minimum(counts, count))\n buffersizes = str_len(arr) + counts * (str_len(new) - str_len(old))\n out_dtype = f"{arr.dtype.char}{buffersizes.max()}"\n out = np.empty_like(arr, shape=buffersizes.shape, dtype=out_dtype)\n\n return _replace(arr, old, new, counts, out=out)\n\n\ndef _join_dispatcher(sep, seq):\n return (sep, seq)\n\n\n@array_function_dispatch(_join_dispatcher)\ndef _join(sep, seq):\n """\n Return a string which is the concatenation of the strings in the\n sequence `seq`.\n\n Calls :meth:`str.join` element-wise.\n\n Parameters\n ----------\n sep : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype\n seq : array-like, with ``StringDType``, ``bytes_``, or 
``str_`` dtype\n\n Returns\n -------\n out : ndarray\n Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype,\n depending on input types\n\n See Also\n --------\n str.join\n\n Examples\n --------\n >>> import numpy as np\n >>> np.strings.join('-', 'osd') # doctest: +SKIP\n array('o-s-d', dtype='<U5') # doctest: +SKIP\n\n >>> np.strings.join(['-', '.'], ['ghc', 'osd']) # doctest: +SKIP\n array(['g-h-c', 'o.s.d'], dtype='<U5') # doctest: +SKIP\n\n """\n return _to_bytes_or_str_array(\n _vec_string(sep, np.object_, 'join', (seq,)), seq)\n\n\ndef _split_dispatcher(a, sep=None, maxsplit=None):\n return (a,)\n\n\n@array_function_dispatch(_split_dispatcher)\ndef _split(a, sep=None, maxsplit=None):\n """\n For each element in `a`, return a list of the words in the\n string, using `sep` as the delimiter string.\n\n Calls :meth:`str.split` element-wise.\n\n Parameters\n ----------\n a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype\n\n sep : str or unicode, optional\n If `sep` is not specified or None, any whitespace string is a\n separator.\n\n maxsplit : int, optional\n If `maxsplit` is given, at most `maxsplit` splits are done.\n\n Returns\n -------\n out : ndarray\n Array of list objects\n\n Examples\n --------\n >>> import numpy as np\n >>> x = np.array("Numpy is nice!")\n >>> np.strings.split(x, " ") # doctest: +SKIP\n array(list(['Numpy', 'is', 'nice!']), dtype=object) # doctest: +SKIP\n\n >>> np.strings.split(x, " ", 1) # doctest: +SKIP\n array(list(['Numpy', 'is nice!']), dtype=object) # doctest: +SKIP\n\n See Also\n --------\n str.split, rsplit\n\n """\n # This will return an array of lists of different sizes, so we\n # leave it as an object array\n return _vec_string(\n a, np.object_, 'split', [sep] + _clean_args(maxsplit))\n\n\n@array_function_dispatch(_split_dispatcher)\ndef _rsplit(a, sep=None, maxsplit=None):\n """\n For each element in `a`, return a list of the words in the\n string, using `sep` as the delimiter string.\n\n Calls 
:meth:`str.rsplit` element-wise.\n\n Except for splitting from the right, `rsplit`\n behaves like `split`.\n\n Parameters\n ----------\n a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype\n\n sep : str or unicode, optional\n If `sep` is not specified or None, any whitespace string\n is a separator.\n maxsplit : int, optional\n If `maxsplit` is given, at most `maxsplit` splits are done,\n the rightmost ones.\n\n Returns\n -------\n out : ndarray\n Array of list objects\n\n See Also\n --------\n str.rsplit, split\n\n Examples\n --------\n >>> import numpy as np\n >>> a = np.array(['aAaAaA', 'abBABba'])\n >>> np.strings.rsplit(a, 'A') # doctest: +SKIP\n array([list(['a', 'a', 'a', '']), # doctest: +SKIP\n list(['abB', 'Bba'])], dtype=object) # doctest: +SKIP\n\n """\n # This will return an array of lists of different sizes, so we\n # leave it as an object array\n return _vec_string(\n a, np.object_, 'rsplit', [sep] + _clean_args(maxsplit))\n\n\ndef _splitlines_dispatcher(a, keepends=None):\n return (a,)\n\n\n@array_function_dispatch(_splitlines_dispatcher)\ndef _splitlines(a, keepends=None):\n """\n For each element in `a`, return a list of the lines in the\n element, breaking at line boundaries.\n\n Calls :meth:`str.splitlines` element-wise.\n\n Parameters\n ----------\n a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype\n\n keepends : bool, optional\n Line breaks are not included in the resulting list unless\n keepends is given and true.\n\n Returns\n -------\n out : ndarray\n Array of list objects\n\n See Also\n --------\n str.splitlines\n\n Examples\n --------\n >>> np.char.splitlines("first line\\nsecond line")\n array(list(['first line', 'second line']), dtype=object)\n >>> a = np.array(["first\\nsecond", "third\\nfourth"])\n >>> np.char.splitlines(a)\n array([list(['first', 'second']), list(['third', 'fourth'])], dtype=object)\n\n """\n return _vec_string(\n a, np.object_, 'splitlines', _clean_args(keepends))\n\n\ndef 
_partition_dispatcher(a, sep):\n return (a,)\n\n\n@set_module("numpy.strings")\n@array_function_dispatch(_partition_dispatcher)\ndef partition(a, sep):\n """\n Partition each element in ``a`` around ``sep``.\n\n For each element in ``a``, split the element at the first\n occurrence of ``sep``, and return a 3-tuple containing the part\n before the separator, the separator itself, and the part after\n the separator. If the separator is not found, the first item of\n the tuple will contain the whole string, and the second and third\n ones will be the empty string.\n\n Parameters\n ----------\n a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype\n Input array\n sep : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype\n Separator to split each string element in ``a``.\n\n Returns\n -------\n out : 3-tuple:\n - array with ``StringDType``, ``bytes_`` or ``str_`` dtype with the\n part before the separator\n - array with ``StringDType``, ``bytes_`` or ``str_`` dtype with the\n separator\n - array with ``StringDType``, ``bytes_`` or ``str_`` dtype with the\n part after the separator\n\n See Also\n --------\n str.partition\n\n Examples\n --------\n >>> import numpy as np\n >>> x = np.array(["Numpy is nice!"])\n >>> np.strings.partition(x, " ")\n (array(['Numpy'], dtype='<U5'),\n array([' '], dtype='<U1'),\n array(['is nice!'], dtype='<U8'))\n\n """\n a = np.asanyarray(a)\n sep = np.asanyarray(sep)\n\n if np.result_type(a, sep).char == "T":\n return _partition(a, sep)\n\n sep = sep.astype(a.dtype, copy=False)\n pos = _find_ufunc(a, sep, 0, MAX)\n a_len = str_len(a)\n sep_len = str_len(sep)\n\n not_found = pos < 0\n buffersizes1 = np.where(not_found, a_len, pos)\n buffersizes3 = np.where(not_found, 0, a_len - pos - sep_len)\n\n out_dtype = ",".join([f"{a.dtype.char}{n}" for n in (\n buffersizes1.max(),\n 1 if np.all(not_found) else sep_len.max(),\n buffersizes3.max(),\n )])\n shape = np.broadcast_shapes(a.shape, sep.shape)\n out = np.empty_like(a, 
shape=shape, dtype=out_dtype)\n return _partition_index(a, sep, pos, out=(out["f0"], out["f1"], out["f2"]))\n\n\n@set_module("numpy.strings")\n@array_function_dispatch(_partition_dispatcher)\ndef rpartition(a, sep):\n """\n Partition (split) each element around the right-most separator.\n\n For each element in ``a``, split the element at the last\n occurrence of ``sep``, and return a 3-tuple containing the part\n before the separator, the separator itself, and the part after\n the separator. If the separator is not found, the third item of\n the tuple will contain the whole string, and the first and second\n ones will be the empty string.\n\n Parameters\n ----------\n a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype\n Input array\n sep : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype\n Separator to split each string element in ``a``.\n\n Returns\n -------\n out : 3-tuple:\n - array with ``StringDType``, ``bytes_`` or ``str_`` dtype with the\n part before the separator\n - array with ``StringDType``, ``bytes_`` or ``str_`` dtype with the\n separator\n - array with ``StringDType``, ``bytes_`` or ``str_`` dtype with the\n part after the separator\n\n See Also\n --------\n str.rpartition\n\n Examples\n --------\n >>> import numpy as np\n >>> a = np.array(['aAaAaA', ' aA ', 'abBABba'])\n >>> np.strings.rpartition(a, 'A')\n (array(['aAaAa', ' a', 'abB'], dtype='<U5'),\n array(['A', 'A', 'A'], dtype='<U1'),\n array(['', ' ', 'Bba'], dtype='<U3'))\n\n """\n a = np.asanyarray(a)\n sep = np.asanyarray(sep)\n\n if np.result_type(a, sep).char == "T":\n return _rpartition(a, sep)\n\n sep = sep.astype(a.dtype, copy=False)\n pos = _rfind_ufunc(a, sep, 0, MAX)\n a_len = str_len(a)\n sep_len = str_len(sep)\n\n not_found = pos < 0\n buffersizes1 = np.where(not_found, 0, pos)\n buffersizes3 = np.where(not_found, a_len, a_len - pos - sep_len)\n\n out_dtype = ",".join([f"{a.dtype.char}{n}" for n in (\n buffersizes1.max(),\n 1 if np.all(not_found) 
else sep_len.max(),\n buffersizes3.max(),\n )])\n shape = np.broadcast_shapes(a.shape, sep.shape)\n out = np.empty_like(a, shape=shape, dtype=out_dtype)\n return _rpartition_index(\n a, sep, pos, out=(out["f0"], out["f1"], out["f2"]))\n\n\ndef _translate_dispatcher(a, table, deletechars=None):\n return (a,)\n\n\n@set_module("numpy.strings")\n@array_function_dispatch(_translate_dispatcher)\ndef translate(a, table, deletechars=None):\n """\n For each element in `a`, return a copy of the string where all\n characters occurring in the optional argument `deletechars` are\n removed, and the remaining characters have been mapped through the\n given translation table.\n\n Calls :meth:`str.translate` element-wise.\n\n Parameters\n ----------\n a : array-like, with `np.bytes_` or `np.str_` dtype\n\n table : str of length 256\n\n deletechars : str\n\n Returns\n -------\n out : ndarray\n Output array of str or unicode, depending on input type\n\n See Also\n --------\n str.translate\n\n Examples\n --------\n >>> import numpy as np\n >>> a = np.array(['a1b c', '1bca', 'bca1'])\n >>> table = a[0].maketrans('abc', '123')\n >>> deletechars = ' '\n >>> np.char.translate(a, table, deletechars)\n array(['112 3', '1231', '2311'], dtype='<U5')\n\n """\n a_arr = np.asarray(a)\n if issubclass(a_arr.dtype.type, np.str_):\n return _vec_string(\n a_arr, a_arr.dtype, 'translate', (table,))\n else:\n return _vec_string(\n a_arr,\n a_arr.dtype,\n 'translate',\n [table] + _clean_args(deletechars)\n )\n\n@set_module("numpy.strings")\ndef slice(a, start=None, stop=None, step=None, /):\n """\n Slice the strings in `a` by slices specified by `start`, `stop`, `step`.\n Like in the regular Python `slice` object, if only `start` is\n specified then it is interpreted as the `stop`.\n\n Parameters\n ----------\n a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype\n Input array\n\n start : None, an integer or an array of integers\n The start of the slice, broadcasted to `a`'s shape\n\n 
stop : None, an integer or an array of integers\n The end of the slice, broadcasted to `a`'s shape\n\n step : None, an integer or an array of integers\n The step for the slice, broadcasted to `a`'s shape\n\n Returns\n -------\n out : ndarray\n Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype,\n depending on input type\n\n Examples\n --------\n >>> import numpy as np\n >>> a = np.array(['hello', 'world'])\n >>> np.strings.slice(a, 2)\n array(['he', 'wo'], dtype='<U5')\n\n >>> np.strings.slice(a, 1, 5, 2)\n array(['el', 'ol'], dtype='<U5')\n\n One can specify different start/stop/step for different array entries:\n\n >>> np.strings.slice(a, np.array([1, 2]), np.array([4, 5]))\n array(['ell', 'rld'], dtype='<U5')\n\n Negative slices have the same meaning as in regular Python:\n\n >>> b = np.array(['hello world', 'γεια σου κόσμε', '你好世界', '👋 🌍'],\n ... dtype=np.dtypes.StringDType())\n >>> np.strings.slice(b, -2)\n array(['hello wor', 'γεια σου κόσ', '你好', '👋'], dtype=StringDType())\n\n >>> np.strings.slice(b, [3, -10, 2, -3], [-1, -2, -1, 3])\n array(['lo worl', ' σου κόσ', '世', '👋 🌍'], dtype=StringDType())\n\n >>> np.strings.slice(b, None, None, -1)\n array(['dlrow olleh', 'εμσόκ υοσ αιεγ', '界世好你', '🌍 👋'],\n dtype=StringDType())\n\n """\n # Just like in the construction of a regular slice object, if only start\n # is specified then start will become stop, see logic in slice_new.\n if stop is None:\n stop = start\n start = None\n\n # adjust start, stop, step to be integers, see logic in PySlice_Unpack\n if step is None:\n step = 1\n step = np.asanyarray(step)\n if not np.issubdtype(step.dtype, np.integer):\n raise TypeError(f"unsupported type {step.dtype} for operand 'step'")\n if np.any(step == 0):\n raise ValueError("slice step cannot be zero")\n\n if start is None:\n start = np.where(step < 0, np.iinfo(np.intp).max, 0)\n\n if stop is None:\n stop = np.where(step < 0, np.iinfo(np.intp).min, np.iinfo(np.intp).max)\n\n return _slice(a, start, stop, step)\n
.venv\Lib\site-packages\numpy\_core\strings.py
strings.py
Python
52,465
0.75
0.067471
0.013268
react-lib
677
2023-08-02T10:08:07.401057
GPL-3.0
false
414afd2eed2576e17e2f44c873605299
from typing import TypeAlias, overload\n\nimport numpy as np\nfrom numpy._typing import NDArray, _AnyShape, _SupportsArray\nfrom numpy._typing import _ArrayLikeAnyString_co as UST_co\nfrom numpy._typing import _ArrayLikeBytes_co as S_co\nfrom numpy._typing import _ArrayLikeInt_co as i_co\nfrom numpy._typing import _ArrayLikeStr_co as U_co\nfrom numpy._typing import _ArrayLikeString_co as T_co\n\n__all__ = [\n "add",\n "capitalize",\n "center",\n "count",\n "decode",\n "encode",\n "endswith",\n "equal",\n "expandtabs",\n "find",\n "greater",\n "greater_equal",\n "index",\n "isalnum",\n "isalpha",\n "isdecimal",\n "isdigit",\n "islower",\n "isnumeric",\n "isspace",\n "istitle",\n "isupper",\n "less",\n "less_equal",\n "ljust",\n "lower",\n "lstrip",\n "mod",\n "multiply",\n "not_equal",\n "partition",\n "replace",\n "rfind",\n "rindex",\n "rjust",\n "rpartition",\n "rstrip",\n "startswith",\n "str_len",\n "strip",\n "swapcase",\n "title",\n "translate",\n "upper",\n "zfill",\n "slice",\n]\n\n_StringDTypeArray: TypeAlias = np.ndarray[_AnyShape, np.dtypes.StringDType]\n_StringDTypeSupportsArray: TypeAlias = _SupportsArray[np.dtypes.StringDType]\n_StringDTypeOrUnicodeArray: TypeAlias = np.ndarray[_AnyShape, np.dtype[np.str_]] | _StringDTypeArray\n\n@overload\ndef equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ...\n@overload\ndef equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ...\n@overload\ndef equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ...\n\n@overload\ndef not_equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ...\n@overload\ndef not_equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ...\n@overload\ndef not_equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ...\n\n@overload\ndef greater_equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ...\n@overload\ndef greater_equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ...\n@overload\ndef greater_equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ...\n\n@overload\ndef less_equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ...\n@overload\ndef 
less_equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ...\n@overload\ndef less_equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ...\n\n@overload\ndef greater(x1: U_co, x2: U_co) -> NDArray[np.bool]: ...\n@overload\ndef greater(x1: S_co, x2: S_co) -> NDArray[np.bool]: ...\n@overload\ndef greater(x1: T_co, x2: T_co) -> NDArray[np.bool]: ...\n\n@overload\ndef less(x1: U_co, x2: U_co) -> NDArray[np.bool]: ...\n@overload\ndef less(x1: S_co, x2: S_co) -> NDArray[np.bool]: ...\n@overload\ndef less(x1: T_co, x2: T_co) -> NDArray[np.bool]: ...\n\n@overload\ndef add(x1: U_co, x2: U_co) -> NDArray[np.str_]: ...\n@overload\ndef add(x1: S_co, x2: S_co) -> NDArray[np.bytes_]: ...\n@overload\ndef add(x1: _StringDTypeSupportsArray, x2: _StringDTypeSupportsArray) -> _StringDTypeArray: ...\n@overload\ndef add(x1: T_co, x2: T_co) -> _StringDTypeOrUnicodeArray: ...\n\n@overload\ndef multiply(a: U_co, i: i_co) -> NDArray[np.str_]: ...\n@overload\ndef multiply(a: S_co, i: i_co) -> NDArray[np.bytes_]: ...\n@overload\ndef multiply(a: _StringDTypeSupportsArray, i: i_co) -> _StringDTypeArray: ...\n@overload\ndef multiply(a: T_co, i: i_co) -> _StringDTypeOrUnicodeArray: ...\n\n@overload\ndef mod(a: U_co, value: object) -> NDArray[np.str_]: ...\n@overload\ndef mod(a: S_co, value: object) -> NDArray[np.bytes_]: ...\n@overload\ndef mod(a: _StringDTypeSupportsArray, value: object) -> _StringDTypeArray: ...\n@overload\ndef mod(a: T_co, value: object) -> _StringDTypeOrUnicodeArray: ...\n\ndef isalpha(x: UST_co) -> NDArray[np.bool]: ...\ndef isalnum(a: UST_co) -> NDArray[np.bool]: ...\ndef isdigit(x: UST_co) -> NDArray[np.bool]: ...\ndef isspace(x: UST_co) -> NDArray[np.bool]: ...\ndef isdecimal(x: U_co | T_co) -> NDArray[np.bool]: ...\ndef isnumeric(x: U_co | T_co) -> NDArray[np.bool]: ...\ndef islower(a: UST_co) -> NDArray[np.bool]: ...\ndef istitle(a: UST_co) -> NDArray[np.bool]: ...\ndef isupper(a: UST_co) -> NDArray[np.bool]: ...\n\ndef str_len(x: UST_co) -> NDArray[np.int_]: ...\n\n@overload\ndef 
find(\n a: U_co,\n sub: U_co,\n start: i_co = ...,\n end: i_co | None = ...,\n) -> NDArray[np.int_]: ...\n@overload\ndef find(\n a: S_co,\n sub: S_co,\n start: i_co = ...,\n end: i_co | None = ...,\n) -> NDArray[np.int_]: ...\n@overload\ndef find(\n a: T_co,\n sub: T_co,\n start: i_co = ...,\n end: i_co | None = ...,\n) -> NDArray[np.int_]: ...\n\n@overload\ndef rfind(\n a: U_co,\n sub: U_co,\n start: i_co = ...,\n end: i_co | None = ...,\n) -> NDArray[np.int_]: ...\n@overload\ndef rfind(\n a: S_co,\n sub: S_co,\n start: i_co = ...,\n end: i_co | None = ...,\n) -> NDArray[np.int_]: ...\n@overload\ndef rfind(\n a: T_co,\n sub: T_co,\n start: i_co = ...,\n end: i_co | None = ...,\n) -> NDArray[np.int_]: ...\n\n@overload\ndef index(\n a: U_co,\n sub: U_co,\n start: i_co = ...,\n end: i_co | None = ...,\n) -> NDArray[np.int_]: ...\n@overload\ndef index(\n a: S_co,\n sub: S_co,\n start: i_co = ...,\n end: i_co | None = ...,\n) -> NDArray[np.int_]: ...\n@overload\ndef index(\n a: T_co,\n sub: T_co,\n start: i_co = ...,\n end: i_co | None = ...,\n) -> NDArray[np.int_]: ...\n\n@overload\ndef rindex(\n a: U_co,\n sub: U_co,\n start: i_co = ...,\n end: i_co | None = ...,\n) -> NDArray[np.int_]: ...\n@overload\ndef rindex(\n a: S_co,\n sub: S_co,\n start: i_co = ...,\n end: i_co | None = ...,\n) -> NDArray[np.int_]: ...\n@overload\ndef rindex(\n a: T_co,\n sub: T_co,\n start: i_co = ...,\n end: i_co | None = ...,\n) -> NDArray[np.int_]: ...\n\n@overload\ndef count(\n a: U_co,\n sub: U_co,\n start: i_co = ...,\n end: i_co | None = ...,\n) -> NDArray[np.int_]: ...\n@overload\ndef count(\n a: S_co,\n sub: S_co,\n start: i_co = ...,\n end: i_co | None = ...,\n) -> NDArray[np.int_]: ...\n@overload\ndef count(\n a: T_co,\n sub: T_co,\n start: i_co = ...,\n end: i_co | None = ...,\n) -> NDArray[np.int_]: ...\n\n@overload\ndef startswith(\n a: U_co,\n prefix: U_co,\n start: i_co = ...,\n end: i_co | None = ...,\n) -> NDArray[np.bool]: ...\n@overload\ndef startswith(\n a: S_co,\n 
prefix: S_co,\n start: i_co = ...,\n end: i_co | None = ...,\n) -> NDArray[np.bool]: ...\n@overload\ndef startswith(\n a: T_co,\n prefix: T_co,\n start: i_co = ...,\n end: i_co | None = ...,\n) -> NDArray[np.bool]: ...\n\n@overload\ndef endswith(\n a: U_co,\n suffix: U_co,\n start: i_co = ...,\n end: i_co | None = ...,\n) -> NDArray[np.bool]: ...\n@overload\ndef endswith(\n a: S_co,\n suffix: S_co,\n start: i_co = ...,\n end: i_co | None = ...,\n) -> NDArray[np.bool]: ...\n@overload\ndef endswith(\n a: T_co,\n suffix: T_co,\n start: i_co = ...,\n end: i_co | None = ...,\n) -> NDArray[np.bool]: ...\n\ndef decode(\n a: S_co,\n encoding: str | None = None,\n errors: str | None = None,\n) -> NDArray[np.str_]: ...\ndef encode(\n a: U_co | T_co,\n encoding: str | None = None,\n errors: str | None = None,\n) -> NDArray[np.bytes_]: ...\n\n@overload\ndef expandtabs(a: U_co, tabsize: i_co = ...) -> NDArray[np.str_]: ...\n@overload\ndef expandtabs(a: S_co, tabsize: i_co = ...) -> NDArray[np.bytes_]: ...\n@overload\ndef expandtabs(a: _StringDTypeSupportsArray, tabsize: i_co = ...) -> _StringDTypeArray: ...\n@overload\ndef expandtabs(a: T_co, tabsize: i_co = ...) 
-> _StringDTypeOrUnicodeArray: ...\n\n@overload\ndef center(a: U_co, width: i_co, fillchar: UST_co = " ") -> NDArray[np.str_]: ...\n@overload\ndef center(a: S_co, width: i_co, fillchar: UST_co = " ") -> NDArray[np.bytes_]: ...\n@overload\ndef center(a: _StringDTypeSupportsArray, width: i_co, fillchar: UST_co = " ") -> _StringDTypeArray: ...\n@overload\ndef center(a: T_co, width: i_co, fillchar: UST_co = " ") -> _StringDTypeOrUnicodeArray: ...\n\n@overload\ndef ljust(a: U_co, width: i_co, fillchar: UST_co = " ") -> NDArray[np.str_]: ...\n@overload\ndef ljust(a: S_co, width: i_co, fillchar: UST_co = " ") -> NDArray[np.bytes_]: ...\n@overload\ndef ljust(a: _StringDTypeSupportsArray, width: i_co, fillchar: UST_co = " ") -> _StringDTypeArray: ...\n@overload\ndef ljust(a: T_co, width: i_co, fillchar: UST_co = " ") -> _StringDTypeOrUnicodeArray: ...\n\n@overload\ndef rjust(a: U_co, width: i_co, fillchar: UST_co = " ") -> NDArray[np.str_]: ...\n@overload\ndef rjust(a: S_co, width: i_co, fillchar: UST_co = " ") -> NDArray[np.bytes_]: ...\n@overload\ndef rjust(a: _StringDTypeSupportsArray, width: i_co, fillchar: UST_co = " ") -> _StringDTypeArray: ...\n@overload\ndef rjust(a: T_co, width: i_co, fillchar: UST_co = " ") -> _StringDTypeOrUnicodeArray: ...\n\n@overload\ndef lstrip(a: U_co, chars: U_co | None = None) -> NDArray[np.str_]: ...\n@overload\ndef lstrip(a: S_co, chars: S_co | None = None) -> NDArray[np.bytes_]: ...\n@overload\ndef lstrip(a: _StringDTypeSupportsArray, chars: T_co | None = None) -> _StringDTypeArray: ...\n@overload\ndef lstrip(a: T_co, chars: T_co | None = None) -> _StringDTypeOrUnicodeArray: ...\n\n@overload\ndef rstrip(a: U_co, chars: U_co | None = None) -> NDArray[np.str_]: ...\n@overload\ndef rstrip(a: S_co, chars: S_co | None = None) -> NDArray[np.bytes_]: ...\n@overload\ndef rstrip(a: _StringDTypeSupportsArray, chars: T_co | None = None) -> _StringDTypeArray: ...\n@overload\ndef rstrip(a: T_co, chars: T_co | None = None) -> 
_StringDTypeOrUnicodeArray: ...\n\n@overload\ndef strip(a: U_co, chars: U_co | None = None) -> NDArray[np.str_]: ...\n@overload\ndef strip(a: S_co, chars: S_co | None = None) -> NDArray[np.bytes_]: ...\n@overload\ndef strip(a: _StringDTypeSupportsArray, chars: T_co | None = None) -> _StringDTypeArray: ...\n@overload\ndef strip(a: T_co, chars: T_co | None = None) -> _StringDTypeOrUnicodeArray: ...\n\n@overload\ndef zfill(a: U_co, width: i_co) -> NDArray[np.str_]: ...\n@overload\ndef zfill(a: S_co, width: i_co) -> NDArray[np.bytes_]: ...\n@overload\ndef zfill(a: _StringDTypeSupportsArray, width: i_co) -> _StringDTypeArray: ...\n@overload\ndef zfill(a: T_co, width: i_co) -> _StringDTypeOrUnicodeArray: ...\n\n@overload\ndef upper(a: U_co) -> NDArray[np.str_]: ...\n@overload\ndef upper(a: S_co) -> NDArray[np.bytes_]: ...\n@overload\ndef upper(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ...\n@overload\ndef upper(a: T_co) -> _StringDTypeOrUnicodeArray: ...\n\n@overload\ndef lower(a: U_co) -> NDArray[np.str_]: ...\n@overload\ndef lower(a: S_co) -> NDArray[np.bytes_]: ...\n@overload\ndef lower(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ...\n@overload\ndef lower(a: T_co) -> _StringDTypeOrUnicodeArray: ...\n\n@overload\ndef swapcase(a: U_co) -> NDArray[np.str_]: ...\n@overload\ndef swapcase(a: S_co) -> NDArray[np.bytes_]: ...\n@overload\ndef swapcase(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ...\n@overload\ndef swapcase(a: T_co) -> _StringDTypeOrUnicodeArray: ...\n\n@overload\ndef capitalize(a: U_co) -> NDArray[np.str_]: ...\n@overload\ndef capitalize(a: S_co) -> NDArray[np.bytes_]: ...\n@overload\ndef capitalize(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ...\n@overload\ndef capitalize(a: T_co) -> _StringDTypeOrUnicodeArray: ...\n\n@overload\ndef title(a: U_co) -> NDArray[np.str_]: ...\n@overload\ndef title(a: S_co) -> NDArray[np.bytes_]: ...\n@overload\ndef title(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ...\n@overload\ndef 
title(a: T_co) -> _StringDTypeOrUnicodeArray: ...\n\n@overload\ndef replace(\n a: U_co,\n old: U_co,\n new: U_co,\n count: i_co = ...,\n) -> NDArray[np.str_]: ...\n@overload\ndef replace(\n a: S_co,\n old: S_co,\n new: S_co,\n count: i_co = ...,\n) -> NDArray[np.bytes_]: ...\n@overload\ndef replace(\n a: _StringDTypeSupportsArray,\n old: _StringDTypeSupportsArray,\n new: _StringDTypeSupportsArray,\n count: i_co = ...,\n) -> _StringDTypeArray: ...\n@overload\ndef replace(\n a: T_co,\n old: T_co,\n new: T_co,\n count: i_co = ...,\n) -> _StringDTypeOrUnicodeArray: ...\n\n@overload\ndef partition(a: U_co, sep: U_co) -> NDArray[np.str_]: ...\n@overload\ndef partition(a: S_co, sep: S_co) -> NDArray[np.bytes_]: ...\n@overload\ndef partition(a: _StringDTypeSupportsArray, sep: _StringDTypeSupportsArray) -> _StringDTypeArray: ...\n@overload\ndef partition(a: T_co, sep: T_co) -> _StringDTypeOrUnicodeArray: ...\n\n@overload\ndef rpartition(a: U_co, sep: U_co) -> NDArray[np.str_]: ...\n@overload\ndef rpartition(a: S_co, sep: S_co) -> NDArray[np.bytes_]: ...\n@overload\ndef rpartition(a: _StringDTypeSupportsArray, sep: _StringDTypeSupportsArray) -> _StringDTypeArray: ...\n@overload\ndef rpartition(a: T_co, sep: T_co) -> _StringDTypeOrUnicodeArray: ...\n\n@overload\ndef translate(\n a: U_co,\n table: str,\n deletechars: str | None = None,\n) -> NDArray[np.str_]: ...\n@overload\ndef translate(\n a: S_co,\n table: str,\n deletechars: str | None = None,\n) -> NDArray[np.bytes_]: ...\n@overload\ndef translate(\n a: _StringDTypeSupportsArray,\n table: str,\n deletechars: str | None = None,\n) -> _StringDTypeArray: ...\n@overload\ndef translate(\n a: T_co,\n table: str,\n deletechars: str | None = None,\n) -> _StringDTypeOrUnicodeArray: ...\n\n#\n@overload\ndef slice(a: U_co, start: i_co | None = None, stop: i_co | None = None, step: i_co | None = None, /) -> NDArray[np.str_]: ... 
# type: ignore[overload-overlap]\n@overload\ndef slice(a: S_co, start: i_co | None = None, stop: i_co | None = None, step: i_co | None = None, /) -> NDArray[np.bytes_]: ...\n@overload\ndef slice(\n a: _StringDTypeSupportsArray, start: i_co | None = None, stop: i_co | None = None, step: i_co | None = None, /\n) -> _StringDTypeArray: ...\n@overload\ndef slice(\n a: T_co, start: i_co | None = None, stop: i_co | None = None, step: i_co | None = None, /\n) -> _StringDTypeOrUnicodeArray: ...\n
.venv\Lib\site-packages\numpy\_core\strings.pyi
strings.pyi
Other
14,013
0.95
0.264188
0.002123
awesome-app
290
2024-06-26T10:54:44.824590
Apache-2.0
false
e13b962d5a7cfc6b164956b64636d6ac
"""\nCreate the numpy._core.umath namespace for backward compatibility. In v1.16\nthe multiarray and umath c-extension modules were merged into a single\n_multiarray_umath extension module. So we replicate the old namespace\nby importing from the extension module.\n\n"""\n\nimport numpy\n\nfrom . import _multiarray_umath\nfrom ._multiarray_umath import *\n\n# These imports are needed for backward compatibility,\n# do not change them. issue gh-11862\n# _ones_like is semi-public, on purpose not added to __all__\n# These imports are needed for the strip & replace implementations\nfrom ._multiarray_umath import (\n _UFUNC_API,\n _add_newdoc_ufunc,\n _center,\n _expandtabs,\n _expandtabs_length,\n _extobj_contextvar,\n _get_extobj_dict,\n _ljust,\n _lstrip_chars,\n _lstrip_whitespace,\n _make_extobj,\n _ones_like,\n _partition,\n _partition_index,\n _replace,\n _rjust,\n _rpartition,\n _rpartition_index,\n _rstrip_chars,\n _rstrip_whitespace,\n _slice,\n _strip_chars,\n _strip_whitespace,\n _zfill,\n)\n\n__all__ = [\n 'absolute', 'add',\n 'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh',\n 'bitwise_and', 'bitwise_or', 'bitwise_xor', 'cbrt', 'ceil', 'conj',\n 'conjugate', 'copysign', 'cos', 'cosh', 'bitwise_count', 'deg2rad',\n 'degrees', 'divide', 'divmod', 'e', 'equal', 'euler_gamma', 'exp', 'exp2',\n 'expm1', 'fabs', 'floor', 'floor_divide', 'float_power', 'fmax', 'fmin',\n 'fmod', 'frexp', 'frompyfunc', 'gcd', 'greater', 'greater_equal',\n 'heaviside', 'hypot', 'invert', 'isfinite', 'isinf', 'isnan', 'isnat',\n 'lcm', 'ldexp', 'left_shift', 'less', 'less_equal', 'log', 'log10',\n 'log1p', 'log2', 'logaddexp', 'logaddexp2', 'logical_and', 'logical_not',\n 'logical_or', 'logical_xor', 'matvec', 'maximum', 'minimum', 'mod', 'modf',\n 'multiply', 'negative', 'nextafter', 'not_equal', 'pi', 'positive',\n 'power', 'rad2deg', 'radians', 'reciprocal', 'remainder', 'right_shift',\n 'rint', 'sign', 'signbit', 'sin', 'sinh', 'spacing', 'sqrt', 
'square',\n 'subtract', 'tan', 'tanh', 'true_divide', 'trunc', 'vecdot', 'vecmat']\n
.venv\Lib\site-packages\numpy\_core\umath.py
umath.py
Python
2,190
0.95
0.05
0.072727
vue-tools
172
2024-11-22T07:28:41.887589
GPL-3.0
false
3c01b8cc67783ad630f9a746ab5012c4
from numpy import (\n absolute,\n add,\n arccos,\n arccosh,\n arcsin,\n arcsinh,\n arctan,\n arctan2,\n arctanh,\n bitwise_and,\n bitwise_count,\n bitwise_or,\n bitwise_xor,\n cbrt,\n ceil,\n conj,\n conjugate,\n copysign,\n cos,\n cosh,\n deg2rad,\n degrees,\n divide,\n divmod,\n e,\n equal,\n euler_gamma,\n exp,\n exp2,\n expm1,\n fabs,\n float_power,\n floor,\n floor_divide,\n fmax,\n fmin,\n fmod,\n frexp,\n frompyfunc,\n gcd,\n greater,\n greater_equal,\n heaviside,\n hypot,\n invert,\n isfinite,\n isinf,\n isnan,\n isnat,\n lcm,\n ldexp,\n left_shift,\n less,\n less_equal,\n log,\n log1p,\n log2,\n log10,\n logaddexp,\n logaddexp2,\n logical_and,\n logical_not,\n logical_or,\n logical_xor,\n matvec,\n maximum,\n minimum,\n mod,\n modf,\n multiply,\n negative,\n nextafter,\n not_equal,\n pi,\n positive,\n power,\n rad2deg,\n radians,\n reciprocal,\n remainder,\n right_shift,\n rint,\n sign,\n signbit,\n sin,\n sinh,\n spacing,\n sqrt,\n square,\n subtract,\n tan,\n tanh,\n true_divide,\n trunc,\n vecdot,\n vecmat,\n)\n\n__all__ = [\n "absolute",\n "add",\n "arccos",\n "arccosh",\n "arcsin",\n "arcsinh",\n "arctan",\n "arctan2",\n "arctanh",\n "bitwise_and",\n "bitwise_count",\n "bitwise_or",\n "bitwise_xor",\n "cbrt",\n "ceil",\n "conj",\n "conjugate",\n "copysign",\n "cos",\n "cosh",\n "deg2rad",\n "degrees",\n "divide",\n "divmod",\n "e",\n "equal",\n "euler_gamma",\n "exp",\n "exp2",\n "expm1",\n "fabs",\n "float_power",\n "floor",\n "floor_divide",\n "fmax",\n "fmin",\n "fmod",\n "frexp",\n "frompyfunc",\n "gcd",\n "greater",\n "greater_equal",\n "heaviside",\n "hypot",\n "invert",\n "isfinite",\n "isinf",\n "isnan",\n "isnat",\n "lcm",\n "ldexp",\n "left_shift",\n "less",\n "less_equal",\n "log",\n "log1p",\n "log2",\n "log10",\n "logaddexp",\n "logaddexp2",\n "logical_and",\n "logical_not",\n "logical_or",\n "logical_xor",\n "matvec",\n "maximum",\n "minimum",\n "mod",\n "modf",\n "multiply",\n "negative",\n "nextafter",\n "not_equal",\n "pi",\n 
"positive",\n "power",\n "rad2deg",\n "radians",\n "reciprocal",\n "remainder",\n "right_shift",\n "rint",\n "sign",\n "signbit",\n "sin",\n "sinh",\n "spacing",\n "sqrt",\n "square",\n "subtract",\n "tan",\n "tanh",\n "true_divide",\n "trunc",\n "vecdot",\n "vecmat",\n]\n
.venv\Lib\site-packages\numpy\_core\umath.pyi
umath.pyi
Other
2,840
0.85
0
0
react-lib
178
2023-10-11T11:13:40.783793
Apache-2.0
false
82ae63f03248984c7273c328164ab02f
from .overrides import get_array_function_like_doc as get_array_function_like_doc\n\ndef refer_to_array_attribute(attr: str, method: bool = True) -> tuple[str, str]: ...\n
.venv\Lib\site-packages\numpy\_core\_add_newdocs.pyi
_add_newdocs.pyi
Other
171
0.85
0.333333
0
vue-tools
63
2024-06-22T18:58:48.820127
MIT
false
2a1d81b1743028667859959de814ff20
"""\nThis file is separate from ``_add_newdocs.py`` so that it can be mocked out by\nour sphinx ``conf.py`` during doc builds, where we want to avoid showing\nplatform-dependent information.\n"""\nimport os\nimport sys\n\nfrom numpy._core import dtype\nfrom numpy._core import numerictypes as _numerictypes\nfrom numpy._core.function_base import add_newdoc\n\n##############################################################################\n#\n# Documentation for concrete scalar classes\n#\n##############################################################################\n\ndef numeric_type_aliases(aliases):\n def type_aliases_gen():\n for alias, doc in aliases:\n try:\n alias_type = getattr(_numerictypes, alias)\n except AttributeError:\n # The set of aliases that actually exist varies between platforms\n pass\n else:\n yield (alias_type, alias, doc)\n return list(type_aliases_gen())\n\n\npossible_aliases = numeric_type_aliases([\n ('int8', '8-bit signed integer (``-128`` to ``127``)'),\n ('int16', '16-bit signed integer (``-32_768`` to ``32_767``)'),\n ('int32', '32-bit signed integer (``-2_147_483_648`` to ``2_147_483_647``)'),\n ('int64', '64-bit signed integer (``-9_223_372_036_854_775_808`` to ``9_223_372_036_854_775_807``)'),\n ('intp', 'Signed integer large enough to fit pointer, compatible with C ``intptr_t``'),\n ('uint8', '8-bit unsigned integer (``0`` to ``255``)'),\n ('uint16', '16-bit unsigned integer (``0`` to ``65_535``)'),\n ('uint32', '32-bit unsigned integer (``0`` to ``4_294_967_295``)'),\n ('uint64', '64-bit unsigned integer (``0`` to ``18_446_744_073_709_551_615``)'),\n ('uintp', 'Unsigned integer large enough to fit pointer, compatible with C ``uintptr_t``'),\n ('float16', '16-bit-precision floating-point number type: sign bit, 5 bits exponent, 10 bits mantissa'),\n ('float32', '32-bit-precision floating-point number type: sign bit, 8 bits exponent, 23 bits mantissa'),\n ('float64', '64-bit precision floating-point number type: sign bit, 11 bits 
exponent, 52 bits mantissa'),\n ('float96', '96-bit extended-precision floating-point number type'),\n ('float128', '128-bit extended-precision floating-point number type'),\n ('complex64', 'Complex number type composed of 2 32-bit-precision floating-point numbers'),\n ('complex128', 'Complex number type composed of 2 64-bit-precision floating-point numbers'),\n ('complex192', 'Complex number type composed of 2 96-bit extended-precision floating-point numbers'),\n ('complex256', 'Complex number type composed of 2 128-bit extended-precision floating-point numbers'),\n ])\n\n\ndef _get_platform_and_machine():\n try:\n system, _, _, _, machine = os.uname()\n except AttributeError:\n system = sys.platform\n if system == 'win32':\n machine = os.environ.get('PROCESSOR_ARCHITEW6432', '') \\n or os.environ.get('PROCESSOR_ARCHITECTURE', '')\n else:\n machine = 'unknown'\n return system, machine\n\n\n_system, _machine = _get_platform_and_machine()\n_doc_alias_string = f":Alias on this platform ({_system} {_machine}):"\n\n\ndef add_newdoc_for_scalar_type(obj, fixed_aliases, doc):\n # note: `:field: value` is rST syntax which renders as field lists.\n o = getattr(_numerictypes, obj)\n\n character_code = dtype(o).char\n canonical_name_doc = "" if obj == o.__name__ else \\n f":Canonical name: `numpy.{obj}`\n "\n if fixed_aliases:\n alias_doc = ''.join(f":Alias: `numpy.{alias}`\n "\n for alias in fixed_aliases)\n else:\n alias_doc = ''\n alias_doc += ''.join(f"{_doc_alias_string} `numpy.{alias}`: {doc}.\n "\n for (alias_type, alias, doc) in possible_aliases if alias_type is o)\n\n docstring = f"""\n {doc.strip()}\n\n :Character code: ``'{character_code}'``\n {canonical_name_doc}{alias_doc}\n """\n\n add_newdoc('numpy._core.numerictypes', obj, docstring)\n\n\n_bool_docstring = (\n """\n Boolean type (True or False), stored as a byte.\n\n .. warning::\n\n The :class:`bool` type is not a subclass of the :class:`int_` type\n (the :class:`bool` is not even a number type). 
This is different\n than Python's default implementation of :class:`bool` as a\n sub-class of :class:`int`.\n """\n)\n\nadd_newdoc_for_scalar_type('bool', [], _bool_docstring)\n\nadd_newdoc_for_scalar_type('bool_', [], _bool_docstring)\n\nadd_newdoc_for_scalar_type('byte', [],\n """\n Signed integer type, compatible with C ``char``.\n """)\n\nadd_newdoc_for_scalar_type('short', [],\n """\n Signed integer type, compatible with C ``short``.\n """)\n\nadd_newdoc_for_scalar_type('intc', [],\n """\n Signed integer type, compatible with C ``int``.\n """)\n\n# TODO: These docs probably need an if to highlight the default rather than\n# the C-types (and be correct).\nadd_newdoc_for_scalar_type('int_', [],\n """\n Default signed integer type, 64bit on 64bit systems and 32bit on 32bit\n systems.\n """)\n\nadd_newdoc_for_scalar_type('longlong', [],\n """\n Signed integer type, compatible with C ``long long``.\n """)\n\nadd_newdoc_for_scalar_type('ubyte', [],\n """\n Unsigned integer type, compatible with C ``unsigned char``.\n """)\n\nadd_newdoc_for_scalar_type('ushort', [],\n """\n Unsigned integer type, compatible with C ``unsigned short``.\n """)\n\nadd_newdoc_for_scalar_type('uintc', [],\n """\n Unsigned integer type, compatible with C ``unsigned int``.\n """)\n\nadd_newdoc_for_scalar_type('uint', [],\n """\n Unsigned signed integer type, 64bit on 64bit systems and 32bit on 32bit\n systems.\n """)\n\nadd_newdoc_for_scalar_type('ulonglong', [],\n """\n Signed integer type, compatible with C ``unsigned long long``.\n """)\n\nadd_newdoc_for_scalar_type('half', [],\n """\n Half-precision floating-point number type.\n """)\n\nadd_newdoc_for_scalar_type('single', [],\n """\n Single-precision floating-point number type, compatible with C ``float``.\n """)\n\nadd_newdoc_for_scalar_type('double', [],\n """\n Double-precision floating-point number type, compatible with Python\n :class:`float` and C ``double``.\n """)\n\nadd_newdoc_for_scalar_type('longdouble', [],\n """\n 
Extended-precision floating-point number type, compatible with C\n ``long double`` but not necessarily with IEEE 754 quadruple-precision.\n """)\n\nadd_newdoc_for_scalar_type('csingle', [],\n """\n Complex number type composed of two single-precision floating-point\n numbers.\n """)\n\nadd_newdoc_for_scalar_type('cdouble', [],\n """\n Complex number type composed of two double-precision floating-point\n numbers, compatible with Python :class:`complex`.\n """)\n\nadd_newdoc_for_scalar_type('clongdouble', [],\n """\n Complex number type composed of two extended-precision floating-point\n numbers.\n """)\n\nadd_newdoc_for_scalar_type('object_', [],\n """\n Any Python object.\n """)\n\nadd_newdoc_for_scalar_type('str_', [],\n r"""\n A unicode string.\n\n This type strips trailing null codepoints.\n\n >>> s = np.str_("abc\x00")\n >>> s\n 'abc'\n\n Unlike the builtin :class:`str`, this supports the\n :ref:`python:bufferobjects`, exposing its contents as UCS4:\n\n >>> m = memoryview(np.str_("abc"))\n >>> m.format\n '3w'\n >>> m.tobytes()\n b'a\x00\x00\x00b\x00\x00\x00c\x00\x00\x00'\n """)\n\nadd_newdoc_for_scalar_type('bytes_', [],\n r"""\n A byte string.\n\n When used in arrays, this type strips trailing null bytes.\n """)\n\nadd_newdoc_for_scalar_type('void', [],\n r"""\n np.void(length_or_data, /, dtype=None)\n\n Create a new structured or unstructured void scalar.\n\n Parameters\n ----------\n length_or_data : int, array-like, bytes-like, object\n One of multiple meanings (see notes). The length or\n bytes data of an unstructured void. Or alternatively,\n the data to be stored in the new scalar when `dtype`\n is provided.\n This can be an array-like, in which case an array may\n be returned.\n dtype : dtype, optional\n If provided the dtype of the new scalar. This dtype must\n be "void" dtype (i.e. a structured or unstructured void,\n see also :ref:`defining-structured-types`).\n\n .. 
versionadded:: 1.24\n\n Notes\n -----\n For historical reasons and because void scalars can represent both\n arbitrary byte data and structured dtypes, the void constructor\n has three calling conventions:\n\n 1. ``np.void(5)`` creates a ``dtype="V5"`` scalar filled with five\n ``\0`` bytes. The 5 can be a Python or NumPy integer.\n 2. ``np.void(b"bytes-like")`` creates a void scalar from the byte string.\n The dtype itemsize will match the byte string length, here ``"V10"``.\n 3. When a ``dtype=`` is passed the call is roughly the same as an\n array creation. However, a void scalar rather than array is returned.\n\n Please see the examples which show all three different conventions.\n\n Examples\n --------\n >>> np.void(5)\n np.void(b'\x00\x00\x00\x00\x00')\n >>> np.void(b'abcd')\n np.void(b'\x61\x62\x63\x64')\n >>> np.void((3.2, b'eggs'), dtype="d,S5")\n np.void((3.2, b'eggs'), dtype=[('f0', '<f8'), ('f1', 'S5')])\n >>> np.void(3, dtype=[('x', np.int8), ('y', np.int8)])\n np.void((3, 3), dtype=[('x', 'i1'), ('y', 'i1')])\n\n """)\n\nadd_newdoc_for_scalar_type('datetime64', [],\n """\n If created from a 64-bit integer, it represents an offset from\n ``1970-01-01T00:00:00``.\n If created from string, the string can be in ISO 8601 date\n or datetime format.\n\n When parsing a string to create a datetime object, if the string contains\n a trailing timezone (A 'Z' or a timezone offset), the timezone will be\n dropped and a User Warning is given.\n\n Datetime64 objects should be considered to be UTC and therefore have an\n offset of +0000.\n\n >>> np.datetime64(10, 'Y')\n np.datetime64('1980')\n >>> np.datetime64('1980', 'Y')\n np.datetime64('1980')\n >>> np.datetime64(10, 'D')\n np.datetime64('1970-01-11')\n\n See :ref:`arrays.datetime` for more information.\n """)\n\nadd_newdoc_for_scalar_type('timedelta64', [],\n """\n A timedelta stored as a 64-bit integer.\n\n See :ref:`arrays.datetime` for more information.\n """)\n\nadd_newdoc('numpy._core.numerictypes', 
"integer", ('is_integer',\n """\n integer.is_integer() -> bool\n\n Return ``True`` if the number is finite with integral value.\n\n .. versionadded:: 1.22\n\n Examples\n --------\n >>> import numpy as np\n >>> np.int64(-2).is_integer()\n True\n >>> np.uint32(5).is_integer()\n True\n """))\n\n# TODO: work out how to put this on the base class, np.floating\nfor float_name in ('half', 'single', 'double', 'longdouble'):\n add_newdoc('numpy._core.numerictypes', float_name, ('as_integer_ratio',\n f"""\n {float_name}.as_integer_ratio() -> (int, int)\n\n Return a pair of integers, whose ratio is exactly equal to the original\n floating point number, and with a positive denominator.\n Raise `OverflowError` on infinities and a `ValueError` on NaNs.\n\n >>> np.{float_name}(10.0).as_integer_ratio()\n (10, 1)\n >>> np.{float_name}(0.0).as_integer_ratio()\n (0, 1)\n >>> np.{float_name}(-.25).as_integer_ratio()\n (-1, 4)\n """))\n\n add_newdoc('numpy._core.numerictypes', float_name, ('is_integer',\n f"""\n {float_name}.is_integer() -> bool\n\n Return ``True`` if the floating point number is finite with integral\n value, and ``False`` otherwise.\n\n .. versionadded:: 1.22\n\n Examples\n --------\n >>> np.{float_name}(-2.0).is_integer()\n True\n >>> np.{float_name}(3.2).is_integer()\n False\n """))\n\nfor int_name in ('int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32',\n 'int64', 'uint64', 'int64', 'uint64', 'int64', 'uint64'):\n # Add negative examples for signed cases by checking typecode\n add_newdoc('numpy._core.numerictypes', int_name, ('bit_count',\n f"""\n {int_name}.bit_count() -> int\n\n Computes the number of 1-bits in the absolute value of the input.\n Analogous to the builtin `int.bit_count` or ``popcount`` in C++.\n\n Examples\n --------\n >>> np.{int_name}(127).bit_count()\n 7""" +\n (f"""\n >>> np.{int_name}(-127).bit_count()\n 7\n """ if dtype(int_name).char.islower() else "")))\n
.venv\Lib\site-packages\numpy\_core\_add_newdocs_scalars.py
_add_newdocs_scalars.py
Python
12,990
0.95
0.087179
0.035032
awesome-app
502
2023-08-01T22:04:32.461439
BSD-3-Clause
false
cf2707671a9e224b36b5aa3bf3c1eaf6
from collections.abc import Iterable\nfrom typing import Final\n\nimport numpy as np\n\npossible_aliases: Final[list[tuple[type[np.number], str, str]]] = ...\n_system: Final[str] = ...\n_machine: Final[str] = ...\n_doc_alias_string: Final[str] = ...\n_bool_docstring: Final[str] = ...\nint_name: str = ...\nfloat_name: str = ...\n\ndef numeric_type_aliases(aliases: list[tuple[str, str]]) -> list[tuple[type[np.number], str, str]]: ...\ndef add_newdoc_for_scalar_type(obj: str, fixed_aliases: Iterable[str], doc: str) -> None: ...\ndef _get_platform_and_machine() -> tuple[str, str]: ...\n
.venv\Lib\site-packages\numpy\_core\_add_newdocs_scalars.pyi
_add_newdocs_scalars.pyi
Other
589
0.85
0.1875
0
react-lib
986
2023-11-15T02:27:55.268621
MIT
false
517491d587464246187acf165900f285
"""\nFunctions in the ``as*array`` family that promote array-likes into arrays.\n\n`require` fits this category despite its name not matching this pattern.\n"""\nfrom .multiarray import array, asanyarray\nfrom .overrides import (\n array_function_dispatch,\n finalize_array_function_like,\n set_module,\n)\n\n__all__ = ["require"]\n\n\nPOSSIBLE_FLAGS = {\n 'C': 'C', 'C_CONTIGUOUS': 'C', 'CONTIGUOUS': 'C',\n 'F': 'F', 'F_CONTIGUOUS': 'F', 'FORTRAN': 'F',\n 'A': 'A', 'ALIGNED': 'A',\n 'W': 'W', 'WRITEABLE': 'W',\n 'O': 'O', 'OWNDATA': 'O',\n 'E': 'E', 'ENSUREARRAY': 'E'\n}\n\n\n@finalize_array_function_like\n@set_module('numpy')\ndef require(a, dtype=None, requirements=None, *, like=None):\n """\n Return an ndarray of the provided type that satisfies requirements.\n\n This function is useful to be sure that an array with the correct flags\n is returned for passing to compiled code (perhaps through ctypes).\n\n Parameters\n ----------\n a : array_like\n The object to be converted to a type-and-requirement-satisfying array.\n dtype : data-type\n The required data-type. If None preserve the current dtype. If your\n application requires the data to be in native byteorder, include\n a byteorder specification as a part of the dtype specification.\n requirements : str or sequence of str\n The requirements list can be any of the following\n\n * 'F_CONTIGUOUS' ('F') - ensure a Fortran-contiguous array\n * 'C_CONTIGUOUS' ('C') - ensure a C-contiguous array\n * 'ALIGNED' ('A') - ensure a data-type aligned array\n * 'WRITEABLE' ('W') - ensure a writable array\n * 'OWNDATA' ('O') - ensure an array that owns its own data\n * 'ENSUREARRAY', ('E') - ensure a base array, instead of a subclass\n ${ARRAY_FUNCTION_LIKE}\n\n .. 
versionadded:: 1.20.0\n\n Returns\n -------\n out : ndarray\n Array with specified requirements and type if given.\n\n See Also\n --------\n asarray : Convert input to an ndarray.\n asanyarray : Convert to an ndarray, but pass through ndarray subclasses.\n ascontiguousarray : Convert input to a contiguous array.\n asfortranarray : Convert input to an ndarray with column-major\n memory order.\n ndarray.flags : Information about the memory layout of the array.\n\n Notes\n -----\n The returned array will be guaranteed to have the listed requirements\n by making a copy if needed.\n\n Examples\n --------\n >>> import numpy as np\n >>> x = np.arange(6).reshape(2,3)\n >>> x.flags\n C_CONTIGUOUS : True\n F_CONTIGUOUS : False\n OWNDATA : False\n WRITEABLE : True\n ALIGNED : True\n WRITEBACKIFCOPY : False\n\n >>> y = np.require(x, dtype=np.float32, requirements=['A', 'O', 'W', 'F'])\n >>> y.flags\n C_CONTIGUOUS : False\n F_CONTIGUOUS : True\n OWNDATA : True\n WRITEABLE : True\n ALIGNED : True\n WRITEBACKIFCOPY : False\n\n """\n if like is not None:\n return _require_with_like(\n like,\n a,\n dtype=dtype,\n requirements=requirements,\n )\n\n if not requirements:\n return asanyarray(a, dtype=dtype)\n\n requirements = {POSSIBLE_FLAGS[x.upper()] for x in requirements}\n\n if 'E' in requirements:\n requirements.remove('E')\n subok = False\n else:\n subok = True\n\n order = 'A'\n if requirements >= {'C', 'F'}:\n raise ValueError('Cannot specify both "C" and "F" order')\n elif 'F' in requirements:\n order = 'F'\n requirements.remove('F')\n elif 'C' in requirements:\n order = 'C'\n requirements.remove('C')\n\n arr = array(a, dtype=dtype, order=order, copy=None, subok=subok)\n\n for prop in requirements:\n if not arr.flags[prop]:\n return arr.copy(order)\n return arr\n\n\n_require_with_like = array_function_dispatch()(require)\n
.venv\Lib\site-packages\numpy\_core\_asarray.py
_asarray.py
Python
4,045
0.85
0.089552
0.054545
react-lib
602
2025-01-03T04:28:01.491478
MIT
false
ef8976c101722a9568fd5b117dda8f31
from collections.abc import Iterable\nfrom typing import Any, Literal, TypeAlias, TypeVar, overload\n\nfrom numpy._typing import DTypeLike, NDArray, _SupportsArrayFunc\n\n_ArrayT = TypeVar("_ArrayT", bound=NDArray[Any])\n\n_Requirements: TypeAlias = Literal[\n "C", "C_CONTIGUOUS", "CONTIGUOUS",\n "F", "F_CONTIGUOUS", "FORTRAN",\n "A", "ALIGNED",\n "W", "WRITEABLE",\n "O", "OWNDATA"\n]\n_E: TypeAlias = Literal["E", "ENSUREARRAY"]\n_RequirementsWithE: TypeAlias = _Requirements | _E\n\n@overload\ndef require(\n a: _ArrayT,\n dtype: None = ...,\n requirements: _Requirements | Iterable[_Requirements] | None = ...,\n *,\n like: _SupportsArrayFunc = ...\n) -> _ArrayT: ...\n@overload\ndef require(\n a: object,\n dtype: DTypeLike = ...,\n requirements: _E | Iterable[_RequirementsWithE] = ...,\n *,\n like: _SupportsArrayFunc = ...\n) -> NDArray[Any]: ...\n@overload\ndef require(\n a: object,\n dtype: DTypeLike = ...,\n requirements: _Requirements | Iterable[_Requirements] | None = ...,\n *,\n like: _SupportsArrayFunc = ...\n) -> NDArray[Any]: ...\n
.venv\Lib\site-packages\numpy\_core\_asarray.pyi
_asarray.pyi
Other
1,114
0.85
0.073171
0.081081
react-lib
519
2024-02-25T16:34:01.569434
GPL-3.0
false
17b3cd37d08437206c31bd4f6b765031
"""\nA place for code to be called from the implementation of np.dtype\n\nString handling is much easier to do correctly in python.\n"""\nimport numpy as np\n\n_kind_to_stem = {\n 'u': 'uint',\n 'i': 'int',\n 'c': 'complex',\n 'f': 'float',\n 'b': 'bool',\n 'V': 'void',\n 'O': 'object',\n 'M': 'datetime',\n 'm': 'timedelta',\n 'S': 'bytes',\n 'U': 'str',\n}\n\n\ndef _kind_name(dtype):\n try:\n return _kind_to_stem[dtype.kind]\n except KeyError as e:\n raise RuntimeError(\n f"internal dtype error, unknown kind {dtype.kind!r}"\n ) from None\n\n\ndef __str__(dtype):\n if dtype.fields is not None:\n return _struct_str(dtype, include_align=True)\n elif dtype.subdtype:\n return _subarray_str(dtype)\n elif issubclass(dtype.type, np.flexible) or not dtype.isnative:\n return dtype.str\n else:\n return dtype.name\n\n\ndef __repr__(dtype):\n arg_str = _construction_repr(dtype, include_align=False)\n if dtype.isalignedstruct:\n arg_str = arg_str + ", align=True"\n return f"dtype({arg_str})"\n\n\ndef _unpack_field(dtype, offset, title=None):\n """\n Helper function to normalize the items in dtype.fields.\n\n Call as:\n\n dtype, offset, title = _unpack_field(*dtype.fields[name])\n """\n return dtype, offset, title\n\n\ndef _isunsized(dtype):\n # PyDataType_ISUNSIZED\n return dtype.itemsize == 0\n\n\ndef _construction_repr(dtype, include_align=False, short=False):\n """\n Creates a string repr of the dtype, excluding the 'dtype()' part\n surrounding the object. This object may be a string, a list, or\n a dict depending on the nature of the dtype. 
This\n is the object passed as the first parameter to the dtype\n constructor, and if no additional constructor parameters are\n given, will reproduce the exact memory layout.\n\n Parameters\n ----------\n short : bool\n If true, this creates a shorter repr using 'kind' and 'itemsize',\n instead of the longer type name.\n\n include_align : bool\n If true, this includes the 'align=True' parameter\n inside the struct dtype construction dict when needed. Use this flag\n if you want a proper repr string without the 'dtype()' part around it.\n\n If false, this does not preserve the\n 'align=True' parameter or sticky NPY_ALIGNED_STRUCT flag for\n struct arrays like the regular repr does, because the 'align'\n flag is not part of first dtype constructor parameter. This\n mode is intended for a full 'repr', where the 'align=True' is\n provided as the second parameter.\n """\n if dtype.fields is not None:\n return _struct_str(dtype, include_align=include_align)\n elif dtype.subdtype:\n return _subarray_str(dtype)\n else:\n return _scalar_str(dtype, short=short)\n\n\ndef _scalar_str(dtype, short):\n byteorder = _byte_order_str(dtype)\n\n if dtype.type == np.bool:\n if short:\n return "'?'"\n else:\n return "'bool'"\n\n elif dtype.type == np.object_:\n # The object reference may be different sizes on different\n # platforms, so it should never include the itemsize here.\n return "'O'"\n\n elif dtype.type == np.bytes_:\n if _isunsized(dtype):\n return "'S'"\n else:\n return "'S%d'" % dtype.itemsize\n\n elif dtype.type == np.str_:\n if _isunsized(dtype):\n return f"'{byteorder}U'"\n else:\n return "'%sU%d'" % (byteorder, dtype.itemsize / 4)\n\n elif dtype.type == str:\n return "'T'"\n\n elif not type(dtype)._legacy:\n return f"'{byteorder}{type(dtype).__name__}{dtype.itemsize * 8}'"\n\n # unlike the other types, subclasses of void are preserved - but\n # historically the repr does not actually reveal the subclass\n elif issubclass(dtype.type, np.void):\n if _isunsized(dtype):\n 
return "'V'"\n else:\n return "'V%d'" % dtype.itemsize\n\n elif dtype.type == np.datetime64:\n return f"'{byteorder}M8{_datetime_metadata_str(dtype)}'"\n\n elif dtype.type == np.timedelta64:\n return f"'{byteorder}m8{_datetime_metadata_str(dtype)}'"\n\n elif dtype.isbuiltin == 2:\n return dtype.type.__name__\n\n elif np.issubdtype(dtype, np.number):\n # Short repr with endianness, like '<f8'\n if short or dtype.byteorder not in ('=', '|'):\n return "'%s%c%d'" % (byteorder, dtype.kind, dtype.itemsize)\n\n # Longer repr, like 'float64'\n else:\n return "'%s%d'" % (_kind_name(dtype), 8 * dtype.itemsize)\n\n else:\n raise RuntimeError(\n "Internal error: NumPy dtype unrecognized type number")\n\n\ndef _byte_order_str(dtype):\n """ Normalize byteorder to '<' or '>' """\n # hack to obtain the native and swapped byte order characters\n swapped = np.dtype(int).newbyteorder('S')\n native = swapped.newbyteorder('S')\n\n byteorder = dtype.byteorder\n if byteorder == '=':\n return native.byteorder\n if byteorder == 'S':\n # TODO: this path can never be reached\n return swapped.byteorder\n elif byteorder == '|':\n return ''\n else:\n return byteorder\n\n\ndef _datetime_metadata_str(dtype):\n # TODO: this duplicates the C metastr_to_unicode functionality\n unit, count = np.datetime_data(dtype)\n if unit == 'generic':\n return ''\n elif count == 1:\n return f'[{unit}]'\n else:\n return f'[{count}{unit}]'\n\n\ndef _struct_dict_str(dtype, includealignedflag):\n # unpack the fields dictionary into ls\n names = dtype.names\n fld_dtypes = []\n offsets = []\n titles = []\n for name in names:\n fld_dtype, offset, title = _unpack_field(*dtype.fields[name])\n fld_dtypes.append(fld_dtype)\n offsets.append(offset)\n titles.append(title)\n\n # Build up a string to make the dictionary\n\n if np._core.arrayprint._get_legacy_print_mode() <= 121:\n colon = ":"\n fieldsep = ","\n else:\n colon = ": "\n fieldsep = ", "\n\n # First, the names\n ret = "{'names'%s[" % colon\n ret += 
fieldsep.join(repr(name) for name in names)\n\n # Second, the formats\n ret += f"], 'formats'{colon}["\n ret += fieldsep.join(\n _construction_repr(fld_dtype, short=True) for fld_dtype in fld_dtypes)\n\n # Third, the offsets\n ret += f"], 'offsets'{colon}["\n ret += fieldsep.join("%d" % offset for offset in offsets)\n\n # Fourth, the titles\n if any(title is not None for title in titles):\n ret += f"], 'titles'{colon}["\n ret += fieldsep.join(repr(title) for title in titles)\n\n # Fifth, the itemsize\n ret += "], 'itemsize'%s%d" % (colon, dtype.itemsize)\n\n if (includealignedflag and dtype.isalignedstruct):\n # Finally, the aligned flag\n ret += ", 'aligned'%sTrue}" % colon\n else:\n ret += "}"\n\n return ret\n\n\ndef _aligned_offset(offset, alignment):\n # round up offset:\n return - (-offset // alignment) * alignment\n\n\ndef _is_packed(dtype):\n """\n Checks whether the structured data type in 'dtype'\n has a simple layout, where all the fields are in order,\n and follow each other with no alignment padding.\n\n When this returns true, the dtype can be reconstructed\n from a list of the field names and dtypes with no additional\n dtype parameters.\n\n Duplicates the C `is_dtype_struct_simple_unaligned_layout` function.\n """\n align = dtype.isalignedstruct\n max_alignment = 1\n total_offset = 0\n for name in dtype.names:\n fld_dtype, fld_offset, title = _unpack_field(*dtype.fields[name])\n\n if align:\n total_offset = _aligned_offset(total_offset, fld_dtype.alignment)\n max_alignment = max(max_alignment, fld_dtype.alignment)\n\n if fld_offset != total_offset:\n return False\n total_offset += fld_dtype.itemsize\n\n if align:\n total_offset = _aligned_offset(total_offset, max_alignment)\n\n return total_offset == dtype.itemsize\n\n\ndef _struct_list_str(dtype):\n items = []\n for name in dtype.names:\n fld_dtype, fld_offset, title = _unpack_field(*dtype.fields[name])\n\n item = "("\n if title is not None:\n item += f"({title!r}, {name!r}), "\n else:\n item += 
f"{name!r}, "\n # Special case subarray handling here\n if fld_dtype.subdtype is not None:\n base, shape = fld_dtype.subdtype\n item += f"{_construction_repr(base, short=True)}, {shape}"\n else:\n item += _construction_repr(fld_dtype, short=True)\n\n item += ")"\n items.append(item)\n\n return "[" + ", ".join(items) + "]"\n\n\ndef _struct_str(dtype, include_align):\n # The list str representation can't include the 'align=' flag,\n # so if it is requested and the struct has the aligned flag set,\n # we must use the dict str instead.\n if not (include_align and dtype.isalignedstruct) and _is_packed(dtype):\n sub = _struct_list_str(dtype)\n\n else:\n sub = _struct_dict_str(dtype, include_align)\n\n # If the data type isn't the default, void, show it\n if dtype.type != np.void:\n return f"({dtype.type.__module__}.{dtype.type.__name__}, {sub})"\n else:\n return sub\n\n\ndef _subarray_str(dtype):\n base, shape = dtype.subdtype\n return f"({_construction_repr(base, short=True)}, {shape})"\n\n\ndef _name_includes_bit_suffix(dtype):\n if dtype.type == np.object_:\n # pointer size varies by system, best to omit it\n return False\n elif dtype.type == np.bool:\n # implied\n return False\n elif dtype.type is None:\n return True\n elif np.issubdtype(dtype, np.flexible) and _isunsized(dtype):\n # unspecified\n return False\n else:\n return True\n\n\ndef _name_get(dtype):\n # provides dtype.name.__get__, documented as returning a "bit name"\n\n if dtype.isbuiltin == 2:\n # user dtypes don't promise to do anything special\n return dtype.type.__name__\n\n if not type(dtype)._legacy:\n name = type(dtype).__name__\n\n elif issubclass(dtype.type, np.void):\n # historically, void subclasses preserve their name, eg `record64`\n name = dtype.type.__name__\n else:\n name = _kind_name(dtype)\n\n # append bit counts\n if _name_includes_bit_suffix(dtype):\n name += f"{dtype.itemsize * 8}"\n\n # append metadata to datetimes\n if dtype.type in (np.datetime64, np.timedelta64):\n name += 
_datetime_metadata_str(dtype)\n\n return name\n
.venv\Lib\site-packages\numpy\_core\_dtype.py
_dtype.py
Python
10,913
0.95
0.166667
0.112281
vue-tools
669
2025-06-18T02:08:20.362922
Apache-2.0
false
39a1a9b2b80154498745cf4f81ea396a
from typing import Final, TypeAlias, TypedDict, overload, type_check_only\nfrom typing import Literal as L\n\nfrom typing_extensions import ReadOnly, TypeVar\n\nimport numpy as np\n\n###\n\n_T = TypeVar("_T")\n\n_Name: TypeAlias = L["uint", "int", "complex", "float", "bool", "void", "object", "datetime", "timedelta", "bytes", "str"]\n\n@type_check_only\nclass _KindToStemType(TypedDict):\n u: ReadOnly[L["uint"]]\n i: ReadOnly[L["int"]]\n c: ReadOnly[L["complex"]]\n f: ReadOnly[L["float"]]\n b: ReadOnly[L["bool"]]\n V: ReadOnly[L["void"]]\n O: ReadOnly[L["object"]]\n M: ReadOnly[L["datetime"]]\n m: ReadOnly[L["timedelta"]]\n S: ReadOnly[L["bytes"]]\n U: ReadOnly[L["str"]]\n\n###\n\n_kind_to_stem: Final[_KindToStemType] = ...\n\n#\ndef _kind_name(dtype: np.dtype) -> _Name: ...\ndef __str__(dtype: np.dtype) -> str: ...\ndef __repr__(dtype: np.dtype) -> str: ...\n\n#\ndef _isunsized(dtype: np.dtype) -> bool: ...\ndef _is_packed(dtype: np.dtype) -> bool: ...\ndef _name_includes_bit_suffix(dtype: np.dtype) -> bool: ...\n\n#\ndef _construction_repr(dtype: np.dtype, include_align: bool = False, short: bool = False) -> str: ...\ndef _scalar_str(dtype: np.dtype, short: bool) -> str: ...\ndef _byte_order_str(dtype: np.dtype) -> str: ...\ndef _datetime_metadata_str(dtype: np.dtype) -> str: ...\ndef _struct_dict_str(dtype: np.dtype, includealignedflag: bool) -> str: ...\ndef _struct_list_str(dtype: np.dtype) -> str: ...\ndef _struct_str(dtype: np.dtype, include_align: bool) -> str: ...\ndef _subarray_str(dtype: np.dtype) -> str: ...\ndef _name_get(dtype: np.dtype) -> str: ...\n\n#\n@overload\ndef _unpack_field(dtype: np.dtype, offset: int, title: _T) -> tuple[np.dtype, int, _T]: ...\n@overload\ndef _unpack_field(dtype: np.dtype, offset: int, title: None = None) -> tuple[np.dtype, int, None]: ...\ndef _aligned_offset(offset: int, alignment: int) -> int: ...\n
.venv\Lib\site-packages\numpy\_core\_dtype.pyi
_dtype.pyi
Other
1,909
0.95
0.327586
0.130435
vue-tools
916
2024-08-14T09:00:40.841222
MIT
false
129efdc74435f45db2d52a3e73e541b0
"""\nConversion from ctypes to dtype.\n\nIn an ideal world, we could achieve this through the PEP3118 buffer protocol,\nsomething like::\n\n def dtype_from_ctypes_type(t):\n # needed to ensure that the shape of `t` is within memoryview.format\n class DummyStruct(ctypes.Structure):\n _fields_ = [('a', t)]\n\n # empty to avoid memory allocation\n ctype_0 = (DummyStruct * 0)()\n mv = memoryview(ctype_0)\n\n # convert the struct, and slice back out the field\n return _dtype_from_pep3118(mv.format)['a']\n\nUnfortunately, this fails because:\n\n* ctypes cannot handle length-0 arrays with PEP3118 (bpo-32782)\n* PEP3118 cannot represent unions, but both numpy and ctypes can\n* ctypes cannot handle big-endian structs with PEP3118 (bpo-32780)\n"""\n\n# We delay-import ctypes for distributions that do not include it.\n# While this module is not used unless the user passes in ctypes\n# members, it is eagerly imported from numpy/_core/__init__.py.\nimport numpy as np\n\n\ndef _from_ctypes_array(t):\n return np.dtype((dtype_from_ctypes_type(t._type_), (t._length_,)))\n\n\ndef _from_ctypes_structure(t):\n for item in t._fields_:\n if len(item) > 2:\n raise TypeError(\n "ctypes bitfields have no dtype equivalent")\n\n if hasattr(t, "_pack_"):\n import ctypes\n formats = []\n offsets = []\n names = []\n current_offset = 0\n for fname, ftyp in t._fields_:\n names.append(fname)\n formats.append(dtype_from_ctypes_type(ftyp))\n # Each type has a default offset, this is platform dependent\n # for some types.\n effective_pack = min(t._pack_, ctypes.alignment(ftyp))\n current_offset = (\n (current_offset + effective_pack - 1) // effective_pack\n ) * effective_pack\n offsets.append(current_offset)\n current_offset += ctypes.sizeof(ftyp)\n\n return np.dtype({\n "formats": formats,\n "offsets": offsets,\n "names": names,\n "itemsize": ctypes.sizeof(t)})\n else:\n fields = []\n for fname, ftyp in t._fields_:\n fields.append((fname, dtype_from_ctypes_type(ftyp)))\n\n # by default, ctypes 
structs are aligned\n return np.dtype(fields, align=True)\n\n\ndef _from_ctypes_scalar(t):\n """\n Return the dtype type with endianness included if it's the case\n """\n if getattr(t, '__ctype_be__', None) is t:\n return np.dtype('>' + t._type_)\n elif getattr(t, '__ctype_le__', None) is t:\n return np.dtype('<' + t._type_)\n else:\n return np.dtype(t._type_)\n\n\ndef _from_ctypes_union(t):\n import ctypes\n formats = []\n offsets = []\n names = []\n for fname, ftyp in t._fields_:\n names.append(fname)\n formats.append(dtype_from_ctypes_type(ftyp))\n offsets.append(0) # Union fields are offset to 0\n\n return np.dtype({\n "formats": formats,\n "offsets": offsets,\n "names": names,\n "itemsize": ctypes.sizeof(t)})\n\n\ndef dtype_from_ctypes_type(t):\n """\n Construct a dtype object from a ctypes type\n """\n import _ctypes\n if issubclass(t, _ctypes.Array):\n return _from_ctypes_array(t)\n elif issubclass(t, _ctypes._Pointer):\n raise TypeError("ctypes pointers have no dtype equivalent")\n elif issubclass(t, _ctypes.Structure):\n return _from_ctypes_structure(t)\n elif issubclass(t, _ctypes.Union):\n return _from_ctypes_union(t)\n elif isinstance(getattr(t, '_type_', None), str):\n return _from_ctypes_scalar(t)\n else:\n raise NotImplementedError(\n f"Unknown ctypes type {t.__name__}")\n
.venv\Lib\site-packages\numpy\_core\_dtype_ctypes.py
_dtype_ctypes.py
Python
3,846
0.95
0.15
0.121212
react-lib
470
2024-03-10T04:10:25.283083
BSD-3-Clause
false
892d3f75bf2571bb2f7de0dd6d5d3189
import _ctypes\nimport ctypes as ct\nfrom typing import Any, overload\n\nimport numpy as np\n\n#\n@overload\ndef dtype_from_ctypes_type(t: type[_ctypes.Array[Any] | _ctypes.Structure]) -> np.dtype[np.void]: ...\n@overload\ndef dtype_from_ctypes_type(t: type[ct.c_bool]) -> np.dtype[np.bool]: ...\n@overload\ndef dtype_from_ctypes_type(t: type[ct.c_int8 | ct.c_byte]) -> np.dtype[np.int8]: ...\n@overload\ndef dtype_from_ctypes_type(t: type[ct.c_uint8 | ct.c_ubyte]) -> np.dtype[np.uint8]: ...\n@overload\ndef dtype_from_ctypes_type(t: type[ct.c_int16 | ct.c_short]) -> np.dtype[np.int16]: ...\n@overload\ndef dtype_from_ctypes_type(t: type[ct.c_uint16 | ct.c_ushort]) -> np.dtype[np.uint16]: ...\n@overload\ndef dtype_from_ctypes_type(t: type[ct.c_int32 | ct.c_int]) -> np.dtype[np.int32]: ...\n@overload\ndef dtype_from_ctypes_type(t: type[ct.c_uint32 | ct.c_uint]) -> np.dtype[np.uint32]: ...\n@overload\ndef dtype_from_ctypes_type(t: type[ct.c_ssize_t | ct.c_long]) -> np.dtype[np.int32 | np.int64]: ...\n@overload\ndef dtype_from_ctypes_type(t: type[ct.c_size_t | ct.c_ulong]) -> np.dtype[np.uint32 | np.uint64]: ...\n@overload\ndef dtype_from_ctypes_type(t: type[ct.c_int64 | ct.c_longlong]) -> np.dtype[np.int64]: ...\n@overload\ndef dtype_from_ctypes_type(t: type[ct.c_uint64 | ct.c_ulonglong]) -> np.dtype[np.uint64]: ...\n@overload\ndef dtype_from_ctypes_type(t: type[ct.c_float]) -> np.dtype[np.float32]: ...\n@overload\ndef dtype_from_ctypes_type(t: type[ct.c_double]) -> np.dtype[np.float64]: ...\n@overload\ndef dtype_from_ctypes_type(t: type[ct.c_longdouble]) -> np.dtype[np.longdouble]: ...\n@overload\ndef dtype_from_ctypes_type(t: type[ct.c_char]) -> np.dtype[np.bytes_]: ...\n@overload\ndef dtype_from_ctypes_type(t: type[ct.py_object[Any]]) -> np.dtype[np.object_]: ...\n\n# NOTE: the complex ctypes on python>=3.14 are not yet supported at runtim, see\n# https://github.com/numpy/numpy/issues/28360\n\n#\ndef _from_ctypes_array(t: type[_ctypes.Array[Any]]) -> np.dtype[np.void]: 
...\ndef _from_ctypes_structure(t: type[_ctypes.Structure]) -> np.dtype[np.void]: ...\ndef _from_ctypes_union(t: type[_ctypes.Union]) -> np.dtype[np.void]: ...\n\n# keep in sync with `dtype_from_ctypes_type` (minus the first overload)\n@overload\ndef _from_ctypes_scalar(t: type[ct.c_bool]) -> np.dtype[np.bool]: ...\n@overload\ndef _from_ctypes_scalar(t: type[ct.c_int8 | ct.c_byte]) -> np.dtype[np.int8]: ...\n@overload\ndef _from_ctypes_scalar(t: type[ct.c_uint8 | ct.c_ubyte]) -> np.dtype[np.uint8]: ...\n@overload\ndef _from_ctypes_scalar(t: type[ct.c_int16 | ct.c_short]) -> np.dtype[np.int16]: ...\n@overload\ndef _from_ctypes_scalar(t: type[ct.c_uint16 | ct.c_ushort]) -> np.dtype[np.uint16]: ...\n@overload\ndef _from_ctypes_scalar(t: type[ct.c_int32 | ct.c_int]) -> np.dtype[np.int32]: ...\n@overload\ndef _from_ctypes_scalar(t: type[ct.c_uint32 | ct.c_uint]) -> np.dtype[np.uint32]: ...\n@overload\ndef _from_ctypes_scalar(t: type[ct.c_ssize_t | ct.c_long]) -> np.dtype[np.int32 | np.int64]: ...\n@overload\ndef _from_ctypes_scalar(t: type[ct.c_size_t | ct.c_ulong]) -> np.dtype[np.uint32 | np.uint64]: ...\n@overload\ndef _from_ctypes_scalar(t: type[ct.c_int64 | ct.c_longlong]) -> np.dtype[np.int64]: ...\n@overload\ndef _from_ctypes_scalar(t: type[ct.c_uint64 | ct.c_ulonglong]) -> np.dtype[np.uint64]: ...\n@overload\ndef _from_ctypes_scalar(t: type[ct.c_float]) -> np.dtype[np.float32]: ...\n@overload\ndef _from_ctypes_scalar(t: type[ct.c_double]) -> np.dtype[np.float64]: ...\n@overload\ndef _from_ctypes_scalar(t: type[ct.c_longdouble]) -> np.dtype[np.longdouble]: ...\n@overload\ndef _from_ctypes_scalar(t: type[ct.c_char]) -> np.dtype[np.bytes_]: ...\n@overload\ndef _from_ctypes_scalar(t: type[ct.py_object[Any]]) -> np.dtype[np.object_]: ...\n
.venv\Lib\site-packages\numpy\_core\_dtype_ctypes.pyi
_dtype_ctypes.pyi
Other
3,765
0.95
0.433735
0.064103
vue-tools
252
2023-10-07T05:05:35.140002
BSD-3-Clause
false
d8cbbb30a1dae25a89dd71e48ae4cc30
"""\nVarious richly-typed exceptions, that also help us deal with string formatting\nin python where it's easier.\n\nBy putting the formatting in `__str__`, we also avoid paying the cost for\nusers who silence the exceptions.\n"""\n\ndef _unpack_tuple(tup):\n if len(tup) == 1:\n return tup[0]\n else:\n return tup\n\n\ndef _display_as_base(cls):\n """\n A decorator that makes an exception class look like its base.\n\n We use this to hide subclasses that are implementation details - the user\n should catch the base type, which is what the traceback will show them.\n\n Classes decorated with this decorator are subject to removal without a\n deprecation warning.\n """\n assert issubclass(cls, Exception)\n cls.__name__ = cls.__base__.__name__\n return cls\n\n\nclass UFuncTypeError(TypeError):\n """ Base class for all ufunc exceptions """\n def __init__(self, ufunc):\n self.ufunc = ufunc\n\n\n@_display_as_base\nclass _UFuncNoLoopError(UFuncTypeError):\n """ Thrown when a ufunc loop cannot be found """\n def __init__(self, ufunc, dtypes):\n super().__init__(ufunc)\n self.dtypes = tuple(dtypes)\n\n def __str__(self):\n return (\n f"ufunc {self.ufunc.__name__!r} did not contain a loop with signature "\n f"matching types {_unpack_tuple(self.dtypes[:self.ufunc.nin])!r} "\n f"-> {_unpack_tuple(self.dtypes[self.ufunc.nin:])!r}"\n )\n\n\n@_display_as_base\nclass _UFuncBinaryResolutionError(_UFuncNoLoopError):\n """ Thrown when a binary resolution fails """\n def __init__(self, ufunc, dtypes):\n super().__init__(ufunc, dtypes)\n assert len(self.dtypes) == 2\n\n def __str__(self):\n return (\n "ufunc {!r} cannot use operands with types {!r} and {!r}"\n ).format(\n self.ufunc.__name__, *self.dtypes\n )\n\n\n@_display_as_base\nclass _UFuncCastingError(UFuncTypeError):\n def __init__(self, ufunc, casting, from_, to):\n super().__init__(ufunc)\n self.casting = casting\n self.from_ = from_\n self.to = to\n\n\n@_display_as_base\nclass _UFuncInputCastingError(_UFuncCastingError):\n """ 
Thrown when a ufunc input cannot be casted """\n def __init__(self, ufunc, casting, from_, to, i):\n super().__init__(ufunc, casting, from_, to)\n self.in_i = i\n\n def __str__(self):\n # only show the number if more than one input exists\n i_str = f"{self.in_i} " if self.ufunc.nin != 1 else ""\n return (\n f"Cannot cast ufunc {self.ufunc.__name__!r} input {i_str}from "\n f"{self.from_!r} to {self.to!r} with casting rule {self.casting!r}"\n )\n\n\n@_display_as_base\nclass _UFuncOutputCastingError(_UFuncCastingError):\n """ Thrown when a ufunc output cannot be casted """\n def __init__(self, ufunc, casting, from_, to, i):\n super().__init__(ufunc, casting, from_, to)\n self.out_i = i\n\n def __str__(self):\n # only show the number if more than one output exists\n i_str = f"{self.out_i} " if self.ufunc.nout != 1 else ""\n return (\n f"Cannot cast ufunc {self.ufunc.__name__!r} output {i_str}from "\n f"{self.from_!r} to {self.to!r} with casting rule {self.casting!r}"\n )\n\n\n@_display_as_base\nclass _ArrayMemoryError(MemoryError):\n """ Thrown when an array cannot be allocated"""\n def __init__(self, shape, dtype):\n self.shape = shape\n self.dtype = dtype\n\n @property\n def _total_size(self):\n num_bytes = self.dtype.itemsize\n for dim in self.shape:\n num_bytes *= dim\n return num_bytes\n\n @staticmethod\n def _size_to_string(num_bytes):\n """ Convert a number of bytes into a binary size string """\n\n # https://en.wikipedia.org/wiki/Binary_prefix\n LOG2_STEP = 10\n STEP = 1024\n units = ['bytes', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB']\n\n unit_i = max(num_bytes.bit_length() - 1, 1) // LOG2_STEP\n unit_val = 1 << (unit_i * LOG2_STEP)\n n_units = num_bytes / unit_val\n del unit_val\n\n # ensure we pick a unit that is correct after rounding\n if round(n_units) == STEP:\n unit_i += 1\n n_units /= STEP\n\n # deal with sizes so large that we don't have units for them\n if unit_i >= len(units):\n new_unit_i = len(units) - 1\n n_units *= 1 << ((unit_i - new_unit_i) * 
LOG2_STEP)\n unit_i = new_unit_i\n\n unit_name = units[unit_i]\n # format with a sensible number of digits\n if unit_i == 0:\n # no decimal point on bytes\n return f'{n_units:.0f} {unit_name}'\n elif round(n_units) < 1000:\n # 3 significant figures, if none are dropped to the left of the .\n return f'{n_units:#.3g} {unit_name}'\n else:\n # just give all the digits otherwise\n return f'{n_units:#.0f} {unit_name}'\n\n def __str__(self):\n size_str = self._size_to_string(self._total_size)\n return (f"Unable to allocate {size_str} for an array with shape "\n f"{self.shape} and data type {self.dtype}")\n
.venv\Lib\site-packages\numpy\_core\_exceptions.py
_exceptions.py
Python
5,321
0.95
0.246914
0.069231
python-kit
338
2025-04-16T12:03:13.480017
BSD-3-Clause
false
e282f1f797153aa857726446a5eb85a4
from collections.abc import Iterable\nfrom typing import Any, Final, TypeVar, overload\n\nimport numpy as np\nfrom numpy import _CastingKind\nfrom numpy._utils import set_module as set_module\n\n###\n\n_T = TypeVar("_T")\n_TupleT = TypeVar("_TupleT", bound=tuple[()] | tuple[Any, Any, *tuple[Any, ...]])\n_ExceptionT = TypeVar("_ExceptionT", bound=Exception)\n\n###\n\nclass UFuncTypeError(TypeError):\n ufunc: Final[np.ufunc]\n def __init__(self, /, ufunc: np.ufunc) -> None: ...\n\nclass _UFuncNoLoopError(UFuncTypeError):\n dtypes: tuple[np.dtype, ...]\n def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype]) -> None: ...\n\nclass _UFuncBinaryResolutionError(_UFuncNoLoopError):\n dtypes: tuple[np.dtype, np.dtype]\n def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype]) -> None: ...\n\nclass _UFuncCastingError(UFuncTypeError):\n casting: Final[_CastingKind]\n from_: Final[np.dtype]\n to: Final[np.dtype]\n def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype) -> None: ...\n\nclass _UFuncInputCastingError(_UFuncCastingError):\n in_i: Final[int]\n def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype, i: int) -> None: ...\n\nclass _UFuncOutputCastingError(_UFuncCastingError):\n out_i: Final[int]\n def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype, i: int) -> None: ...\n\nclass _ArrayMemoryError(MemoryError):\n shape: tuple[int, ...]\n dtype: np.dtype\n def __init__(self, /, shape: tuple[int, ...], dtype: np.dtype) -> None: ...\n @property\n def _total_size(self) -> int: ...\n @staticmethod\n def _size_to_string(num_bytes: int) -> str: ...\n\n@overload\ndef _unpack_tuple(tup: tuple[_T]) -> _T: ...\n@overload\ndef _unpack_tuple(tup: _TupleT) -> _TupleT: ...\ndef _display_as_base(cls: type[_ExceptionT]) -> type[_ExceptionT]: ...\n
.venv\Lib\site-packages\numpy\_core\_exceptions.pyi
_exceptions.pyi
Other
1,955
0.95
0.345455
0.046512
react-lib
752
2024-05-09T20:05:10.535395
MIT
false
471ee4b6aff3edae76d803e6cfb77d07
"""\nA place for internal code\n\nSome things are more easily handled Python.\n\n"""\nimport ast\nimport math\nimport re\nimport sys\nimport warnings\n\nfrom numpy import _NoValue\nfrom numpy.exceptions import DTypePromotionError\n\nfrom .multiarray import StringDType, array, dtype, promote_types\n\ntry:\n import ctypes\nexcept ImportError:\n ctypes = None\n\nIS_PYPY = sys.implementation.name == 'pypy'\n\nif sys.byteorder == 'little':\n _nbo = '<'\nelse:\n _nbo = '>'\n\ndef _makenames_list(adict, align):\n allfields = []\n\n for fname, obj in adict.items():\n n = len(obj)\n if not isinstance(obj, tuple) or n not in (2, 3):\n raise ValueError("entry not a 2- or 3- tuple")\n if n > 2 and obj[2] == fname:\n continue\n num = int(obj[1])\n if num < 0:\n raise ValueError("invalid offset.")\n format = dtype(obj[0], align=align)\n if n > 2:\n title = obj[2]\n else:\n title = None\n allfields.append((fname, format, num, title))\n # sort by offsets\n allfields.sort(key=lambda x: x[2])\n names = [x[0] for x in allfields]\n formats = [x[1] for x in allfields]\n offsets = [x[2] for x in allfields]\n titles = [x[3] for x in allfields]\n\n return names, formats, offsets, titles\n\n# Called in PyArray_DescrConverter function when\n# a dictionary without "names" and "formats"\n# fields is used as a data-type descriptor.\ndef _usefields(adict, align):\n try:\n names = adict[-1]\n except KeyError:\n names = None\n if names is None:\n names, formats, offsets, titles = _makenames_list(adict, align)\n else:\n formats = []\n offsets = []\n titles = []\n for name in names:\n res = adict[name]\n formats.append(res[0])\n offsets.append(res[1])\n if len(res) > 2:\n titles.append(res[2])\n else:\n titles.append(None)\n\n return dtype({"names": names,\n "formats": formats,\n "offsets": offsets,\n "titles": titles}, align)\n\n\n# construct an array_protocol descriptor list\n# from the fields attribute of a descriptor\n# This calls itself recursively but should eventually hit\n# a descriptor 
that has no fields and then return\n# a simple typestring\n\ndef _array_descr(descriptor):\n fields = descriptor.fields\n if fields is None:\n subdtype = descriptor.subdtype\n if subdtype is None:\n if descriptor.metadata is None:\n return descriptor.str\n else:\n new = descriptor.metadata.copy()\n if new:\n return (descriptor.str, new)\n else:\n return descriptor.str\n else:\n return (_array_descr(subdtype[0]), subdtype[1])\n\n names = descriptor.names\n ordered_fields = [fields[x] + (x,) for x in names]\n result = []\n offset = 0\n for field in ordered_fields:\n if field[1] > offset:\n num = field[1] - offset\n result.append(('', f'|V{num}'))\n offset += num\n elif field[1] < offset:\n raise ValueError(\n "dtype.descr is not defined for types with overlapping or "\n "out-of-order fields")\n if len(field) > 3:\n name = (field[2], field[3])\n else:\n name = field[2]\n if field[0].subdtype:\n tup = (name, _array_descr(field[0].subdtype[0]),\n field[0].subdtype[1])\n else:\n tup = (name, _array_descr(field[0]))\n offset += field[0].itemsize\n result.append(tup)\n\n if descriptor.itemsize > offset:\n num = descriptor.itemsize - offset\n result.append(('', f'|V{num}'))\n\n return result\n\n\n# format_re was originally from numarray by J. Todd Miller\n\nformat_re = re.compile(r'(?P<order1>[<>|=]?)'\n r'(?P<repeats> *[(]?[ ,0-9]*[)]? 
*)'\n r'(?P<order2>[<>|=]?)'\n r'(?P<dtype>[A-Za-z0-9.?]*(?:\[[a-zA-Z0-9,.]+\])?)')\nsep_re = re.compile(r'\s*,\s*')\nspace_re = re.compile(r'\s+$')\n\n# astr is a string (perhaps comma separated)\n\n_convorder = {'=': _nbo}\n\ndef _commastring(astr):\n startindex = 0\n result = []\n islist = False\n while startindex < len(astr):\n mo = format_re.match(astr, pos=startindex)\n try:\n (order1, repeats, order2, dtype) = mo.groups()\n except (TypeError, AttributeError):\n raise ValueError(\n f'format number {len(result) + 1} of "{astr}" is not recognized'\n ) from None\n startindex = mo.end()\n # Separator or ending padding\n if startindex < len(astr):\n if space_re.match(astr, pos=startindex):\n startindex = len(astr)\n else:\n mo = sep_re.match(astr, pos=startindex)\n if not mo:\n raise ValueError(\n 'format number %d of "%s" is not recognized' %\n (len(result) + 1, astr))\n startindex = mo.end()\n islist = True\n\n if order2 == '':\n order = order1\n elif order1 == '':\n order = order2\n else:\n order1 = _convorder.get(order1, order1)\n order2 = _convorder.get(order2, order2)\n if (order1 != order2):\n raise ValueError(\n f'inconsistent byte-order specification {order1} and {order2}')\n order = order1\n\n if order in ('|', '=', _nbo):\n order = ''\n dtype = order + dtype\n if repeats == '':\n newitem = dtype\n else:\n if (repeats[0] == "(" and repeats[-1] == ")"\n and repeats[1:-1].strip() != ""\n and "," not in repeats):\n warnings.warn(\n 'Passing in a parenthesized single number for repeats '\n 'is deprecated; pass either a single number or indicate '\n 'a tuple with a comma, like "(2,)".', DeprecationWarning,\n stacklevel=2)\n newitem = (dtype, ast.literal_eval(repeats))\n\n result.append(newitem)\n\n return result if islist else result[0]\n\nclass dummy_ctype:\n\n def __init__(self, cls):\n self._cls = cls\n\n def __mul__(self, other):\n return self\n\n def __call__(self, *other):\n return self._cls(other)\n\n def __eq__(self, other):\n return self._cls == 
other._cls\n\n def __ne__(self, other):\n return self._cls != other._cls\n\ndef _getintp_ctype():\n val = _getintp_ctype.cache\n if val is not None:\n return val\n if ctypes is None:\n import numpy as np\n val = dummy_ctype(np.intp)\n else:\n char = dtype('n').char\n if char == 'i':\n val = ctypes.c_int\n elif char == 'l':\n val = ctypes.c_long\n elif char == 'q':\n val = ctypes.c_longlong\n else:\n val = ctypes.c_long\n _getintp_ctype.cache = val\n return val\n\n\n_getintp_ctype.cache = None\n\n# Used for .ctypes attribute of ndarray\n\nclass _missing_ctypes:\n def cast(self, num, obj):\n return num.value\n\n class c_void_p:\n def __init__(self, ptr):\n self.value = ptr\n\n\nclass _ctypes:\n def __init__(self, array, ptr=None):\n self._arr = array\n\n if ctypes:\n self._ctypes = ctypes\n self._data = self._ctypes.c_void_p(ptr)\n else:\n # fake a pointer-like object that holds onto the reference\n self._ctypes = _missing_ctypes()\n self._data = self._ctypes.c_void_p(ptr)\n self._data._objects = array\n\n if self._arr.ndim == 0:\n self._zerod = True\n else:\n self._zerod = False\n\n def data_as(self, obj):\n """\n Return the data pointer cast to a particular c-types object.\n For example, calling ``self._as_parameter_`` is equivalent to\n ``self.data_as(ctypes.c_void_p)``. Perhaps you want to use\n the data as a pointer to a ctypes array of floating-point data:\n ``self.data_as(ctypes.POINTER(ctypes.c_double))``.\n\n The returned pointer will keep a reference to the array.\n """\n # _ctypes.cast function causes a circular reference of self._data in\n # self._data._objects. Attributes of self._data cannot be released\n # until gc.collect is called. Make a copy of the pointer first then\n # let it hold the array reference. 
This is a workaround to circumvent\n # the CPython bug https://bugs.python.org/issue12836.\n ptr = self._ctypes.cast(self._data, obj)\n ptr._arr = self._arr\n return ptr\n\n def shape_as(self, obj):\n """\n Return the shape tuple as an array of some other c-types\n type. For example: ``self.shape_as(ctypes.c_short)``.\n """\n if self._zerod:\n return None\n return (obj * self._arr.ndim)(*self._arr.shape)\n\n def strides_as(self, obj):\n """\n Return the strides tuple as an array of some other\n c-types type. For example: ``self.strides_as(ctypes.c_longlong)``.\n """\n if self._zerod:\n return None\n return (obj * self._arr.ndim)(*self._arr.strides)\n\n @property\n def data(self):\n """\n A pointer to the memory area of the array as a Python integer.\n This memory area may contain data that is not aligned, or not in\n correct byte-order. The memory area may not even be writeable.\n The array flags and data-type of this array should be respected\n when passing this attribute to arbitrary C-code to avoid trouble\n that can include Python crashing. User Beware! The value of this\n attribute is exactly the same as:\n ``self._array_interface_['data'][0]``.\n\n Note that unlike ``data_as``, a reference won't be kept to the array:\n code like ``ctypes.c_void_p((a + b).ctypes.data)`` will result in a\n pointer to a deallocated array, and should be spelt\n ``(a + b).ctypes.data_as(ctypes.c_void_p)``\n """\n return self._data.value\n\n @property\n def shape(self):\n """\n (c_intp*self.ndim): A ctypes array of length self.ndim where\n the basetype is the C-integer corresponding to ``dtype('p')`` on this\n platform (see `~numpy.ctypeslib.c_intp`). This base-type could be\n `ctypes.c_int`, `ctypes.c_long`, or `ctypes.c_longlong` depending on\n the platform. 
The ctypes array contains the shape of\n the underlying array.\n """\n return self.shape_as(_getintp_ctype())\n\n @property\n def strides(self):\n """\n (c_intp*self.ndim): A ctypes array of length self.ndim where\n the basetype is the same as for the shape attribute. This ctypes\n array contains the strides information from the underlying array.\n This strides information is important for showing how many bytes\n must be jumped to get to the next element in the array.\n """\n return self.strides_as(_getintp_ctype())\n\n @property\n def _as_parameter_(self):\n """\n Overrides the ctypes semi-magic method\n\n Enables `c_func(some_array.ctypes)`\n """\n return self.data_as(ctypes.c_void_p)\n\n # Numpy 1.21.0, 2021-05-18\n\n def get_data(self):\n """Deprecated getter for the `_ctypes.data` property.\n\n .. deprecated:: 1.21\n """\n warnings.warn('"get_data" is deprecated. Use "data" instead',\n DeprecationWarning, stacklevel=2)\n return self.data\n\n def get_shape(self):\n """Deprecated getter for the `_ctypes.shape` property.\n\n .. deprecated:: 1.21\n """\n warnings.warn('"get_shape" is deprecated. Use "shape" instead',\n DeprecationWarning, stacklevel=2)\n return self.shape\n\n def get_strides(self):\n """Deprecated getter for the `_ctypes.strides` property.\n\n .. deprecated:: 1.21\n """\n warnings.warn('"get_strides" is deprecated. Use "strides" instead',\n DeprecationWarning, stacklevel=2)\n return self.strides\n\n def get_as_parameter(self):\n """Deprecated getter for the `_ctypes._as_parameter_` property.\n\n .. deprecated:: 1.21\n """\n warnings.warn(\n '"get_as_parameter" is deprecated. 
Use "_as_parameter_" instead',\n DeprecationWarning, stacklevel=2,\n )\n return self._as_parameter_\n\n\ndef _newnames(datatype, order):\n """\n Given a datatype and an order object, return a new names tuple, with the\n order indicated\n """\n oldnames = datatype.names\n nameslist = list(oldnames)\n if isinstance(order, str):\n order = [order]\n seen = set()\n if isinstance(order, (list, tuple)):\n for name in order:\n try:\n nameslist.remove(name)\n except ValueError:\n if name in seen:\n raise ValueError(f"duplicate field name: {name}") from None\n else:\n raise ValueError(f"unknown field name: {name}") from None\n seen.add(name)\n return tuple(list(order) + nameslist)\n raise ValueError(f"unsupported order value: {order}")\n\ndef _copy_fields(ary):\n """Return copy of structured array with padding between fields removed.\n\n Parameters\n ----------\n ary : ndarray\n Structured array from which to remove padding bytes\n\n Returns\n -------\n ary_copy : ndarray\n Copy of ary with padding bytes removed\n """\n dt = ary.dtype\n copy_dtype = {'names': dt.names,\n 'formats': [dt.fields[name][0] for name in dt.names]}\n return array(ary, dtype=copy_dtype, copy=True)\n\ndef _promote_fields(dt1, dt2):\n """ Perform type promotion for two structured dtypes.\n\n Parameters\n ----------\n dt1 : structured dtype\n First dtype.\n dt2 : structured dtype\n Second dtype.\n\n Returns\n -------\n out : dtype\n The promoted dtype\n\n Notes\n -----\n If one of the inputs is aligned, the result will be. The titles of\n both descriptors must match (point to the same field).\n """\n # Both must be structured and have the same names in the same order\n if (dt1.names is None or dt2.names is None) or dt1.names != dt2.names:\n raise DTypePromotionError(\n f"field names `{dt1.names}` and `{dt2.names}` mismatch.")\n\n # if both are identical, we can (maybe!) 
just return the same dtype.\n identical = dt1 is dt2\n new_fields = []\n for name in dt1.names:\n field1 = dt1.fields[name]\n field2 = dt2.fields[name]\n new_descr = promote_types(field1[0], field2[0])\n identical = identical and new_descr is field1[0]\n\n # Check that the titles match (if given):\n if field1[2:] != field2[2:]:\n raise DTypePromotionError(\n f"field titles of field '{name}' mismatch")\n if len(field1) == 2:\n new_fields.append((name, new_descr))\n else:\n new_fields.append(((field1[2], name), new_descr))\n\n res = dtype(new_fields, align=dt1.isalignedstruct or dt2.isalignedstruct)\n\n # Might as well preserve identity (and metadata) if the dtype is identical\n # and the itemsize, offsets are also unmodified. This could probably be\n # sped up, but also probably just be removed entirely.\n if identical and res.itemsize == dt1.itemsize:\n for name in dt1.names:\n if dt1.fields[name][1] != res.fields[name][1]:\n return res # the dtype changed.\n return dt1\n\n return res\n\n\ndef _getfield_is_safe(oldtype, newtype, offset):\n """ Checks safety of getfield for object arrays.\n\n As in _view_is_safe, we need to check that memory containing objects is not\n reinterpreted as a non-object datatype and vice versa.\n\n Parameters\n ----------\n oldtype : data-type\n Data type of the original ndarray.\n newtype : data-type\n Data type of the field being accessed by ndarray.getfield\n offset : int\n Offset of the field being accessed by ndarray.getfield\n\n Raises\n ------\n TypeError\n If the field access is invalid\n\n """\n if newtype.hasobject or oldtype.hasobject:\n if offset == 0 and newtype == oldtype:\n return\n if oldtype.names is not None:\n for name in oldtype.names:\n if (oldtype.fields[name][1] == offset and\n oldtype.fields[name][0] == newtype):\n return\n raise TypeError("Cannot get/set field of an object array")\n return\n\ndef _view_is_safe(oldtype, newtype):\n """ Checks safety of a view involving object arrays, for example when\n doing::\n\n 
np.zeros(10, dtype=oldtype).view(newtype)\n\n Parameters\n ----------\n oldtype : data-type\n Data type of original ndarray\n newtype : data-type\n Data type of the view\n\n Raises\n ------\n TypeError\n If the new type is incompatible with the old type.\n\n """\n\n # if the types are equivalent, there is no problem.\n # for example: dtype((np.record, 'i4,i4')) == dtype((np.void, 'i4,i4'))\n if oldtype == newtype:\n return\n\n if newtype.hasobject or oldtype.hasobject:\n raise TypeError("Cannot change data-type for array of references.")\n return\n\n\n# Given a string containing a PEP 3118 format specifier,\n# construct a NumPy dtype\n\n_pep3118_native_map = {\n '?': '?',\n 'c': 'S1',\n 'b': 'b',\n 'B': 'B',\n 'h': 'h',\n 'H': 'H',\n 'i': 'i',\n 'I': 'I',\n 'l': 'l',\n 'L': 'L',\n 'q': 'q',\n 'Q': 'Q',\n 'e': 'e',\n 'f': 'f',\n 'd': 'd',\n 'g': 'g',\n 'Zf': 'F',\n 'Zd': 'D',\n 'Zg': 'G',\n 's': 'S',\n 'w': 'U',\n 'O': 'O',\n 'x': 'V', # padding\n}\n_pep3118_native_typechars = ''.join(_pep3118_native_map.keys())\n\n_pep3118_standard_map = {\n '?': '?',\n 'c': 'S1',\n 'b': 'b',\n 'B': 'B',\n 'h': 'i2',\n 'H': 'u2',\n 'i': 'i4',\n 'I': 'u4',\n 'l': 'i4',\n 'L': 'u4',\n 'q': 'i8',\n 'Q': 'u8',\n 'e': 'f2',\n 'f': 'f',\n 'd': 'd',\n 'Zf': 'F',\n 'Zd': 'D',\n 's': 'S',\n 'w': 'U',\n 'O': 'O',\n 'x': 'V', # padding\n}\n_pep3118_standard_typechars = ''.join(_pep3118_standard_map.keys())\n\n_pep3118_unsupported_map = {\n 'u': 'UCS-2 strings',\n '&': 'pointers',\n 't': 'bitfields',\n 'X': 'function pointers',\n}\n\nclass _Stream:\n def __init__(self, s):\n self.s = s\n self.byteorder = '@'\n\n def advance(self, n):\n res = self.s[:n]\n self.s = self.s[n:]\n return res\n\n def consume(self, c):\n if self.s[:len(c)] == c:\n self.advance(len(c))\n return True\n return False\n\n def consume_until(self, c):\n if callable(c):\n i = 0\n while i < len(self.s) and not c(self.s[i]):\n i = i + 1\n return self.advance(i)\n else:\n i = self.s.index(c)\n res = self.advance(i)\n 
self.advance(len(c))\n return res\n\n @property\n def next(self):\n return self.s[0]\n\n def __bool__(self):\n return bool(self.s)\n\n\ndef _dtype_from_pep3118(spec):\n stream = _Stream(spec)\n dtype, align = __dtype_from_pep3118(stream, is_subdtype=False)\n return dtype\n\ndef __dtype_from_pep3118(stream, is_subdtype):\n field_spec = {\n 'names': [],\n 'formats': [],\n 'offsets': [],\n 'itemsize': 0\n }\n offset = 0\n common_alignment = 1\n is_padding = False\n\n # Parse spec\n while stream:\n value = None\n\n # End of structure, bail out to upper level\n if stream.consume('}'):\n break\n\n # Sub-arrays (1)\n shape = None\n if stream.consume('('):\n shape = stream.consume_until(')')\n shape = tuple(map(int, shape.split(',')))\n\n # Byte order\n if stream.next in ('@', '=', '<', '>', '^', '!'):\n byteorder = stream.advance(1)\n if byteorder == '!':\n byteorder = '>'\n stream.byteorder = byteorder\n\n # Byte order characters also control native vs. standard type sizes\n if stream.byteorder in ('@', '^'):\n type_map = _pep3118_native_map\n type_map_chars = _pep3118_native_typechars\n else:\n type_map = _pep3118_standard_map\n type_map_chars = _pep3118_standard_typechars\n\n # Item sizes\n itemsize_str = stream.consume_until(lambda c: not c.isdigit())\n if itemsize_str:\n itemsize = int(itemsize_str)\n else:\n itemsize = 1\n\n # Data types\n is_padding = False\n\n if stream.consume('T{'):\n value, align = __dtype_from_pep3118(\n stream, is_subdtype=True)\n elif stream.next in type_map_chars:\n if stream.next == 'Z':\n typechar = stream.advance(2)\n else:\n typechar = stream.advance(1)\n\n is_padding = (typechar == 'x')\n dtypechar = type_map[typechar]\n if dtypechar in 'USV':\n dtypechar += '%d' % itemsize\n itemsize = 1\n numpy_byteorder = {'@': '=', '^': '='}.get(\n stream.byteorder, stream.byteorder)\n value = dtype(numpy_byteorder + dtypechar)\n align = value.alignment\n elif stream.next in _pep3118_unsupported_map:\n desc = _pep3118_unsupported_map[stream.next]\n 
raise NotImplementedError(\n f"Unrepresentable PEP 3118 data type {stream.next!r} ({desc})")\n else:\n raise ValueError(\n f"Unknown PEP 3118 data type specifier {stream.s!r}"\n )\n\n #\n # Native alignment may require padding\n #\n # Here we assume that the presence of a '@' character implicitly\n # implies that the start of the array is *already* aligned.\n #\n extra_offset = 0\n if stream.byteorder == '@':\n start_padding = (-offset) % align\n intra_padding = (-value.itemsize) % align\n\n offset += start_padding\n\n if intra_padding != 0:\n if itemsize > 1 or (shape is not None and _prod(shape) > 1):\n # Inject internal padding to the end of the sub-item\n value = _add_trailing_padding(value, intra_padding)\n else:\n # We can postpone the injection of internal padding,\n # as the item appears at most once\n extra_offset += intra_padding\n\n # Update common alignment\n common_alignment = _lcm(align, common_alignment)\n\n # Convert itemsize to sub-array\n if itemsize != 1:\n value = dtype((value, (itemsize,)))\n\n # Sub-arrays (2)\n if shape is not None:\n value = dtype((value, shape))\n\n # Field name\n if stream.consume(':'):\n name = stream.consume_until(':')\n else:\n name = None\n\n if not (is_padding and name is None):\n if name is not None and name in field_spec['names']:\n raise RuntimeError(\n f"Duplicate field name '{name}' in PEP3118 format"\n )\n field_spec['names'].append(name)\n field_spec['formats'].append(value)\n field_spec['offsets'].append(offset)\n\n offset += value.itemsize\n offset += extra_offset\n\n field_spec['itemsize'] = offset\n\n # extra final padding for aligned types\n if stream.byteorder == '@':\n field_spec['itemsize'] += (-offset) % common_alignment\n\n # Check if this was a simple 1-item type, and unwrap it\n if (field_spec['names'] == [None]\n and field_spec['offsets'][0] == 0\n and field_spec['itemsize'] == field_spec['formats'][0].itemsize\n and not is_subdtype):\n ret = field_spec['formats'][0]\n else:\n 
_fix_names(field_spec)\n ret = dtype(field_spec)\n\n # Finished\n return ret, common_alignment\n\ndef _fix_names(field_spec):\n """ Replace names which are None with the next unused f%d name """\n names = field_spec['names']\n for i, name in enumerate(names):\n if name is not None:\n continue\n\n j = 0\n while True:\n name = f'f{j}'\n if name not in names:\n break\n j = j + 1\n names[i] = name\n\ndef _add_trailing_padding(value, padding):\n """Inject the specified number of padding bytes at the end of a dtype"""\n if value.fields is None:\n field_spec = {\n 'names': ['f0'],\n 'formats': [value],\n 'offsets': [0],\n 'itemsize': value.itemsize\n }\n else:\n fields = value.fields\n names = value.names\n field_spec = {\n 'names': names,\n 'formats': [fields[name][0] for name in names],\n 'offsets': [fields[name][1] for name in names],\n 'itemsize': value.itemsize\n }\n\n field_spec['itemsize'] += padding\n return dtype(field_spec)\n\ndef _prod(a):\n p = 1\n for x in a:\n p *= x\n return p\n\ndef _gcd(a, b):\n """Calculate the greatest common divisor of a and b"""\n if not (math.isfinite(a) and math.isfinite(b)):\n raise ValueError('Can only find greatest common divisor of '\n f'finite arguments, found "{a}" and "{b}"')\n while b:\n a, b = b, a % b\n return a\n\ndef _lcm(a, b):\n return a // _gcd(a, b) * b\n\ndef array_ufunc_errmsg_formatter(dummy, ufunc, method, *inputs, **kwargs):\n """ Format the error message for when __array_ufunc__ gives up. """\n args_string = ', '.join([f'{arg!r}' for arg in inputs] +\n [f'{k}={v!r}'\n for k, v in kwargs.items()])\n args = inputs + kwargs.get('out', ())\n types_string = ', '.join(repr(type(arg).__name__) for arg in args)\n return ('operand type(s) all returned NotImplemented from '\n f'__array_ufunc__({ufunc!r}, {method!r}, {args_string}): {types_string}'\n )\n\n\ndef array_function_errmsg_formatter(public_api, types):\n """ Format the error message for when __array_ufunc__ gives up. 
"""\n func_name = f'{public_api.__module__}.{public_api.__name__}'\n return (f"no implementation found for '{func_name}' on types that implement "\n f'__array_function__: {list(types)}')\n\n\ndef _ufunc_doc_signature_formatter(ufunc):\n """\n Builds a signature string which resembles PEP 457\n\n This is used to construct the first line of the docstring\n """\n\n # input arguments are simple\n if ufunc.nin == 1:\n in_args = 'x'\n else:\n in_args = ', '.join(f'x{i + 1}' for i in range(ufunc.nin))\n\n # output arguments are both keyword or positional\n if ufunc.nout == 0:\n out_args = ', /, out=()'\n elif ufunc.nout == 1:\n out_args = ', /, out=None'\n else:\n out_args = '[, {positional}], / [, out={default}]'.format(\n positional=', '.join(\n f'out{i + 1}' for i in range(ufunc.nout)),\n default=repr((None,) * ufunc.nout)\n )\n\n # keyword only args depend on whether this is a gufunc\n kwargs = (\n ", casting='same_kind'"\n ", order='K'"\n ", dtype=None"\n ", subok=True"\n )\n\n # NOTE: gufuncs may or may not support the `axis` parameter\n if ufunc.signature is None:\n kwargs = f", where=True{kwargs}[, signature]"\n else:\n kwargs += "[, signature, axes, axis]"\n\n # join all the parts together\n return f'{ufunc.__name__}({in_args}{out_args}, *{kwargs})'\n\n\ndef npy_ctypes_check(cls):\n # determine if a class comes from ctypes, in order to work around\n # a bug in the buffer protocol for those objects, bpo-10746\n try:\n # ctypes class are new-style, so have an __mro__. 
This probably fails\n # for ctypes classes with multiple inheritance.\n if IS_PYPY:\n # (..., _ctypes.basics._CData, Bufferable, object)\n ctype_base = cls.__mro__[-3]\n else:\n # # (..., _ctypes._CData, object)\n ctype_base = cls.__mro__[-2]\n # right now, they're part of the _ctypes module\n return '_ctypes' in ctype_base.__module__\n except Exception:\n return False\n\n# used to handle the _NoValue default argument for na_object\n# in the C implementation of the __reduce__ method for stringdtype\ndef _convert_to_stringdtype_kwargs(coerce, na_object=_NoValue):\n if na_object is _NoValue:\n return StringDType(coerce=coerce)\n return StringDType(coerce=coerce, na_object=na_object)\n
.venv\Lib\site-packages\numpy\_core\_internal.py
_internal.py
Python
29,939
0.95
0.201461
0.082614
vue-tools
436
2024-04-22T19:48:22.303877
MIT
false
8345580ded38c5762393f9da868d8f3c
import ctypes as ct\nimport re\nfrom collections.abc import Callable, Iterable\nfrom typing import Any, Final, Generic, Self, overload\n\nfrom typing_extensions import TypeVar, deprecated\n\nimport numpy as np\nimport numpy.typing as npt\nfrom numpy.ctypeslib import c_intp\n\n_CastT = TypeVar("_CastT", bound=ct._CanCastTo)\n_T_co = TypeVar("_T_co", covariant=True)\n_CT = TypeVar("_CT", bound=ct._CData)\n_PT_co = TypeVar("_PT_co", bound=int | None, default=None, covariant=True)\n\n###\n\nIS_PYPY: Final[bool] = ...\n\nformat_re: Final[re.Pattern[str]] = ...\nsep_re: Final[re.Pattern[str]] = ...\nspace_re: Final[re.Pattern[str]] = ...\n\n###\n\n# TODO: Let the likes of `shape_as` and `strides_as` return `None`\n# for 0D arrays once we've got shape-support\n\nclass _ctypes(Generic[_PT_co]):\n @overload\n def __init__(self: _ctypes[None], /, array: npt.NDArray[Any], ptr: None = None) -> None: ...\n @overload\n def __init__(self, /, array: npt.NDArray[Any], ptr: _PT_co) -> None: ...\n\n #\n @property\n def data(self) -> _PT_co: ...\n @property\n def shape(self) -> ct.Array[c_intp]: ...\n @property\n def strides(self) -> ct.Array[c_intp]: ...\n @property\n def _as_parameter_(self) -> ct.c_void_p: ...\n\n #\n def data_as(self, /, obj: type[_CastT]) -> _CastT: ...\n def shape_as(self, /, obj: type[_CT]) -> ct.Array[_CT]: ...\n def strides_as(self, /, obj: type[_CT]) -> ct.Array[_CT]: ...\n\n #\n @deprecated('"get_data" is deprecated. Use "data" instead')\n def get_data(self, /) -> _PT_co: ...\n @deprecated('"get_shape" is deprecated. Use "shape" instead')\n def get_shape(self, /) -> ct.Array[c_intp]: ...\n @deprecated('"get_strides" is deprecated. Use "strides" instead')\n def get_strides(self, /) -> ct.Array[c_intp]: ...\n @deprecated('"get_as_parameter" is deprecated. 
Use "_as_parameter_" instead')\n def get_as_parameter(self, /) -> ct.c_void_p: ...\n\nclass dummy_ctype(Generic[_T_co]):\n _cls: type[_T_co]\n\n def __init__(self, /, cls: type[_T_co]) -> None: ...\n def __eq__(self, other: Self, /) -> bool: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride]\n def __ne__(self, other: Self, /) -> bool: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride]\n def __mul__(self, other: object, /) -> Self: ...\n def __call__(self, /, *other: object) -> _T_co: ...\n\ndef array_ufunc_errmsg_formatter(dummy: object, ufunc: np.ufunc, method: str, *inputs: object, **kwargs: object) -> str: ...\ndef array_function_errmsg_formatter(public_api: Callable[..., object], types: Iterable[str]) -> str: ...\ndef npy_ctypes_check(cls: type) -> bool: ...\n
.venv\Lib\site-packages\numpy\_core\_internal.pyi
_internal.pyi
Other
2,726
0.95
0.333333
0.122807
python-kit
130
2024-09-23T18:48:19.235763
GPL-3.0
false
e57da0f32222dc4a72676dd16ebfa6c6
"""\nMachine arithmetic - determine the parameters of the\nfloating-point arithmetic system\n\nAuthor: Pearu Peterson, September 2003\n\n"""\n__all__ = ['MachAr']\n\nfrom ._ufunc_config import errstate\nfrom .fromnumeric import any\n\n# Need to speed this up...especially for longdouble\n\n# Deprecated 2021-10-20, NumPy 1.22\nclass MachAr:\n """\n Diagnosing machine parameters.\n\n Attributes\n ----------\n ibeta : int\n Radix in which numbers are represented.\n it : int\n Number of base-`ibeta` digits in the floating point mantissa M.\n machep : int\n Exponent of the smallest (most negative) power of `ibeta` that,\n added to 1.0, gives something different from 1.0\n eps : float\n Floating-point number ``beta**machep`` (floating point precision)\n negep : int\n Exponent of the smallest power of `ibeta` that, subtracted\n from 1.0, gives something different from 1.0.\n epsneg : float\n Floating-point number ``beta**negep``.\n iexp : int\n Number of bits in the exponent (including its sign and bias).\n minexp : int\n Smallest (most negative) power of `ibeta` consistent with there\n being no leading zeros in the mantissa.\n xmin : float\n Floating-point number ``beta**minexp`` (the smallest [in\n magnitude] positive floating point number with full precision).\n maxexp : int\n Smallest (positive) power of `ibeta` that causes overflow.\n xmax : float\n ``(1-epsneg) * beta**maxexp`` (the largest [in magnitude]\n usable floating value).\n irnd : int\n In ``range(6)``, information on what kind of rounding is done\n in addition, and on how underflow is handled.\n ngrd : int\n Number of 'guard digits' used when truncating the product\n of two mantissas to fit the representation.\n epsilon : float\n Same as `eps`.\n tiny : float\n An alias for `smallest_normal`, kept for backwards compatibility.\n huge : float\n Same as `xmax`.\n precision : float\n ``- int(-log10(eps))``\n resolution : float\n ``- 10**(-precision)``\n smallest_normal : float\n The smallest positive floating 
point number with 1 as leading bit in\n the mantissa following IEEE-754. Same as `xmin`.\n smallest_subnormal : float\n The smallest positive floating point number with 0 as leading bit in\n the mantissa following IEEE-754.\n\n Parameters\n ----------\n float_conv : function, optional\n Function that converts an integer or integer array to a float\n or float array. Default is `float`.\n int_conv : function, optional\n Function that converts a float or float array to an integer or\n integer array. Default is `int`.\n float_to_float : function, optional\n Function that converts a float array to float. Default is `float`.\n Note that this does not seem to do anything useful in the current\n implementation.\n float_to_str : function, optional\n Function that converts a single float to a string. Default is\n ``lambda v:'%24.16e' %v``.\n title : str, optional\n Title that is printed in the string representation of `MachAr`.\n\n See Also\n --------\n finfo : Machine limits for floating point types.\n iinfo : Machine limits for integer types.\n\n References\n ----------\n .. [1] Press, Teukolsky, Vetterling and Flannery,\n "Numerical Recipes in C++," 2nd ed,\n Cambridge University Press, 2002, p. 
31.\n\n """\n\n def __init__(self, float_conv=float, int_conv=int,\n float_to_float=float,\n float_to_str=lambda v: f'{v:24.16e}',\n title='Python floating point number'):\n """\n\n float_conv - convert integer to float (array)\n int_conv - convert float (array) to integer\n float_to_float - convert float array to float\n float_to_str - convert array float to str\n title - description of used floating point numbers\n\n """\n # We ignore all errors here because we are purposely triggering\n # underflow to detect the properties of the running arch.\n with errstate(under='ignore'):\n self._do_init(float_conv, int_conv, float_to_float, float_to_str, title)\n\n def _do_init(self, float_conv, int_conv, float_to_float, float_to_str, title):\n max_iterN = 10000\n msg = "Did not converge after %d tries with %s"\n one = float_conv(1)\n two = one + one\n zero = one - one\n\n # Do we really need to do this? Aren't they 2 and 2.0?\n # Determine ibeta and beta\n a = one\n for _ in range(max_iterN):\n a = a + a\n temp = a + one\n temp1 = temp - a\n if any(temp1 - one != zero):\n break\n else:\n raise RuntimeError(msg % (_, one.dtype))\n b = one\n for _ in range(max_iterN):\n b = b + b\n temp = a + b\n itemp = int_conv(temp - a)\n if any(itemp != 0):\n break\n else:\n raise RuntimeError(msg % (_, one.dtype))\n ibeta = itemp\n beta = float_conv(ibeta)\n\n # Determine it and irnd\n it = -1\n b = one\n for _ in range(max_iterN):\n it = it + 1\n b = b * beta\n temp = b + one\n temp1 = temp - b\n if any(temp1 - one != zero):\n break\n else:\n raise RuntimeError(msg % (_, one.dtype))\n\n betah = beta / two\n a = one\n for _ in range(max_iterN):\n a = a + a\n temp = a + one\n temp1 = temp - a\n if any(temp1 - one != zero):\n break\n else:\n raise RuntimeError(msg % (_, one.dtype))\n temp = a + betah\n irnd = 0\n if any(temp - a != zero):\n irnd = 1\n tempa = a + beta\n temp = tempa + betah\n if irnd == 0 and any(temp - tempa != zero):\n irnd = 2\n\n # Determine negep and epsneg\n negep = 
it + 3\n betain = one / beta\n a = one\n for i in range(negep):\n a = a * betain\n b = a\n for _ in range(max_iterN):\n temp = one - a\n if any(temp - one != zero):\n break\n a = a * beta\n negep = negep - 1\n # Prevent infinite loop on PPC with gcc 4.0:\n if negep < 0:\n raise RuntimeError("could not determine machine tolerance "\n "for 'negep', locals() -> %s" % (locals()))\n else:\n raise RuntimeError(msg % (_, one.dtype))\n negep = -negep\n epsneg = a\n\n # Determine machep and eps\n machep = - it - 3\n a = b\n\n for _ in range(max_iterN):\n temp = one + a\n if any(temp - one != zero):\n break\n a = a * beta\n machep = machep + 1\n else:\n raise RuntimeError(msg % (_, one.dtype))\n eps = a\n\n # Determine ngrd\n ngrd = 0\n temp = one + eps\n if irnd == 0 and any(temp * one - one != zero):\n ngrd = 1\n\n # Determine iexp\n i = 0\n k = 1\n z = betain\n t = one + eps\n nxres = 0\n for _ in range(max_iterN):\n y = z\n z = y * y\n a = z * one # Check here for underflow\n temp = z * t\n if any(a + a == zero) or any(abs(z) >= y):\n break\n temp1 = temp * betain\n if any(temp1 * beta == z):\n break\n i = i + 1\n k = k + k\n else:\n raise RuntimeError(msg % (_, one.dtype))\n if ibeta != 10:\n iexp = i + 1\n mx = k + k\n else:\n iexp = 2\n iz = ibeta\n while k >= iz:\n iz = iz * ibeta\n iexp = iexp + 1\n mx = iz + iz - 1\n\n # Determine minexp and xmin\n for _ in range(max_iterN):\n xmin = y\n y = y * betain\n a = y * one\n temp = y * t\n if any((a + a) != zero) and any(abs(y) < xmin):\n k = k + 1\n temp1 = temp * betain\n if any(temp1 * beta == y) and any(temp != y):\n nxres = 3\n xmin = y\n break\n else:\n break\n else:\n raise RuntimeError(msg % (_, one.dtype))\n minexp = -k\n\n # Determine maxexp, xmax\n if mx <= k + k - 3 and ibeta != 10:\n mx = mx + mx\n iexp = iexp + 1\n maxexp = mx + minexp\n irnd = irnd + nxres\n if irnd >= 2:\n maxexp = maxexp - 2\n i = maxexp + minexp\n if ibeta == 2 and not i:\n maxexp = maxexp - 1\n if i > 20:\n maxexp = maxexp - 1\n if 
any(a != y):\n maxexp = maxexp - 2\n xmax = one - epsneg\n if any(xmax * one != xmax):\n xmax = one - beta * epsneg\n xmax = xmax / (xmin * beta * beta * beta)\n i = maxexp + minexp + 3\n for j in range(i):\n if ibeta == 2:\n xmax = xmax + xmax\n else:\n xmax = xmax * beta\n\n smallest_subnormal = abs(xmin / beta ** (it))\n\n self.ibeta = ibeta\n self.it = it\n self.negep = negep\n self.epsneg = float_to_float(epsneg)\n self._str_epsneg = float_to_str(epsneg)\n self.machep = machep\n self.eps = float_to_float(eps)\n self._str_eps = float_to_str(eps)\n self.ngrd = ngrd\n self.iexp = iexp\n self.minexp = minexp\n self.xmin = float_to_float(xmin)\n self._str_xmin = float_to_str(xmin)\n self.maxexp = maxexp\n self.xmax = float_to_float(xmax)\n self._str_xmax = float_to_str(xmax)\n self.irnd = irnd\n\n self.title = title\n # Commonly used parameters\n self.epsilon = self.eps\n self.tiny = self.xmin\n self.huge = self.xmax\n self.smallest_normal = self.xmin\n self._str_smallest_normal = float_to_str(self.xmin)\n self.smallest_subnormal = float_to_float(smallest_subnormal)\n self._str_smallest_subnormal = float_to_str(smallest_subnormal)\n\n import math\n self.precision = int(-math.log10(float_to_float(self.eps)))\n ten = two + two + two + two + two\n resolution = ten ** (-self.precision)\n self.resolution = float_to_float(resolution)\n self._str_resolution = float_to_str(resolution)\n\n def __str__(self):\n fmt = (\n 'Machine parameters for %(title)s\n'\n '---------------------------------------------------------------------\n'\n 'ibeta=%(ibeta)s it=%(it)s iexp=%(iexp)s ngrd=%(ngrd)s irnd=%(irnd)s\n'\n 'machep=%(machep)s eps=%(_str_eps)s (beta**machep == epsilon)\n'\n 'negep =%(negep)s epsneg=%(_str_epsneg)s (beta**epsneg)\n'\n 'minexp=%(minexp)s xmin=%(_str_xmin)s (beta**minexp == tiny)\n'\n 'maxexp=%(maxexp)s xmax=%(_str_xmax)s ((1-epsneg)*beta**maxexp == huge)\n'\n 'smallest_normal=%(smallest_normal)s '\n 'smallest_subnormal=%(smallest_subnormal)s\n'\n 
'---------------------------------------------------------------------\n'\n )\n return fmt % self.__dict__\n\n\nif __name__ == '__main__':\n print(MachAr())\n
.venv\Lib\site-packages\numpy\_core\_machar.py
_machar.py
Python
11,924
0.95
0.140845
0.046296
node-utils
386
2023-11-01T22:44:13.041783
Apache-2.0
false
feeeac0e5366c1a39870d61c654f8560
from collections.abc import Iterable\nfrom typing import Any, Final, TypeVar, overload\n\nimport numpy as np\nfrom numpy import _CastingKind\nfrom numpy._utils import set_module as set_module\n\n###\n\n_T = TypeVar("_T")\n_TupleT = TypeVar("_TupleT", bound=tuple[()] | tuple[Any, Any, *tuple[Any, ...]])\n_ExceptionT = TypeVar("_ExceptionT", bound=Exception)\n\n###\n\nclass UFuncTypeError(TypeError):\n ufunc: Final[np.ufunc]\n def __init__(self, /, ufunc: np.ufunc) -> None: ...\n\nclass _UFuncNoLoopError(UFuncTypeError):\n dtypes: tuple[np.dtype, ...]\n def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype]) -> None: ...\n\nclass _UFuncBinaryResolutionError(_UFuncNoLoopError):\n dtypes: tuple[np.dtype, np.dtype]\n def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype]) -> None: ...\n\nclass _UFuncCastingError(UFuncTypeError):\n casting: Final[_CastingKind]\n from_: Final[np.dtype]\n to: Final[np.dtype]\n def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype) -> None: ...\n\nclass _UFuncInputCastingError(_UFuncCastingError):\n in_i: Final[int]\n def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype, i: int) -> None: ...\n\nclass _UFuncOutputCastingError(_UFuncCastingError):\n out_i: Final[int]\n def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype, i: int) -> None: ...\n\nclass _ArrayMemoryError(MemoryError):\n shape: tuple[int, ...]\n dtype: np.dtype\n def __init__(self, /, shape: tuple[int, ...], dtype: np.dtype) -> None: ...\n @property\n def _total_size(self) -> int: ...\n @staticmethod\n def _size_to_string(num_bytes: int) -> str: ...\n\n@overload\ndef _unpack_tuple(tup: tuple[_T]) -> _T: ...\n@overload\ndef _unpack_tuple(tup: _TupleT) -> _TupleT: ...\ndef _display_as_base(cls: type[_ExceptionT]) -> type[_ExceptionT]: ...\n
.venv\Lib\site-packages\numpy\_core\_machar.pyi
_machar.pyi
Other
1,955
0.95
0.345455
0.046512
awesome-app
90
2024-07-07T03:37:19.175192
MIT
false
471ee4b6aff3edae76d803e6cfb77d07
"""\nArray methods which are called by both the C-code for the method\nand the Python code for the NumPy-namespace function\n\n"""\nimport os\nimport pickle\nimport warnings\nfrom contextlib import nullcontext\n\nimport numpy as np\nfrom numpy._core import multiarray as mu\nfrom numpy._core import numerictypes as nt\nfrom numpy._core import umath as um\nfrom numpy._core.multiarray import asanyarray\nfrom numpy._globals import _NoValue\n\n# save those O(100) nanoseconds!\nbool_dt = mu.dtype("bool")\numr_maximum = um.maximum.reduce\numr_minimum = um.minimum.reduce\numr_sum = um.add.reduce\numr_prod = um.multiply.reduce\numr_bitwise_count = um.bitwise_count\numr_any = um.logical_or.reduce\numr_all = um.logical_and.reduce\n\n# Complex types to -> (2,)float view for fast-path computation in _var()\n_complex_to_float = {\n nt.dtype(nt.csingle): nt.dtype(nt.single),\n nt.dtype(nt.cdouble): nt.dtype(nt.double),\n}\n# Special case for windows: ensure double takes precedence\nif nt.dtype(nt.longdouble) != nt.dtype(nt.double):\n _complex_to_float.update({\n nt.dtype(nt.clongdouble): nt.dtype(nt.longdouble),\n })\n\n# avoid keyword arguments to speed up parsing, saves about 15%-20% for very\n# small reductions\ndef _amax(a, axis=None, out=None, keepdims=False,\n initial=_NoValue, where=True):\n return umr_maximum(a, axis, None, out, keepdims, initial, where)\n\ndef _amin(a, axis=None, out=None, keepdims=False,\n initial=_NoValue, where=True):\n return umr_minimum(a, axis, None, out, keepdims, initial, where)\n\ndef _sum(a, axis=None, dtype=None, out=None, keepdims=False,\n initial=_NoValue, where=True):\n return umr_sum(a, axis, dtype, out, keepdims, initial, where)\n\ndef _prod(a, axis=None, dtype=None, out=None, keepdims=False,\n initial=_NoValue, where=True):\n return umr_prod(a, axis, dtype, out, keepdims, initial, where)\n\ndef _any(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True):\n # By default, return a boolean for any and all\n if dtype is None:\n 
dtype = bool_dt\n # Parsing keyword arguments is currently fairly slow, so avoid it for now\n if where is True:\n return umr_any(a, axis, dtype, out, keepdims)\n return umr_any(a, axis, dtype, out, keepdims, where=where)\n\ndef _all(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True):\n # By default, return a boolean for any and all\n if dtype is None:\n dtype = bool_dt\n # Parsing keyword arguments is currently fairly slow, so avoid it for now\n if where is True:\n return umr_all(a, axis, dtype, out, keepdims)\n return umr_all(a, axis, dtype, out, keepdims, where=where)\n\ndef _count_reduce_items(arr, axis, keepdims=False, where=True):\n # fast-path for the default case\n if where is True:\n # no boolean mask given, calculate items according to axis\n if axis is None:\n axis = tuple(range(arr.ndim))\n elif not isinstance(axis, tuple):\n axis = (axis,)\n items = 1\n for ax in axis:\n items *= arr.shape[mu.normalize_axis_index(ax, arr.ndim)]\n items = nt.intp(items)\n else:\n # TODO: Optimize case when `where` is broadcast along a non-reduction\n # axis and full sum is more excessive than needed.\n\n # guarded to protect circular imports\n from numpy.lib._stride_tricks_impl import broadcast_to\n # count True values in (potentially broadcasted) boolean mask\n items = umr_sum(broadcast_to(where, arr.shape), axis, nt.intp, None,\n keepdims)\n return items\n\ndef _clip(a, min=None, max=None, out=None, **kwargs):\n if a.dtype.kind in "iu":\n # If min/max is a Python integer, deal with out-of-bound values here.\n # (This enforces NEP 50 rules as no value based promotion is done.)\n if type(min) is int and min <= np.iinfo(a.dtype).min:\n min = None\n if type(max) is int and max >= np.iinfo(a.dtype).max:\n max = None\n\n if min is None and max is None:\n # return identity\n return um.positive(a, out=out, **kwargs)\n elif min is None:\n return um.minimum(a, max, out=out, **kwargs)\n elif max is None:\n return um.maximum(a, min, out=out, **kwargs)\n else:\n 
return um.clip(a, min, max, out=out, **kwargs)\n\ndef _mean(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True):\n arr = asanyarray(a)\n\n is_float16_result = False\n\n rcount = _count_reduce_items(arr, axis, keepdims=keepdims, where=where)\n if rcount == 0 if where is True else umr_any(rcount == 0, axis=None):\n warnings.warn("Mean of empty slice.", RuntimeWarning, stacklevel=2)\n\n # Cast bool, unsigned int, and int to float64 by default\n if dtype is None:\n if issubclass(arr.dtype.type, (nt.integer, nt.bool)):\n dtype = mu.dtype('f8')\n elif issubclass(arr.dtype.type, nt.float16):\n dtype = mu.dtype('f4')\n is_float16_result = True\n\n ret = umr_sum(arr, axis, dtype, out, keepdims, where=where)\n if isinstance(ret, mu.ndarray):\n ret = um.true_divide(\n ret, rcount, out=ret, casting='unsafe', subok=False)\n if is_float16_result and out is None:\n ret = arr.dtype.type(ret)\n elif hasattr(ret, 'dtype'):\n if is_float16_result:\n ret = arr.dtype.type(ret / rcount)\n else:\n ret = ret.dtype.type(ret / rcount)\n else:\n ret = ret / rcount\n\n return ret\n\ndef _var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *,\n where=True, mean=None):\n arr = asanyarray(a)\n\n rcount = _count_reduce_items(arr, axis, keepdims=keepdims, where=where)\n # Make this warning show up on top.\n if ddof >= rcount if where is True else umr_any(ddof >= rcount, axis=None):\n warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning,\n stacklevel=2)\n\n # Cast bool, unsigned int, and int to float64 by default\n if dtype is None and issubclass(arr.dtype.type, (nt.integer, nt.bool)):\n dtype = mu.dtype('f8')\n\n if mean is not None:\n arrmean = mean\n else:\n # Compute the mean.\n # Note that if dtype is not of inexact type then arraymean will\n # not be either.\n arrmean = umr_sum(arr, axis, dtype, keepdims=True, where=where)\n # The shape of rcount has to match arrmean to not change the shape of\n # out in broadcasting. 
Otherwise, it cannot be stored back to arrmean.\n if rcount.ndim == 0:\n # fast-path for default case when where is True\n div = rcount\n else:\n # matching rcount to arrmean when where is specified as array\n div = rcount.reshape(arrmean.shape)\n if isinstance(arrmean, mu.ndarray):\n arrmean = um.true_divide(arrmean, div, out=arrmean,\n casting='unsafe', subok=False)\n elif hasattr(arrmean, "dtype"):\n arrmean = arrmean.dtype.type(arrmean / rcount)\n else:\n arrmean = arrmean / rcount\n\n # Compute sum of squared deviations from mean\n # Note that x may not be inexact and that we need it to be an array,\n # not a scalar.\n x = asanyarray(arr - arrmean)\n\n if issubclass(arr.dtype.type, (nt.floating, nt.integer)):\n x = um.multiply(x, x, out=x)\n # Fast-paths for built-in complex types\n elif x.dtype in _complex_to_float:\n xv = x.view(dtype=(_complex_to_float[x.dtype], (2,)))\n um.multiply(xv, xv, out=xv)\n x = um.add(xv[..., 0], xv[..., 1], out=x.real).real\n # Most general case; includes handling object arrays containing imaginary\n # numbers and complex types with non-native byteorder\n else:\n x = um.multiply(x, um.conjugate(x), out=x).real\n\n ret = umr_sum(x, axis, dtype, out, keepdims=keepdims, where=where)\n\n # Compute degrees of freedom and make sure it is not negative.\n rcount = um.maximum(rcount - ddof, 0)\n\n # divide by degrees of freedom\n if isinstance(ret, mu.ndarray):\n ret = um.true_divide(\n ret, rcount, out=ret, casting='unsafe', subok=False)\n elif hasattr(ret, 'dtype'):\n ret = ret.dtype.type(ret / rcount)\n else:\n ret = ret / rcount\n\n return ret\n\ndef _std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *,\n where=True, mean=None):\n ret = _var(a, axis=axis, dtype=dtype, out=out, ddof=ddof,\n keepdims=keepdims, where=where, mean=mean)\n\n if isinstance(ret, mu.ndarray):\n ret = um.sqrt(ret, out=ret)\n elif hasattr(ret, 'dtype'):\n ret = ret.dtype.type(um.sqrt(ret))\n else:\n ret = um.sqrt(ret)\n\n return ret\n\ndef _ptp(a, 
axis=None, out=None, keepdims=False):\n return um.subtract(\n umr_maximum(a, axis, None, out, keepdims),\n umr_minimum(a, axis, None, None, keepdims),\n out\n )\n\ndef _dump(self, file, protocol=2):\n if hasattr(file, 'write'):\n ctx = nullcontext(file)\n else:\n ctx = open(os.fspath(file), "wb")\n with ctx as f:\n pickle.dump(self, f, protocol=protocol)\n\ndef _dumps(self, protocol=2):\n return pickle.dumps(self, protocol=protocol)\n\ndef _bitwise_count(a, out=None, *, where=True, casting='same_kind',\n order='K', dtype=None, subok=True):\n return umr_bitwise_count(a, out, where=where, casting=casting,\n order=order, dtype=dtype, subok=subok)\n
.venv\Lib\site-packages\numpy\_core\_methods.py
_methods.py
Python
9,685
0.95
0.231373
0.165138
awesome-app
46
2024-12-03T08:24:02.197085
BSD-3-Clause
false
2740e3b74074983adc0a5335b8fbbf31
from collections.abc import Callable\nfrom typing import Any, Concatenate, TypeAlias\n\nimport numpy as np\n\nfrom . import _exceptions as _exceptions\n\n###\n\n_Reduce2: TypeAlias = Callable[Concatenate[object, ...], Any]\n\n###\n\nbool_dt: np.dtype[np.bool] = ...\numr_maximum: _Reduce2 = ...\numr_minimum: _Reduce2 = ...\numr_sum: _Reduce2 = ...\numr_prod: _Reduce2 = ...\numr_bitwise_count = np.bitwise_count\numr_any: _Reduce2 = ...\numr_all: _Reduce2 = ...\n_complex_to_float: dict[np.dtype[np.complexfloating], np.dtype[np.floating]] = ...\n
.venv\Lib\site-packages\numpy\_core\_methods.pyi
_methods.pyi
Other
548
0.95
0
0.125
react-lib
628
2024-12-04T06:01:33.963665
GPL-3.0
false
8c911b120ba9a6bb15acbb74016b8158
!<arch>\n/ -1 0 256 `\n
.venv\Lib\site-packages\numpy\_core\_multiarray_tests.cp313-win_amd64.lib
_multiarray_tests.cp313-win_amd64.lib
Other
2,418
0.8
0
0
vue-tools
155
2024-01-17T22:04:25.879772
GPL-3.0
true
9033940c038962dabe3f2fd4fafe2b7c
MZ
.venv\Lib\site-packages\numpy\_core\_multiarray_tests.cp313-win_amd64.pyd
_multiarray_tests.cp313-win_amd64.pyd
Other
63,488
0.95
0.032448
0.00597
awesome-app
155
2023-08-05T07:19:26.393389
GPL-3.0
true
62248b2455df94d63ece8573bc62da6b
!<arch>\n/ -1 0 210 `\n
.venv\Lib\site-packages\numpy\_core\_multiarray_umath.cp313-win_amd64.lib
_multiarray_umath.cp313-win_amd64.lib
Other
2,192
0.8
0
0
vue-tools
824
2025-03-05T11:18:33.755178
GPL-3.0
false
267cc0beae0ae05dd3e6f5a8f5e2fccd
!<arch>\n/ -1 0 218 `\n
.venv\Lib\site-packages\numpy\_core\_operand_flag_tests.cp313-win_amd64.lib
_operand_flag_tests.cp313-win_amd64.lib
Other
2,228
0.8
0
0
python-kit
203
2024-12-23T13:56:46.579417
BSD-3-Clause
true
db45bc3d7acf2174f201130ca9ec0689
MZ
.venv\Lib\site-packages\numpy\_core\_operand_flag_tests.cp313-win_amd64.pyd
_operand_flag_tests.cp313-win_amd64.pyd
Other
12,288
0.95
0.016667
0
python-kit
452
2024-09-06T02:22:41.208382
MIT
true
cd75a241970013476e679f1d0153a86f
!<arch>\n/ -1 0 202 `\n
.venv\Lib\site-packages\numpy\_core\_rational_tests.cp313-win_amd64.lib
_rational_tests.cp313-win_amd64.lib
Other
2,156
0.8
0
0
vue-tools
822
2023-11-15T08:37:50.377033
Apache-2.0
true
23cf9c903fbdaa046a67d04fa1414d7d
MZ
.venv\Lib\site-packages\numpy\_core\_rational_tests.cp313-win_amd64.pyd
_rational_tests.cp313-win_amd64.pyd
Other
39,936
0.95
0.004274
0.012876
python-kit
236
2023-10-28T16:46:04.441700
BSD-3-Clause
true
39db811a75a5f13101e727aeeb38fdd6
!<arch>\n/ -1 0 162 `\n
.venv\Lib\site-packages\numpy\_core\_simd.cp313-win_amd64.lib
_simd.cp313-win_amd64.lib
Other
1,976
0.8
0
0
react-lib
323
2025-01-12T08:58:26.029617
Apache-2.0
false
c4d7ad23c66cd2e4e08068dc8919730b
from types import ModuleType\nfrom typing import TypedDict, type_check_only\n\n# NOTE: these 5 are only defined on systems with an intel processor\nSSE42: ModuleType | None = ...\nFMA3: ModuleType | None = ...\nAVX2: ModuleType | None = ...\nAVX512F: ModuleType | None = ...\nAVX512_SKX: ModuleType | None = ...\n\nbaseline: ModuleType | None = ...\n\n@type_check_only\nclass SimdTargets(TypedDict):\n SSE42: ModuleType | None\n AVX2: ModuleType | None\n FMA3: ModuleType | None\n AVX512F: ModuleType | None\n AVX512_SKX: ModuleType | None\n baseline: ModuleType | None\n\ntargets: SimdTargets = ...\n\ndef clear_floatstatus() -> None: ...\ndef get_floatstatus() -> int: ...\n
.venv\Lib\site-packages\numpy\_core\_simd.pyi
_simd.pyi
Other
694
0.95
0.12
0.05
react-lib
128
2025-03-06T12:52:34.452603
Apache-2.0
false
2d5d712cd9758943395ee4e62b31e3a6
"""\nString-handling utilities to avoid locale-dependence.\n\nUsed primarily to generate type name aliases.\n"""\n# "import string" is costly to import!\n# Construct the translation tables directly\n# "A" = chr(65), "a" = chr(97)\n_all_chars = tuple(map(chr, range(256)))\n_ascii_upper = _all_chars[65:65 + 26]\n_ascii_lower = _all_chars[97:97 + 26]\nLOWER_TABLE = _all_chars[:65] + _ascii_lower + _all_chars[65 + 26:]\nUPPER_TABLE = _all_chars[:97] + _ascii_upper + _all_chars[97 + 26:]\n\n\ndef english_lower(s):\n """ Apply English case rules to convert ASCII strings to all lower case.\n\n This is an internal utility function to replace calls to str.lower() such\n that we can avoid changing behavior with changing locales. In particular,\n Turkish has distinct dotted and dotless variants of the Latin letter "I" in\n both lowercase and uppercase. Thus, "I".lower() != "i" in a "tr" locale.\n\n Parameters\n ----------\n s : str\n\n Returns\n -------\n lowered : str\n\n Examples\n --------\n >>> from numpy._core.numerictypes import english_lower\n >>> english_lower('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_')\n 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz0123456789_'\n >>> english_lower('')\n ''\n """\n lowered = s.translate(LOWER_TABLE)\n return lowered\n\n\ndef english_upper(s):\n """ Apply English case rules to convert ASCII strings to all upper case.\n\n This is an internal utility function to replace calls to str.upper() such\n that we can avoid changing behavior with changing locales. In particular,\n Turkish has distinct dotted and dotless variants of the Latin letter "I" in\n both lowercase and uppercase. 
Thus, "i".upper() != "I" in a "tr" locale.\n\n Parameters\n ----------\n s : str\n\n Returns\n -------\n uppered : str\n\n Examples\n --------\n >>> from numpy._core.numerictypes import english_upper\n >>> english_upper('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_')\n 'ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_'\n >>> english_upper('')\n ''\n """\n uppered = s.translate(UPPER_TABLE)\n return uppered\n\n\ndef english_capitalize(s):\n """ Apply English case rules to convert the first character of an ASCII\n string to upper case.\n\n This is an internal utility function to replace calls to str.capitalize()\n such that we can avoid changing behavior with changing locales.\n\n Parameters\n ----------\n s : str\n\n Returns\n -------\n capitalized : str\n\n Examples\n --------\n >>> from numpy._core.numerictypes import english_capitalize\n >>> english_capitalize('int8')\n 'Int8'\n >>> english_capitalize('Int8')\n 'Int8'\n >>> english_capitalize('')\n ''\n """\n if s:\n return english_upper(s[0]) + s[1:]\n else:\n return s\n
.venv\Lib\site-packages\numpy\_core\_string_helpers.py
_string_helpers.py
Python
2,945
0.95
0.07
0.037037
react-lib
635
2024-09-05T08:43:00.609139
GPL-3.0
false
1a3a8b66f1e7f6e51c7ce3724d6275e8
from typing import Final\n\n_all_chars: Final[tuple[str, ...]] = ...\n_ascii_upper: Final[tuple[str, ...]] = ...\n_ascii_lower: Final[tuple[str, ...]] = ...\n\nLOWER_TABLE: Final[tuple[str, ...]] = ...\nUPPER_TABLE: Final[tuple[str, ...]] = ...\n\ndef english_lower(s: str) -> str: ...\ndef english_upper(s: str) -> str: ...\ndef english_capitalize(s: str) -> str: ...\n
.venv\Lib\site-packages\numpy\_core\_string_helpers.pyi
_string_helpers.pyi
Other
370
0.85
0.25
0
node-utils
64
2024-04-01T16:39:21.022748
BSD-3-Clause
false
1c8fed39509d4ecad6e1b47a739f99b9
!<arch>\n/ -1 0 218 `\n
.venv\Lib\site-packages\numpy\_core\_struct_ufunc_tests.cp313-win_amd64.lib
_struct_ufunc_tests.cp313-win_amd64.lib
Other
2,228
0.8
0
0
node-utils
86
2025-03-05T18:06:35.958062
MIT
true
f84a82856ffcb6f72e13e1ac6897e8c0
MZ
.venv\Lib\site-packages\numpy\_core\_struct_ufunc_tests.cp313-win_amd64.pyd
_struct_ufunc_tests.cp313-win_amd64.pyd
Other
14,336
0.95
0.014493
0.014493
awesome-app
246
2025-06-17T09:46:44.497193
Apache-2.0
true
a1f4cda13512e5db0845fd31f2674bf7
"""\nDue to compatibility, numpy has a very large number of different naming\nconventions for the scalar types (those subclassing from `numpy.generic`).\nThis file produces a convoluted set of dictionaries mapping names to types,\nand sometimes other mappings too.\n\n.. data:: allTypes\n A dictionary of names to types that will be exposed as attributes through\n ``np._core.numerictypes.*``\n\n.. data:: sctypeDict\n Similar to `allTypes`, but maps a broader set of aliases to their types.\n\n.. data:: sctypes\n A dictionary keyed by a "type group" string, providing a list of types\n under that group.\n\n"""\n\nimport numpy._core.multiarray as ma\nfrom numpy._core.multiarray import dtype, typeinfo\n\n######################################\n# Building `sctypeDict` and `allTypes`\n######################################\n\nsctypeDict = {}\nallTypes = {}\nc_names_dict = {}\n\n_abstract_type_names = {\n "generic", "integer", "inexact", "floating", "number",\n "flexible", "character", "complexfloating", "unsignedinteger",\n "signedinteger"\n}\n\nfor _abstract_type_name in _abstract_type_names:\n allTypes[_abstract_type_name] = getattr(ma, _abstract_type_name)\n\nfor k, v in typeinfo.items():\n if k.startswith("NPY_") and v not in c_names_dict:\n c_names_dict[k[4:]] = v\n else:\n concrete_type = v.type\n allTypes[k] = concrete_type\n sctypeDict[k] = concrete_type\n\n_aliases = {\n "double": "float64",\n "cdouble": "complex128",\n "single": "float32",\n "csingle": "complex64",\n "half": "float16",\n "bool_": "bool",\n # Default integer:\n "int_": "intp",\n "uint": "uintp",\n}\n\nfor k, v in _aliases.items():\n sctypeDict[k] = allTypes[v]\n allTypes[k] = allTypes[v]\n\n# extra aliases are added only to `sctypeDict`\n# to support dtype name access, such as`np.dtype("float")`\n_extra_aliases = {\n "float": "float64",\n "complex": "complex128",\n "object": "object_",\n "bytes": "bytes_",\n "a": "bytes_",\n "int": "int_",\n "str": "str_",\n "unicode": "str_",\n}\n\nfor k, v in 
_extra_aliases.items():\n sctypeDict[k] = allTypes[v]\n\n# include extended precision sized aliases\nfor is_complex, full_name in [(False, "longdouble"), (True, "clongdouble")]:\n longdouble_type: type = allTypes[full_name]\n\n bits: int = dtype(longdouble_type).itemsize * 8\n base_name: str = "complex" if is_complex else "float"\n extended_prec_name: str = f"{base_name}{bits}"\n if extended_prec_name not in allTypes:\n sctypeDict[extended_prec_name] = longdouble_type\n allTypes[extended_prec_name] = longdouble_type\n\n\n####################\n# Building `sctypes`\n####################\n\nsctypes = {"int": set(), "uint": set(), "float": set(),\n "complex": set(), "others": set()}\n\nfor type_info in typeinfo.values():\n if type_info.kind in ["M", "m"]: # exclude timedelta and datetime\n continue\n\n concrete_type = type_info.type\n\n # find proper group for each concrete type\n for type_group, abstract_type in [\n ("int", ma.signedinteger), ("uint", ma.unsignedinteger),\n ("float", ma.floating), ("complex", ma.complexfloating),\n ("others", ma.generic)\n ]:\n if issubclass(concrete_type, abstract_type):\n sctypes[type_group].add(concrete_type)\n break\n\n# sort sctype groups by bitsize\nfor sctype_key in sctypes.keys():\n sctype_list = list(sctypes[sctype_key])\n sctype_list.sort(key=lambda x: dtype(x).itemsize)\n sctypes[sctype_key] = sctype_list\n
.venv\Lib\site-packages\numpy\_core\_type_aliases.py
_type_aliases.py
Python
3,608
0.95
0.12605
0.125
vue-tools
960
2024-09-14T15:35:10.019347
BSD-3-Clause
false
ea60b444435ebcee365dd91721173200
from collections.abc import Collection\nfrom typing import Final, TypeAlias, TypedDict, type_check_only\nfrom typing import Literal as L\n\nimport numpy as np\n\n__all__ = (\n "_abstract_type_names",\n "_aliases",\n "_extra_aliases",\n "allTypes",\n "c_names_dict",\n "sctypeDict",\n "sctypes",\n)\n\nsctypeDict: Final[dict[str, type[np.generic]]]\nallTypes: Final[dict[str, type[np.generic]]]\n\n@type_check_only\nclass _CNamesDict(TypedDict):\n BOOL: np.dtype[np.bool]\n HALF: np.dtype[np.half]\n FLOAT: np.dtype[np.single]\n DOUBLE: np.dtype[np.double]\n LONGDOUBLE: np.dtype[np.longdouble]\n CFLOAT: np.dtype[np.csingle]\n CDOUBLE: np.dtype[np.cdouble]\n CLONGDOUBLE: np.dtype[np.clongdouble]\n STRING: np.dtype[np.bytes_]\n UNICODE: np.dtype[np.str_]\n VOID: np.dtype[np.void]\n OBJECT: np.dtype[np.object_]\n DATETIME: np.dtype[np.datetime64]\n TIMEDELTA: np.dtype[np.timedelta64]\n BYTE: np.dtype[np.byte]\n UBYTE: np.dtype[np.ubyte]\n SHORT: np.dtype[np.short]\n USHORT: np.dtype[np.ushort]\n INT: np.dtype[np.intc]\n UINT: np.dtype[np.uintc]\n LONG: np.dtype[np.long]\n ULONG: np.dtype[np.ulong]\n LONGLONG: np.dtype[np.longlong]\n ULONGLONG: np.dtype[np.ulonglong]\n\nc_names_dict: Final[_CNamesDict]\n\n_AbstractTypeName: TypeAlias = L[\n "generic",\n "flexible",\n "character",\n "number",\n "integer",\n "inexact",\n "unsignedinteger",\n "signedinteger",\n "floating",\n "complexfloating",\n]\n_abstract_type_names: Final[set[_AbstractTypeName]]\n\n@type_check_only\nclass _AliasesType(TypedDict):\n double: L["float64"]\n cdouble: L["complex128"]\n single: L["float32"]\n csingle: L["complex64"]\n half: L["float16"]\n bool_: L["bool"]\n int_: L["intp"]\n uint: L["intp"]\n\n_aliases: Final[_AliasesType]\n\n@type_check_only\nclass _ExtraAliasesType(TypedDict):\n float: L["float64"]\n complex: L["complex128"]\n object: L["object_"]\n bytes: L["bytes_"]\n a: L["bytes_"]\n int: L["int_"]\n str: L["str_"]\n unicode: L["str_"]\n\n_extra_aliases: 
Final[_ExtraAliasesType]\n\n@type_check_only\nclass _SCTypes(TypedDict):\n int: Collection[type[np.signedinteger]]\n uint: Collection[type[np.unsignedinteger]]\n float: Collection[type[np.floating]]\n complex: Collection[type[np.complexfloating]]\n others: Collection[type[np.flexible | np.bool | np.object_]]\n\nsctypes: Final[_SCTypes]\n
.venv\Lib\site-packages\numpy\_core\_type_aliases.pyi
_type_aliases.pyi
Other
2,485
0.85
0.041237
0
node-utils
327
2023-08-02T03:40:40.715009
GPL-3.0
false
26d9adb59380208fb7ab838fa4df952c
"""\nFunctions for changing global ufunc configuration\n\nThis provides helpers which wrap `_get_extobj_dict` and `_make_extobj`, and\n`_extobj_contextvar` from umath.\n"""\nimport functools\n\nfrom numpy._utils import set_module\n\nfrom .umath import _extobj_contextvar, _get_extobj_dict, _make_extobj\n\n__all__ = [\n "seterr", "geterr", "setbufsize", "getbufsize", "seterrcall", "geterrcall",\n "errstate"\n]\n\n\n@set_module('numpy')\ndef seterr(all=None, divide=None, over=None, under=None, invalid=None):\n """\n Set how floating-point errors are handled.\n\n Note that operations on integer scalar types (such as `int16`) are\n handled like floating point, and are affected by these settings.\n\n Parameters\n ----------\n all : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional\n Set treatment for all types of floating-point errors at once:\n\n - ignore: Take no action when the exception occurs.\n - warn: Print a :exc:`RuntimeWarning` (via the Python `warnings`\n module).\n - raise: Raise a :exc:`FloatingPointError`.\n - call: Call a function specified using the `seterrcall` function.\n - print: Print a warning directly to ``stdout``.\n - log: Record error in a Log object specified by `seterrcall`.\n\n The default is not to change the current behavior.\n divide : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional\n Treatment for division by zero.\n over : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional\n Treatment for floating-point overflow.\n under : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional\n Treatment for floating-point underflow.\n invalid : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional\n Treatment for invalid floating-point operation.\n\n Returns\n -------\n old_settings : dict\n Dictionary containing the old settings.\n\n See also\n --------\n seterrcall : Set a callback function for the 'call' mode.\n geterr, geterrcall, errstate\n\n Notes\n -----\n The floating-point exceptions are 
defined in the IEEE 754 standard [1]_:\n\n - Division by zero: infinite result obtained from finite numbers.\n - Overflow: result too large to be expressed.\n - Underflow: result so close to zero that some precision\n was lost.\n - Invalid operation: result is not an expressible number, typically\n indicates that a NaN was produced.\n\n .. [1] https://en.wikipedia.org/wiki/IEEE_754\n\n Examples\n --------\n >>> import numpy as np\n >>> orig_settings = np.seterr(all='ignore') # seterr to known value\n >>> np.int16(32000) * np.int16(3)\n np.int16(30464)\n >>> np.seterr(over='raise')\n {'divide': 'ignore', 'over': 'ignore', 'under': 'ignore', 'invalid': 'ignore'}\n >>> old_settings = np.seterr(all='warn', over='raise')\n >>> np.int16(32000) * np.int16(3)\n Traceback (most recent call last):\n File "<stdin>", line 1, in <module>\n FloatingPointError: overflow encountered in scalar multiply\n\n >>> old_settings = np.seterr(all='print')\n >>> np.geterr()\n {'divide': 'print', 'over': 'print', 'under': 'print', 'invalid': 'print'}\n >>> np.int16(32000) * np.int16(3)\n np.int16(30464)\n >>> np.seterr(**orig_settings) # restore original\n {'divide': 'print', 'over': 'print', 'under': 'print', 'invalid': 'print'}\n\n """\n\n old = _get_extobj_dict()\n # The errstate doesn't include call and bufsize, so pop them:\n old.pop("call", None)\n old.pop("bufsize", None)\n\n extobj = _make_extobj(\n all=all, divide=divide, over=over, under=under, invalid=invalid)\n _extobj_contextvar.set(extobj)\n return old\n\n\n@set_module('numpy')\ndef geterr():\n """\n Get the current way of handling floating-point errors.\n\n Returns\n -------\n res : dict\n A dictionary with keys "divide", "over", "under", and "invalid",\n whose values are from the strings "ignore", "print", "log", "warn",\n "raise", and "call". 
The keys represent possible floating-point\n exceptions, and the values define how these exceptions are handled.\n\n See Also\n --------\n geterrcall, seterr, seterrcall\n\n Notes\n -----\n For complete documentation of the types of floating-point exceptions and\n treatment options, see `seterr`.\n\n Examples\n --------\n >>> import numpy as np\n >>> np.geterr()\n {'divide': 'warn', 'over': 'warn', 'under': 'ignore', 'invalid': 'warn'}\n >>> np.arange(3.) / np.arange(3.) # doctest: +SKIP\n array([nan, 1., 1.])\n RuntimeWarning: invalid value encountered in divide\n\n >>> oldsettings = np.seterr(all='warn', invalid='raise')\n >>> np.geterr()\n {'divide': 'warn', 'over': 'warn', 'under': 'warn', 'invalid': 'raise'}\n >>> np.arange(3.) / np.arange(3.)\n Traceback (most recent call last):\n ...\n FloatingPointError: invalid value encountered in divide\n >>> oldsettings = np.seterr(**oldsettings) # restore original\n\n """\n res = _get_extobj_dict()\n # The "geterr" doesn't include call and bufsize,:\n res.pop("call", None)\n res.pop("bufsize", None)\n return res\n\n\n@set_module('numpy')\ndef setbufsize(size):\n """\n Set the size of the buffer used in ufuncs.\n\n .. versionchanged:: 2.0\n The scope of setting the buffer is tied to the `numpy.errstate`\n context. Exiting a ``with errstate():`` will also restore the bufsize.\n\n Parameters\n ----------\n size : int\n Size of buffer.\n\n Returns\n -------\n bufsize : int\n Previous size of ufunc buffer in bytes.\n\n Examples\n --------\n When exiting a `numpy.errstate` context manager the bufsize is restored:\n\n >>> import numpy as np\n >>> with np.errstate():\n ... np.setbufsize(4096)\n ... 
print(np.getbufsize())\n ...\n 8192\n 4096\n >>> np.getbufsize()\n 8192\n\n """\n old = _get_extobj_dict()["bufsize"]\n extobj = _make_extobj(bufsize=size)\n _extobj_contextvar.set(extobj)\n return old\n\n\n@set_module('numpy')\ndef getbufsize():\n """\n Return the size of the buffer used in ufuncs.\n\n Returns\n -------\n getbufsize : int\n Size of ufunc buffer in bytes.\n\n Examples\n --------\n >>> import numpy as np\n >>> np.getbufsize()\n 8192\n\n """\n return _get_extobj_dict()["bufsize"]\n\n\n@set_module('numpy')\ndef seterrcall(func):\n """\n Set the floating-point error callback function or log object.\n\n There are two ways to capture floating-point error messages. The first\n is to set the error-handler to 'call', using `seterr`. Then, set\n the function to call using this function.\n\n The second is to set the error-handler to 'log', using `seterr`.\n Floating-point errors then trigger a call to the 'write' method of\n the provided object.\n\n Parameters\n ----------\n func : callable f(err, flag) or object with write method\n Function to call upon floating-point errors ('call'-mode) or\n object whose 'write' method is used to log such message ('log'-mode).\n\n The call function takes two arguments. The first is a string describing\n the type of error (such as "divide by zero", "overflow", "underflow",\n or "invalid value"), and the second is the status flag. The flag is a\n byte, whose four least-significant bits indicate the type of error, one\n of "divide", "over", "under", "invalid"::\n\n [0 0 0 0 divide over under invalid]\n\n In other words, ``flags = divide + 2*over + 4*under + 8*invalid``.\n\n If an object is provided, its write method should take one argument,\n a string.\n\n Returns\n -------\n h : callable, log instance or None\n The old error handler.\n\n See Also\n --------\n seterr, geterr, geterrcall\n\n Examples\n --------\n Callback upon error:\n\n >>> def err_handler(type, flag):\n ... 
print("Floating point error (%s), with flag %s" % (type, flag))\n ...\n\n >>> import numpy as np\n\n >>> orig_handler = np.seterrcall(err_handler)\n >>> orig_err = np.seterr(all='call')\n\n >>> np.array([1, 2, 3]) / 0.0\n Floating point error (divide by zero), with flag 1\n array([inf, inf, inf])\n\n >>> np.seterrcall(orig_handler)\n <function err_handler at 0x...>\n >>> np.seterr(**orig_err)\n {'divide': 'call', 'over': 'call', 'under': 'call', 'invalid': 'call'}\n\n Log error message:\n\n >>> class Log:\n ... def write(self, msg):\n ... print("LOG: %s" % msg)\n ...\n\n >>> log = Log()\n >>> saved_handler = np.seterrcall(log)\n >>> save_err = np.seterr(all='log')\n\n >>> np.array([1, 2, 3]) / 0.0\n LOG: Warning: divide by zero encountered in divide\n array([inf, inf, inf])\n\n >>> np.seterrcall(orig_handler)\n <numpy.Log object at 0x...>\n >>> np.seterr(**orig_err)\n {'divide': 'log', 'over': 'log', 'under': 'log', 'invalid': 'log'}\n\n """\n old = _get_extobj_dict()["call"]\n extobj = _make_extobj(call=func)\n _extobj_contextvar.set(extobj)\n return old\n\n\n@set_module('numpy')\ndef geterrcall():\n """\n Return the current callback function used on floating-point errors.\n\n When the error handling for a floating-point error (one of "divide",\n "over", "under", or "invalid") is set to 'call' or 'log', the function\n that is called or the log instance that is written to is returned by\n `geterrcall`. This function or log instance has been set with\n `seterrcall`.\n\n Returns\n -------\n errobj : callable, log instance or None\n The current error handler. 
If no handler was set through `seterrcall`,\n ``None`` is returned.\n\n See Also\n --------\n seterrcall, seterr, geterr\n\n Notes\n -----\n For complete documentation of the types of floating-point exceptions and\n treatment options, see `seterr`.\n\n Examples\n --------\n >>> import numpy as np\n >>> np.geterrcall() # we did not yet set a handler, returns None\n\n >>> orig_settings = np.seterr(all='call')\n >>> def err_handler(type, flag):\n ... print("Floating point error (%s), with flag %s" % (type, flag))\n >>> old_handler = np.seterrcall(err_handler)\n >>> np.array([1, 2, 3]) / 0.0\n Floating point error (divide by zero), with flag 1\n array([inf, inf, inf])\n\n >>> cur_handler = np.geterrcall()\n >>> cur_handler is err_handler\n True\n >>> old_settings = np.seterr(**orig_settings) # restore original\n >>> old_handler = np.seterrcall(None) # restore original\n\n """\n return _get_extobj_dict()["call"]\n\n\nclass _unspecified:\n pass\n\n\n_Unspecified = _unspecified()\n\n\n@set_module('numpy')\nclass errstate:\n """\n errstate(**kwargs)\n\n Context manager for floating-point error handling.\n\n Using an instance of `errstate` as a context manager allows statements in\n that context to execute with a known error handling behavior. Upon entering\n the context the error handling is set with `seterr` and `seterrcall`, and\n upon exiting it is reset to what it was before.\n\n .. versionchanged:: 1.17.0\n `errstate` is also usable as a function decorator, saving\n a level of indentation if an entire function is wrapped.\n\n .. versionchanged:: 2.0\n `errstate` is now fully thread and asyncio safe, but may not be\n entered more than once.\n It is not safe to decorate async functions using ``errstate``.\n\n Parameters\n ----------\n kwargs : {divide, over, under, invalid}\n Keyword arguments. The valid keywords are the possible floating-point\n exceptions. Each keyword should have a string value that defines the\n treatment for the particular error. 
Possible values are\n {'ignore', 'warn', 'raise', 'call', 'print', 'log'}.\n\n See Also\n --------\n seterr, geterr, seterrcall, geterrcall\n\n Notes\n -----\n For complete documentation of the types of floating-point exceptions and\n treatment options, see `seterr`.\n\n Examples\n --------\n >>> import numpy as np\n >>> olderr = np.seterr(all='ignore') # Set error handling to known state.\n\n >>> np.arange(3) / 0.\n array([nan, inf, inf])\n >>> with np.errstate(divide='ignore'):\n ... np.arange(3) / 0.\n array([nan, inf, inf])\n\n >>> np.sqrt(-1)\n np.float64(nan)\n >>> with np.errstate(invalid='raise'):\n ... np.sqrt(-1)\n Traceback (most recent call last):\n File "<stdin>", line 2, in <module>\n FloatingPointError: invalid value encountered in sqrt\n\n Outside the context the error handling behavior has not changed:\n\n >>> np.geterr()\n {'divide': 'ignore', 'over': 'ignore', 'under': 'ignore', 'invalid': 'ignore'}\n >>> olderr = np.seterr(**olderr) # restore original state\n\n """\n __slots__ = (\n "_all",\n "_call",\n "_divide",\n "_invalid",\n "_over",\n "_token",\n "_under",\n )\n\n def __init__(self, *, call=_Unspecified,\n all=None, divide=None, over=None, under=None, invalid=None):\n self._token = None\n self._call = call\n self._all = all\n self._divide = divide\n self._over = over\n self._under = under\n self._invalid = invalid\n\n def __enter__(self):\n # Note that __call__ duplicates much of this logic\n if self._token is not None:\n raise TypeError("Cannot enter `np.errstate` twice.")\n if self._call is _Unspecified:\n extobj = _make_extobj(\n all=self._all, divide=self._divide, over=self._over,\n under=self._under, invalid=self._invalid)\n else:\n extobj = _make_extobj(\n call=self._call,\n all=self._all, divide=self._divide, over=self._over,\n under=self._under, invalid=self._invalid)\n\n self._token = _extobj_contextvar.set(extobj)\n\n def __exit__(self, *exc_info):\n _extobj_contextvar.reset(self._token)\n\n def __call__(self, func):\n # We need 
to customize `__call__` compared to `ContextDecorator`\n # because we must store the token per-thread so cannot store it on\n # the instance (we could create a new instance for this).\n # This duplicates the code from `__enter__`.\n @functools.wraps(func)\n def inner(*args, **kwargs):\n if self._call is _Unspecified:\n extobj = _make_extobj(\n all=self._all, divide=self._divide, over=self._over,\n under=self._under, invalid=self._invalid)\n else:\n extobj = _make_extobj(\n call=self._call,\n all=self._all, divide=self._divide, over=self._over,\n under=self._under, invalid=self._invalid)\n\n _token = _extobj_contextvar.set(extobj)\n try:\n # Call the original, decorated, function:\n return func(*args, **kwargs)\n finally:\n _extobj_contextvar.reset(_token)\n\n return inner\n
.venv\Lib\site-packages\numpy\_core\_ufunc_config.py
_ufunc_config.py
Python
15,541
0.95
0.096115
0.020566
node-utils
481
2024-03-22T13:41:18.785554
Apache-2.0
false
faaaea6e1bdd9673c579c3011252314a
from collections.abc import Callable\nfrom typing import Any, Literal, TypeAlias, TypedDict, type_check_only\n\nfrom _typeshed import SupportsWrite\n\nfrom numpy import errstate as errstate\n\n_ErrKind: TypeAlias = Literal["ignore", "warn", "raise", "call", "print", "log"]\n_ErrFunc: TypeAlias = Callable[[str, int], Any]\n_ErrCall: TypeAlias = _ErrFunc | SupportsWrite[str]\n\n@type_check_only\nclass _ErrDict(TypedDict):\n divide: _ErrKind\n over: _ErrKind\n under: _ErrKind\n invalid: _ErrKind\n\ndef seterr(\n all: _ErrKind | None = ...,\n divide: _ErrKind | None = ...,\n over: _ErrKind | None = ...,\n under: _ErrKind | None = ...,\n invalid: _ErrKind | None = ...,\n) -> _ErrDict: ...\ndef geterr() -> _ErrDict: ...\ndef setbufsize(size: int) -> int: ...\ndef getbufsize() -> int: ...\ndef seterrcall(func: _ErrCall | None) -> _ErrCall | None: ...\ndef geterrcall() -> _ErrCall | None: ...\n\n# See `numpy/__init__.pyi` for the `errstate` class and `no_nep5_warnings`\n
.venv\Lib\site-packages\numpy\_core\_ufunc_config.pyi
_ufunc_config.pyi
Other
1,004
0.95
0.28125
0.038462
react-lib
755
2024-05-05T08:13:24.752228
GPL-3.0
false
cdaf1cc0b9841d459268b54032328d36
!<arch>\n/ -1 0 190 `\n
.venv\Lib\site-packages\numpy\_core\_umath_tests.cp313-win_amd64.lib
_umath_tests.cp313-win_amd64.lib
Other
2,104
0.8
0
0
react-lib
891
2023-08-02T23:49:49.627095
Apache-2.0
true
2e3d912afdc52a3f417f52119b6d62fb
MZ
.venv\Lib\site-packages\numpy\_core\_umath_tests.cp313-win_amd64.pyd
_umath_tests.cp313-win_amd64.pyd
Other
34,304
0.95
0.013953
0.013953
node-utils
510
2024-02-02T01:19:05.461590
BSD-3-Clause
true
95d2a9be8b0a3fb50e0cc5fef222398e
"""\nContains the core of NumPy: ndarray, ufuncs, dtypes, etc.\n\nPlease note that this module is private. All functions and objects\nare available in the main ``numpy`` namespace - use that instead.\n\n"""\n\nimport os\n\nfrom numpy.version import version as __version__\n\n# disables OpenBLAS affinity setting of the main thread that limits\n# python threads or processes to one core\nenv_added = []\nfor envkey in ['OPENBLAS_MAIN_FREE', 'GOTOBLAS_MAIN_FREE']:\n if envkey not in os.environ:\n os.environ[envkey] = '1'\n env_added.append(envkey)\n\ntry:\n from . import multiarray\nexcept ImportError as exc:\n import sys\n msg = """\n\nIMPORTANT: PLEASE READ THIS FOR ADVICE ON HOW TO SOLVE THIS ISSUE!\n\nImporting the numpy C-extensions failed. This error can happen for\nmany reasons, often due to issues with your setup or how NumPy was\ninstalled.\n\nWe have compiled some common reasons and troubleshooting tips at:\n\n https://numpy.org/devdocs/user/troubleshooting-importerror.html\n\nPlease note and check the following:\n\n * The Python version is: Python%d.%d from "%s"\n * The NumPy version is: "%s"\n\nand make sure that they are the versions you expect.\nPlease carefully study the documentation linked above for further help.\n\nOriginal error was: %s\n""" % (sys.version_info[0], sys.version_info[1], sys.executable,\n __version__, exc)\n raise ImportError(msg) from exc\nfinally:\n for envkey in env_added:\n del os.environ[envkey]\ndel envkey\ndel env_added\ndel os\n\nfrom . import umath\n\n# Check that multiarray,umath are pure python modules wrapping\n# _multiarray_umath and not either of the old c-extension modules\nif not (hasattr(multiarray, '_multiarray_umath') and\n hasattr(umath, '_multiarray_umath')):\n import sys\n path = sys.modules['numpy'].__path__\n msg = ("Something is wrong with the numpy installation. "\n "While importing we detected an older version of "\n "numpy in {}. 
One method of fixing this is to repeatedly uninstall "\n "numpy until none is found, then reinstall this version.")\n raise ImportError(msg.format(path))\n\nfrom . import numerictypes as nt\nfrom .numerictypes import sctypeDict, sctypes\n\nmultiarray.set_typeDict(nt.sctypeDict)\nfrom . import (\n _machar,\n einsumfunc,\n fromnumeric,\n function_base,\n getlimits,\n numeric,\n shape_base,\n)\nfrom .einsumfunc import *\nfrom .fromnumeric import *\nfrom .function_base import *\nfrom .getlimits import *\n\n# Note: module name memmap is overwritten by a class with same name\nfrom .memmap import *\nfrom .numeric import *\nfrom .records import recarray, record\nfrom .shape_base import *\n\ndel nt\n\n# do this after everything else, to minimize the chance of this misleadingly\n# appearing in an import-time traceback\n# add these for module-freeze analysis (like PyInstaller)\nfrom . import (\n _add_newdocs,\n _add_newdocs_scalars,\n _dtype,\n _dtype_ctypes,\n _internal,\n _methods,\n)\nfrom .numeric import absolute as abs\n\nacos = numeric.arccos\nacosh = numeric.arccosh\nasin = numeric.arcsin\nasinh = numeric.arcsinh\natan = numeric.arctan\natanh = numeric.arctanh\natan2 = numeric.arctan2\nconcat = numeric.concatenate\nbitwise_left_shift = numeric.left_shift\nbitwise_invert = numeric.invert\nbitwise_right_shift = numeric.right_shift\npermute_dims = numeric.transpose\npow = numeric.power\n\n__all__ = [\n "abs", "acos", "acosh", "asin", "asinh", "atan", "atanh", "atan2",\n "bitwise_invert", "bitwise_left_shift", "bitwise_right_shift", "concat",\n "pow", "permute_dims", "memmap", "sctypeDict", "record", "recarray"\n]\n__all__ += numeric.__all__\n__all__ += function_base.__all__\n__all__ += getlimits.__all__\n__all__ += shape_base.__all__\n__all__ += einsumfunc.__all__\n\n\ndef _ufunc_reduce(func):\n # Report the `__name__`. pickle will try to find the module. Note that\n # pickle supports for this `__name__` to be a `__qualname__`. 
It may\n # make sense to add a `__qualname__` to ufuncs, to allow this more\n # explicitly (Numba has ufuncs as attributes).\n # See also: https://github.com/dask/distributed/issues/3450\n return func.__name__\n\n\ndef _DType_reconstruct(scalar_type):\n # This is a work-around to pickle type(np.dtype(np.float64)), etc.\n # and it should eventually be replaced with a better solution, e.g. when\n # DTypes become HeapTypes.\n return type(dtype(scalar_type))\n\n\ndef _DType_reduce(DType):\n # As types/classes, most DTypes can simply be pickled by their name:\n if not DType._legacy or DType.__module__ == "numpy.dtypes":\n return DType.__name__\n\n # However, user defined legacy dtypes (like rational) do not end up in\n # `numpy.dtypes` as module and do not have a public class at all.\n # For these, we pickle them by reconstructing them from the scalar type:\n scalar_type = DType.type\n return _DType_reconstruct, (scalar_type,)\n\n\ndef __getattr__(name):\n # Deprecated 2022-11-22, NumPy 1.25.\n if name == "MachAr":\n import warnings\n warnings.warn(\n "The `np._core.MachAr` is considered private API (NumPy 1.24)",\n DeprecationWarning, stacklevel=2,\n )\n return _machar.MachAr\n raise AttributeError(f"Module {__name__!r} has no attribute {name!r}")\n\n\nimport copyreg\n\ncopyreg.pickle(ufunc, _ufunc_reduce)\ncopyreg.pickle(type(dtype), _DType_reduce, _DType_reconstruct)\n\n# Unclutter namespace (must keep _*_reconstruct for unpickling)\ndel copyreg, _ufunc_reduce, _DType_reduce\n\nfrom numpy._pytesttester import PytestTester\n\ntest = PytestTester(__name__)\ndel PytestTester\n
.venv\Lib\site-packages\numpy\_core\__init__.py
__init__.py
Python
5,728
0.95
0.102151
0.162162
python-kit
418
2025-03-25T03:46:11.805569
MIT
false
c32ac8bd8ea61f6bd99b5f68505ec834
# NOTE: The `np._core` namespace is deliberately kept empty due to it\n# being private\n
.venv\Lib\site-packages\numpy\_core\__init__.pyi
__init__.pyi
Other
88
0.6
0
1
react-lib
919
2025-03-22T11:56:12.545401
Apache-2.0
false
bc05219a18cefbf5cf2aae9837e1ce52
#ifndef NUMPY_CORE_INCLUDE_NUMPY_ARRAYOBJECT_H_\n#define NUMPY_CORE_INCLUDE_NUMPY_ARRAYOBJECT_H_\n#define Py_ARRAYOBJECT_H\n\n#include "ndarrayobject.h"\n\n#endif /* NUMPY_CORE_INCLUDE_NUMPY_ARRAYOBJECT_H_ */\n
.venv\Lib\site-packages\numpy\_core\include\numpy\arrayobject.h
arrayobject.h
C
211
0.95
0
1
python-kit
845
2025-04-24T02:57:19.057888
GPL-3.0
false
09997308b2290c3bc281bc317e6fc5a1
#ifndef NUMPY_CORE_INCLUDE_NUMPY_ARRAYSCALARS_H_\n#define NUMPY_CORE_INCLUDE_NUMPY_ARRAYSCALARS_H_\n\n#ifndef _MULTIARRAYMODULE\ntypedef struct {\n PyObject_HEAD\n npy_bool obval;\n} PyBoolScalarObject;\n#endif\n\n\ntypedef struct {\n PyObject_HEAD\n signed char obval;\n} PyByteScalarObject;\n\n\ntypedef struct {\n PyObject_HEAD\n short obval;\n} PyShortScalarObject;\n\n\ntypedef struct {\n PyObject_HEAD\n int obval;\n} PyIntScalarObject;\n\n\ntypedef struct {\n PyObject_HEAD\n long obval;\n} PyLongScalarObject;\n\n\ntypedef struct {\n PyObject_HEAD\n npy_longlong obval;\n} PyLongLongScalarObject;\n\n\ntypedef struct {\n PyObject_HEAD\n unsigned char obval;\n} PyUByteScalarObject;\n\n\ntypedef struct {\n PyObject_HEAD\n unsigned short obval;\n} PyUShortScalarObject;\n\n\ntypedef struct {\n PyObject_HEAD\n unsigned int obval;\n} PyUIntScalarObject;\n\n\ntypedef struct {\n PyObject_HEAD\n unsigned long obval;\n} PyULongScalarObject;\n\n\ntypedef struct {\n PyObject_HEAD\n npy_ulonglong obval;\n} PyULongLongScalarObject;\n\n\ntypedef struct {\n PyObject_HEAD\n npy_half obval;\n} PyHalfScalarObject;\n\n\ntypedef struct {\n PyObject_HEAD\n float obval;\n} PyFloatScalarObject;\n\n\ntypedef struct {\n PyObject_HEAD\n double obval;\n} PyDoubleScalarObject;\n\n\ntypedef struct {\n PyObject_HEAD\n npy_longdouble obval;\n} PyLongDoubleScalarObject;\n\n\ntypedef struct {\n PyObject_HEAD\n npy_cfloat obval;\n} PyCFloatScalarObject;\n\n\ntypedef struct {\n PyObject_HEAD\n npy_cdouble obval;\n} PyCDoubleScalarObject;\n\n\ntypedef struct {\n PyObject_HEAD\n npy_clongdouble obval;\n} PyCLongDoubleScalarObject;\n\n\ntypedef struct {\n PyObject_HEAD\n PyObject * obval;\n} PyObjectScalarObject;\n\ntypedef struct {\n PyObject_HEAD\n npy_datetime obval;\n PyArray_DatetimeMetaData obmeta;\n} PyDatetimeScalarObject;\n\ntypedef struct {\n PyObject_HEAD\n npy_timedelta obval;\n PyArray_DatetimeMetaData obmeta;\n} PyTimedeltaScalarObject;\n\n\ntypedef struct {\n PyObject_HEAD\n char 
obval;\n} PyScalarObject;\n\n#define PyStringScalarObject PyBytesObject\n#ifndef Py_LIMITED_API\ntypedef struct {\n /* note that the PyObject_HEAD macro lives right here */\n PyUnicodeObject base;\n Py_UCS4 *obval;\n #if NPY_FEATURE_VERSION >= NPY_1_20_API_VERSION\n char *buffer_fmt;\n #endif\n} PyUnicodeScalarObject;\n#endif\n\n\ntypedef struct {\n PyObject_VAR_HEAD\n char *obval;\n#if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD\n /* Internally use the subclass to allow accessing names/fields */\n _PyArray_LegacyDescr *descr;\n#else\n PyArray_Descr *descr;\n#endif\n int flags;\n PyObject *base;\n #if NPY_FEATURE_VERSION >= NPY_1_20_API_VERSION\n void *_buffer_info; /* private buffer info, tagged to allow warning */\n #endif\n} PyVoidScalarObject;\n\n/* Macros\n Py<Cls><bitsize>ScalarObject\n Py<Cls><bitsize>ArrType_Type\n are defined in ndarrayobject.h\n*/\n\n#define PyArrayScalar_False ((PyObject *)(&(_PyArrayScalar_BoolValues[0])))\n#define PyArrayScalar_True ((PyObject *)(&(_PyArrayScalar_BoolValues[1])))\n#define PyArrayScalar_FromLong(i) \\n ((PyObject *)(&(_PyArrayScalar_BoolValues[((i)!=0)])))\n#define PyArrayScalar_RETURN_BOOL_FROM_LONG(i) \\n return Py_INCREF(PyArrayScalar_FromLong(i)), \\n PyArrayScalar_FromLong(i)\n#define PyArrayScalar_RETURN_FALSE \\n return Py_INCREF(PyArrayScalar_False), \\n PyArrayScalar_False\n#define PyArrayScalar_RETURN_TRUE \\n return Py_INCREF(PyArrayScalar_True), \\n PyArrayScalar_True\n\n#define PyArrayScalar_New(cls) \\n Py##cls##ArrType_Type.tp_alloc(&Py##cls##ArrType_Type, 0)\n#ifndef Py_LIMITED_API\n/* For the limited API, use PyArray_ScalarAsCtype instead */\n#define PyArrayScalar_VAL(obj, cls) \\n ((Py##cls##ScalarObject *)obj)->obval\n#define PyArrayScalar_ASSIGN(obj, cls, val) \\n PyArrayScalar_VAL(obj, cls) = val\n#endif\n\n#endif /* NUMPY_CORE_INCLUDE_NUMPY_ARRAYSCALARS_H_ */\n
.venv\Lib\site-packages\numpy\_core\include\numpy\arrayscalars.h
arrayscalars.h
C
4,439
0.95
0.015306
0.209459
react-lib
843
2023-09-15T22:27:56.655969
Apache-2.0
false
e956f443f0d79d539f5248b8f9aebb23
/*\n * The public DType API\n */\n\n#ifndef NUMPY_CORE_INCLUDE_NUMPY___DTYPE_API_H_\n#define NUMPY_CORE_INCLUDE_NUMPY___DTYPE_API_H_\n\nstruct PyArrayMethodObject_tag;\n\n/*\n * Largely opaque struct for DType classes (i.e. metaclass instances).\n * The internal definition is currently in `ndarraytypes.h` (export is a bit\n * more complex because `PyArray_Descr` is a DTypeMeta internally but not\n * externally).\n */\n#if !(defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD)\n\n#ifndef Py_LIMITED_API\n\n typedef struct PyArray_DTypeMeta_tag {\n PyHeapTypeObject super;\n\n /*\n * Most DTypes will have a singleton default instance, for the\n * parametric legacy DTypes (bytes, string, void, datetime) this\n * may be a pointer to the *prototype* instance?\n */\n PyArray_Descr *singleton;\n /* Copy of the legacy DTypes type number, usually invalid. */\n int type_num;\n\n /* The type object of the scalar instances (may be NULL?) */\n PyTypeObject *scalar_type;\n /*\n * DType flags to signal legacy, parametric, or\n * abstract. 
But plenty of space for additional information/flags.\n */\n npy_uint64 flags;\n\n /*\n * Use indirection in order to allow a fixed size for this struct.\n * A stable ABI size makes creating a static DType less painful\n * while also ensuring flexibility for all opaque API (with one\n * indirection due the pointer lookup).\n */\n void *dt_slots;\n /* Allow growing (at the moment also beyond this) */\n void *reserved[3];\n } PyArray_DTypeMeta;\n\n#else\n\ntypedef PyTypeObject PyArray_DTypeMeta;\n\n#endif /* Py_LIMITED_API */\n\n#endif /* not internal build */\n\n/*\n * ******************************************************\n * ArrayMethod API (Casting and UFuncs)\n * ******************************************************\n */\n\n\ntypedef enum {\n /* Flag for whether the GIL is required */\n NPY_METH_REQUIRES_PYAPI = 1 << 0,\n /*\n * Some functions cannot set floating point error flags, this flag\n * gives us the option (not requirement) to skip floating point error\n * setup/check. No function should set error flags and ignore them\n * since it would interfere with chaining operations (e.g. casting).\n */\n NPY_METH_NO_FLOATINGPOINT_ERRORS = 1 << 1,\n /* Whether the method supports unaligned access (not runtime) */\n NPY_METH_SUPPORTS_UNALIGNED = 1 << 2,\n /*\n * Used for reductions to allow reordering the operation. At this point\n * assume that if set, it also applies to normal operations though!\n */\n NPY_METH_IS_REORDERABLE = 1 << 3,\n /*\n * Private flag for now for *logic* functions. The logical functions\n * `logical_or` and `logical_and` can always cast the inputs to booleans\n * "safely" (because that is how the cast to bool is defined).\n * @seberg: I am not sure this is the best way to handle this, so its\n * private for now (also it is very limited anyway).\n * There is one "exception". 
NA aware dtypes cannot cast to bool\n * (hopefully), so the `??->?` loop should error even with this flag.\n * But a second NA fallback loop will be necessary.\n */\n _NPY_METH_FORCE_CAST_INPUTS = 1 << 17,\n\n /* All flags which can change at runtime */\n NPY_METH_RUNTIME_FLAGS = (\n NPY_METH_REQUIRES_PYAPI |\n NPY_METH_NO_FLOATINGPOINT_ERRORS),\n} NPY_ARRAYMETHOD_FLAGS;\n\n\ntypedef struct PyArrayMethod_Context_tag {\n /* The caller, which is typically the original ufunc. May be NULL */\n PyObject *caller;\n /* The method "self". Currently an opaque object. */\n struct PyArrayMethodObject_tag *method;\n\n /* Operand descriptors, filled in by resolve_descriptors */\n PyArray_Descr *const *descriptors;\n /* Structure may grow (this is harmless for DType authors) */\n} PyArrayMethod_Context;\n\n\n/*\n * The main object for creating a new ArrayMethod. We use the typical `slots`\n * mechanism used by the Python limited API (see below for the slot defs).\n */\ntypedef struct {\n const char *name;\n int nin, nout;\n NPY_CASTING casting;\n NPY_ARRAYMETHOD_FLAGS flags;\n PyArray_DTypeMeta **dtypes;\n PyType_Slot *slots;\n} PyArrayMethod_Spec;\n\n\n/*\n * ArrayMethod slots\n * -----------------\n *\n * SLOTS IDs For the ArrayMethod creation, once fully public, IDs are fixed\n * but can be deprecated and arbitrarily extended.\n */\n#define _NPY_METH_resolve_descriptors_with_scalars 1\n#define NPY_METH_resolve_descriptors 2\n#define NPY_METH_get_loop 3\n#define NPY_METH_get_reduction_initial 4\n/* specific loops for constructions/default get_loop: */\n#define NPY_METH_strided_loop 5\n#define NPY_METH_contiguous_loop 6\n#define NPY_METH_unaligned_strided_loop 7\n#define NPY_METH_unaligned_contiguous_loop 8\n#define NPY_METH_contiguous_indexed_loop 9\n#define _NPY_METH_static_data 10\n\n\n/*\n * The resolve descriptors function, must be able to handle NULL values for\n * all output (but not input) `given_descrs` and fill `loop_descrs`.\n * Return -1 on error or 0 if the 
operation is not possible without an error\n * set. (This may still be in flux.)\n * Otherwise must return the "casting safety", for normal functions, this is\n * almost always "safe" (or even "equivalent"?).\n *\n * `resolve_descriptors` is optional if all output DTypes are non-parametric.\n */\ntypedef NPY_CASTING (PyArrayMethod_ResolveDescriptors)(\n /* "method" is currently opaque (necessary e.g. to wrap Python) */\n struct PyArrayMethodObject_tag *method,\n /* DTypes the method was created for */\n PyArray_DTypeMeta *const *dtypes,\n /* Input descriptors (instances). Outputs may be NULL. */\n PyArray_Descr *const *given_descrs,\n /* Exact loop descriptors to use, must not hold references on error */\n PyArray_Descr **loop_descrs,\n npy_intp *view_offset);\n\n\n/*\n * Rarely needed, slightly more powerful version of `resolve_descriptors`.\n * See also `PyArrayMethod_ResolveDescriptors` for details on shared arguments.\n *\n * NOTE: This function is private now as it is unclear how and what to pass\n * exactly as additional information to allow dealing with the scalars.\n * See also gh-24915.\n */\ntypedef NPY_CASTING (PyArrayMethod_ResolveDescriptorsWithScalar)(\n struct PyArrayMethodObject_tag *method,\n PyArray_DTypeMeta *const *dtypes,\n /* Unlike above, these can have any DType and we may allow NULL. */\n PyArray_Descr *const *given_descrs,\n /*\n * Input scalars or NULL. 
Only ever passed for python scalars.\n * WARNING: In some cases, a loop may be explicitly selected and the\n * value passed is not available (NULL) or does not have the\n * expected type.\n */\n PyObject *const *input_scalars,\n PyArray_Descr **loop_descrs,\n npy_intp *view_offset);\n\n\n\ntypedef int (PyArrayMethod_StridedLoop)(PyArrayMethod_Context *context,\n char *const *data, const npy_intp *dimensions, const npy_intp *strides,\n NpyAuxData *transferdata);\n\n\ntypedef int (PyArrayMethod_GetLoop)(\n PyArrayMethod_Context *context,\n int aligned, int move_references,\n const npy_intp *strides,\n PyArrayMethod_StridedLoop **out_loop,\n NpyAuxData **out_transferdata,\n NPY_ARRAYMETHOD_FLAGS *flags);\n\n/**\n * Query an ArrayMethod for the initial value for use in reduction.\n *\n * @param context The arraymethod context, mainly to access the descriptors.\n * @param reduction_is_empty Whether the reduction is empty. When it is, the\n * value returned may differ. In this case it is a "default" value that\n * may differ from the "identity" value normally used. For example:\n * - `0.0` is the default for `sum([])`. But `-0.0` is the correct\n * identity otherwise as it preserves the sign for `sum([-0.0])`.\n * - We use no identity for object, but return the default of `0` and `1`\n * for the empty `sum([], dtype=object)` and `prod([], dtype=object)`.\n * This allows `np.sum(np.array(["a", "b"], dtype=object))` to work.\n * - `-inf` or `INT_MIN` for `max` is an identity, but at least `INT_MIN`\n * not a good *default* when there are no items.\n * @param initial Pointer to initial data to be filled (if possible)\n *\n * @returns -1, 0, or 1 indicating error, no initial value, and initial being\n * successfully filled. 
Errors must not be given where 0 is correct, NumPy\n * may call this even when not strictly necessary.\n */\ntypedef int (PyArrayMethod_GetReductionInitial)(\n PyArrayMethod_Context *context, npy_bool reduction_is_empty,\n void *initial);\n\n/*\n * The following functions are only used by the wrapping array method defined\n * in umath/wrapping_array_method.c\n */\n\n\n/*\n * The function to convert the given descriptors (passed in to\n * `resolve_descriptors`) and translates them for the wrapped loop.\n * The new descriptors MUST be viewable with the old ones, `NULL` must be\n * supported (for outputs) and should normally be forwarded.\n *\n * The function must clean up on error.\n *\n * NOTE: We currently assume that this translation gives "viewable" results.\n * I.e. there is no additional casting related to the wrapping process.\n * In principle that could be supported, but not sure it is useful.\n * This currently also means that e.g. alignment must apply identically\n * to the new dtypes.\n *\n * TODO: Due to the fact that `resolve_descriptors` is also used for `can_cast`\n * there is no way to "pass out" the result of this function. This means\n * it will be called twice for every ufunc call.\n * (I am considering including `auxdata` as an "optional" parameter to\n * `resolve_descriptors`, so that it can be filled there if not NULL.)\n */\ntypedef int (PyArrayMethod_TranslateGivenDescriptors)(int nin, int nout,\n PyArray_DTypeMeta *const wrapped_dtypes[],\n PyArray_Descr *const given_descrs[], PyArray_Descr *new_descrs[]);\n\n/**\n * The function to convert the actual loop descriptors (as returned by the\n * original `resolve_descriptors` function) to the ones the output array\n * should use.\n * This function must return "viewable" types, it must not mutate them in any\n * form that would break the inner-loop logic. 
Does not need to support NULL.\n *\n * The function must clean up on error.\n *\n * @param nin Number of input arguments\n * @param nout Number of output arguments\n * @param new_dtypes The DTypes of the output (usually probably not needed)\n * @param given_descrs Original given_descrs to the resolver, necessary to\n * fetch any information related to the new dtypes from the original.\n * @param original_descrs The `loop_descrs` returned by the wrapped loop.\n * @param loop_descrs The output descriptors, compatible to `original_descrs`.\n *\n * @returns 0 on success, -1 on failure.\n */\ntypedef int (PyArrayMethod_TranslateLoopDescriptors)(int nin, int nout,\n PyArray_DTypeMeta *const new_dtypes[], PyArray_Descr *const given_descrs[],\n PyArray_Descr *original_descrs[], PyArray_Descr *loop_descrs[]);\n\n\n\n/*\n * A traverse loop working on a single array. This is similar to the general\n * strided-loop function. This is designed for loops that need to visit every\n * element of a single array.\n *\n * Currently this is used for array clearing, via the NPY_DT_get_clear_loop\n * API hook, and zero-filling, via the NPY_DT_get_fill_zero_loop API hook.\n * These are most useful for handling arrays storing embedded references to\n * python objects or heap-allocated data.\n *\n * The `void *traverse_context` is passed in because we may need to pass in\n * Interpreter state or similar in the future, but we don't want to pass in\n * a full context (with pointers to dtypes, method, caller which all make\n * no sense for a traverse function).\n *\n * We assume for now that this context can be just passed through in the\n * the future (for structured dtypes).\n *\n */\ntypedef int (PyArrayMethod_TraverseLoop)(\n void *traverse_context, const PyArray_Descr *descr, char *data,\n npy_intp size, npy_intp stride, NpyAuxData *auxdata);\n\n\n/*\n * Simplified get_loop function specific to dtype traversal\n *\n * It should set the flags needed for the traversal loop and set out_loop 
to the\n * loop function, which must be a valid PyArrayMethod_TraverseLoop\n * pointer. Currently this is used for zero-filling and clearing arrays storing\n * embedded references.\n *\n */\ntypedef int (PyArrayMethod_GetTraverseLoop)(\n void *traverse_context, const PyArray_Descr *descr,\n int aligned, npy_intp fixed_stride,\n PyArrayMethod_TraverseLoop **out_loop, NpyAuxData **out_auxdata,\n NPY_ARRAYMETHOD_FLAGS *flags);\n\n\n/*\n * Type of the C promoter function, which must be wrapped into a\n * PyCapsule with name "numpy._ufunc_promoter".\n *\n * Note that currently the output dtypes are always NULL unless they are\n * also part of the signature. This is an implementation detail and could\n * change in the future. However, in general promoters should not have a\n * need for output dtypes.\n * (There are potential use-cases, these are currently unsupported.)\n */\ntypedef int (PyArrayMethod_PromoterFunction)(PyObject *ufunc,\n PyArray_DTypeMeta *const op_dtypes[], PyArray_DTypeMeta *const signature[],\n PyArray_DTypeMeta *new_op_dtypes[]);\n\n/*\n * ****************************\n * DTYPE API\n * ****************************\n */\n\n#define NPY_DT_ABSTRACT 1 << 1\n#define NPY_DT_PARAMETRIC 1 << 2\n#define NPY_DT_NUMERIC 1 << 3\n\n/*\n * These correspond to slots in the NPY_DType_Slots struct and must\n * be in the same order as the members of that struct. 
If new slots\n * get added or old slots get removed NPY_NUM_DTYPE_SLOTS must also\n * be updated\n */\n\n#define NPY_DT_discover_descr_from_pyobject 1\n// this slot is considered private because its API hasn't been decided\n#define _NPY_DT_is_known_scalar_type 2\n#define NPY_DT_default_descr 3\n#define NPY_DT_common_dtype 4\n#define NPY_DT_common_instance 5\n#define NPY_DT_ensure_canonical 6\n#define NPY_DT_setitem 7\n#define NPY_DT_getitem 8\n#define NPY_DT_get_clear_loop 9\n#define NPY_DT_get_fill_zero_loop 10\n#define NPY_DT_finalize_descr 11\n\n// These PyArray_ArrFunc slots will be deprecated and replaced eventually\n// getitem and setitem can be defined as a performance optimization;\n// by default the user dtypes call `legacy_getitem_using_DType` and\n// `legacy_setitem_using_DType`, respectively. This functionality is\n// only supported for basic NumPy DTypes.\n\n\n// used to separate dtype slots from arrfuncs slots\n// intended only for internal use but defined here for clarity\n#define _NPY_DT_ARRFUNCS_OFFSET (1 << 10)\n\n// Cast is disabled\n// #define NPY_DT_PyArray_ArrFuncs_cast 0 + _NPY_DT_ARRFUNCS_OFFSET\n\n#define NPY_DT_PyArray_ArrFuncs_getitem 1 + _NPY_DT_ARRFUNCS_OFFSET\n#define NPY_DT_PyArray_ArrFuncs_setitem 2 + _NPY_DT_ARRFUNCS_OFFSET\n\n// Copyswap is disabled\n// #define NPY_DT_PyArray_ArrFuncs_copyswapn 3 + _NPY_DT_ARRFUNCS_OFFSET\n// #define NPY_DT_PyArray_ArrFuncs_copyswap 4 + _NPY_DT_ARRFUNCS_OFFSET\n#define NPY_DT_PyArray_ArrFuncs_compare 5 + _NPY_DT_ARRFUNCS_OFFSET\n#define NPY_DT_PyArray_ArrFuncs_argmax 6 + _NPY_DT_ARRFUNCS_OFFSET\n#define NPY_DT_PyArray_ArrFuncs_dotfunc 7 + _NPY_DT_ARRFUNCS_OFFSET\n#define NPY_DT_PyArray_ArrFuncs_scanfunc 8 + _NPY_DT_ARRFUNCS_OFFSET\n#define NPY_DT_PyArray_ArrFuncs_fromstr 9 + _NPY_DT_ARRFUNCS_OFFSET\n#define NPY_DT_PyArray_ArrFuncs_nonzero 10 + _NPY_DT_ARRFUNCS_OFFSET\n#define NPY_DT_PyArray_ArrFuncs_fill 11 + _NPY_DT_ARRFUNCS_OFFSET\n#define NPY_DT_PyArray_ArrFuncs_fillwithscalar 12 + 
_NPY_DT_ARRFUNCS_OFFSET\n#define NPY_DT_PyArray_ArrFuncs_sort 13 + _NPY_DT_ARRFUNCS_OFFSET\n#define NPY_DT_PyArray_ArrFuncs_argsort 14 + _NPY_DT_ARRFUNCS_OFFSET\n\n// Casting related slots are disabled. See\n// https://github.com/numpy/numpy/pull/23173#discussion_r1101098163\n// #define NPY_DT_PyArray_ArrFuncs_castdict 15 + _NPY_DT_ARRFUNCS_OFFSET\n// #define NPY_DT_PyArray_ArrFuncs_scalarkind 16 + _NPY_DT_ARRFUNCS_OFFSET\n// #define NPY_DT_PyArray_ArrFuncs_cancastscalarkindto 17 + _NPY_DT_ARRFUNCS_OFFSET\n// #define NPY_DT_PyArray_ArrFuncs_cancastto 18 + _NPY_DT_ARRFUNCS_OFFSET\n\n// These are deprecated in NumPy 1.19, so are disabled here.\n// #define NPY_DT_PyArray_ArrFuncs_fastclip 19 + _NPY_DT_ARRFUNCS_OFFSET\n// #define NPY_DT_PyArray_ArrFuncs_fastputmask 20 + _NPY_DT_ARRFUNCS_OFFSET\n// #define NPY_DT_PyArray_ArrFuncs_fasttake 21 + _NPY_DT_ARRFUNCS_OFFSET\n#define NPY_DT_PyArray_ArrFuncs_argmin 22 + _NPY_DT_ARRFUNCS_OFFSET\n\n\n// TODO: These slots probably still need some thought, and/or a way to "grow"?\ntypedef struct {\n PyTypeObject *typeobj; /* type of python scalar or NULL */\n int flags; /* flags, including parametric and abstract */\n /* NULL terminated cast definitions. Use NULL for the newly created DType */\n PyArrayMethod_Spec **casts;\n PyType_Slot *slots;\n /* Baseclass or NULL (will always subclass `np.dtype`) */\n PyTypeObject *baseclass;\n} PyArrayDTypeMeta_Spec;\n\n\ntypedef PyArray_Descr *(PyArrayDTypeMeta_DiscoverDescrFromPyobject)(\n PyArray_DTypeMeta *cls, PyObject *obj);\n\n/*\n * Before making this public, we should decide whether it should pass\n * the type, or allow looking at the object. 
A possible use-case:\n * `np.array(np.array([0]), dtype=np.ndarray)`\n * Could consider arrays that are not `dtype=ndarray` "scalars".\n */\ntypedef int (PyArrayDTypeMeta_IsKnownScalarType)(\n PyArray_DTypeMeta *cls, PyTypeObject *obj);\n\ntypedef PyArray_Descr *(PyArrayDTypeMeta_DefaultDescriptor)(PyArray_DTypeMeta *cls);\ntypedef PyArray_DTypeMeta *(PyArrayDTypeMeta_CommonDType)(\n PyArray_DTypeMeta *dtype1, PyArray_DTypeMeta *dtype2);\n\n\n/*\n * Convenience utility for getting a reference to the DType metaclass associated\n * with a dtype instance.\n */\n#define NPY_DTYPE(descr) ((PyArray_DTypeMeta *)Py_TYPE(descr))\n\nstatic inline PyArray_DTypeMeta *\nNPY_DT_NewRef(PyArray_DTypeMeta *o) {\n Py_INCREF((PyObject *)o);\n return o;\n}\n\n\ntypedef PyArray_Descr *(PyArrayDTypeMeta_CommonInstance)(\n PyArray_Descr *dtype1, PyArray_Descr *dtype2);\ntypedef PyArray_Descr *(PyArrayDTypeMeta_EnsureCanonical)(PyArray_Descr *dtype);\n/*\n * Returns either a new reference to *dtype* or a new descriptor instance\n * initialized with the same parameters as *dtype*. The caller cannot know\n * which choice a dtype will make. This function is called just before the\n * array buffer is created for a newly created array, it is not called for\n * views and the descriptor returned by this function is attached to the array.\n */\ntypedef PyArray_Descr *(PyArrayDTypeMeta_FinalizeDescriptor)(PyArray_Descr *dtype);\n\n/*\n * TODO: These two functions are currently only used for experimental DType\n * API support. Their relation should be "reversed": NumPy should\n * always use them internally.\n * There are open points about "casting safety" though, e.g. setting\n * elements is currently always unsafe.\n */\ntypedef int(PyArrayDTypeMeta_SetItem)(PyArray_Descr *, PyObject *, char *);\ntypedef PyObject *(PyArrayDTypeMeta_GetItem)(PyArray_Descr *, char *);\n\n#endif /* NUMPY_CORE_INCLUDE_NUMPY___DTYPE_API_H_ */\n
.venv\Lib\site-packages\numpy\_core\include\numpy\dtype_api.h
dtype_api.h
C
19,718
0.95
0.147917
0.751825
awesome-app
923
2024-02-19T21:59:11.199307
MIT
false
f964e255d8831708fbcbfdced0e10121
#ifndef NUMPY_CORE_INCLUDE_NUMPY_HALFFLOAT_H_\n#define NUMPY_CORE_INCLUDE_NUMPY_HALFFLOAT_H_\n\n#include <Python.h>\n#include <numpy/npy_math.h>\n\n#ifdef __cplusplus\nextern "C" {\n#endif\n\n/*\n * Half-precision routines\n */\n\n/* Conversions */\nfloat npy_half_to_float(npy_half h);\ndouble npy_half_to_double(npy_half h);\nnpy_half npy_float_to_half(float f);\nnpy_half npy_double_to_half(double d);\n/* Comparisons */\nint npy_half_eq(npy_half h1, npy_half h2);\nint npy_half_ne(npy_half h1, npy_half h2);\nint npy_half_le(npy_half h1, npy_half h2);\nint npy_half_lt(npy_half h1, npy_half h2);\nint npy_half_ge(npy_half h1, npy_half h2);\nint npy_half_gt(npy_half h1, npy_half h2);\n/* faster *_nonan variants for when you know h1 and h2 are not NaN */\nint npy_half_eq_nonan(npy_half h1, npy_half h2);\nint npy_half_lt_nonan(npy_half h1, npy_half h2);\nint npy_half_le_nonan(npy_half h1, npy_half h2);\n/* Miscellaneous functions */\nint npy_half_iszero(npy_half h);\nint npy_half_isnan(npy_half h);\nint npy_half_isinf(npy_half h);\nint npy_half_isfinite(npy_half h);\nint npy_half_signbit(npy_half h);\nnpy_half npy_half_copysign(npy_half x, npy_half y);\nnpy_half npy_half_spacing(npy_half h);\nnpy_half npy_half_nextafter(npy_half x, npy_half y);\nnpy_half npy_half_divmod(npy_half x, npy_half y, npy_half *modulus);\n\n/*\n * Half-precision constants\n */\n\n#define NPY_HALF_ZERO (0x0000u)\n#define NPY_HALF_PZERO (0x0000u)\n#define NPY_HALF_NZERO (0x8000u)\n#define NPY_HALF_ONE (0x3c00u)\n#define NPY_HALF_NEGONE (0xbc00u)\n#define NPY_HALF_PINF (0x7c00u)\n#define NPY_HALF_NINF (0xfc00u)\n#define NPY_HALF_NAN (0x7e00u)\n\n#define NPY_MAX_HALF (0x7bffu)\n\n/*\n * Bit-level conversions\n */\n\nnpy_uint16 npy_floatbits_to_halfbits(npy_uint32 f);\nnpy_uint16 npy_doublebits_to_halfbits(npy_uint64 d);\nnpy_uint32 npy_halfbits_to_floatbits(npy_uint16 h);\nnpy_uint64 npy_halfbits_to_doublebits(npy_uint16 h);\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif /* 
NUMPY_CORE_INCLUDE_NUMPY_HALFFLOAT_H_ */\n
.venv\Lib\site-packages\numpy\_core\include\numpy\halffloat.h
halffloat.h
C
2,029
0.95
0.014286
0.525424
python-kit
158
2023-10-29T15:09:09.868295
Apache-2.0
false
30abfb26f7896504e978b52afb6b9e06
/*\n * DON'T INCLUDE THIS DIRECTLY.\n */\n#ifndef NUMPY_CORE_INCLUDE_NUMPY_NDARRAYOBJECT_H_\n#define NUMPY_CORE_INCLUDE_NUMPY_NDARRAYOBJECT_H_\n\n#ifdef __cplusplus\nextern "C" {\n#endif\n\n#include <Python.h>\n#include "ndarraytypes.h"\n#include "dtype_api.h"\n\n/* Includes the "function" C-API -- these are all stored in a\n list of pointers --- one for each file\n The two lists are concatenated into one in multiarray.\n\n They are available as import_array()\n*/\n\n#include "__multiarray_api.h"\n\n/*\n * Include any definitions which are defined differently for 1.x and 2.x\n * (Symbols only available on 2.x are not there, but rather guarded.)\n */\n#include "npy_2_compat.h"\n\n/* C-API that requires previous API to be defined */\n\n#define PyArray_DescrCheck(op) PyObject_TypeCheck(op, &PyArrayDescr_Type)\n\n#define PyArray_Check(op) PyObject_TypeCheck(op, &PyArray_Type)\n#define PyArray_CheckExact(op) (((PyObject*)(op))->ob_type == &PyArray_Type)\n\n#define PyArray_HasArrayInterfaceType(op, type, context, out) \\n ((((out)=PyArray_FromStructInterface(op)) != Py_NotImplemented) || \\n (((out)=PyArray_FromInterface(op)) != Py_NotImplemented) || \\n (((out)=PyArray_FromArrayAttr(op, type, context)) != \\n Py_NotImplemented))\n\n#define PyArray_HasArrayInterface(op, out) \\n PyArray_HasArrayInterfaceType(op, NULL, NULL, out)\n\n#define PyArray_IsZeroDim(op) (PyArray_Check(op) && \\n (PyArray_NDIM((PyArrayObject *)op) == 0))\n\n#define PyArray_IsScalar(obj, cls) \\n (PyObject_TypeCheck(obj, &Py##cls##ArrType_Type))\n\n#define PyArray_CheckScalar(m) (PyArray_IsScalar(m, Generic) || \\n PyArray_IsZeroDim(m))\n#define PyArray_IsPythonNumber(obj) \\n (PyFloat_Check(obj) || PyComplex_Check(obj) || \\n PyLong_Check(obj) || PyBool_Check(obj))\n#define PyArray_IsIntegerScalar(obj) (PyLong_Check(obj) \\n || PyArray_IsScalar((obj), Integer))\n#define PyArray_IsPythonScalar(obj) \\n (PyArray_IsPythonNumber(obj) || PyBytes_Check(obj) || \\n PyUnicode_Check(obj))\n\n#define 
PyArray_IsAnyScalar(obj) \\n (PyArray_IsScalar(obj, Generic) || PyArray_IsPythonScalar(obj))\n\n#define PyArray_CheckAnyScalar(obj) (PyArray_IsPythonScalar(obj) || \\n PyArray_CheckScalar(obj))\n\n\n#define PyArray_GETCONTIGUOUS(m) (PyArray_ISCONTIGUOUS(m) ? \\n Py_INCREF(m), (m) : \\n (PyArrayObject *)(PyArray_Copy(m)))\n\n#define PyArray_SAMESHAPE(a1,a2) ((PyArray_NDIM(a1) == PyArray_NDIM(a2)) && \\n PyArray_CompareLists(PyArray_DIMS(a1), \\n PyArray_DIMS(a2), \\n PyArray_NDIM(a1)))\n\n#define PyArray_SIZE(m) PyArray_MultiplyList(PyArray_DIMS(m), PyArray_NDIM(m))\n#define PyArray_NBYTES(m) (PyArray_ITEMSIZE(m) * PyArray_SIZE(m))\n#define PyArray_FROM_O(m) PyArray_FromAny(m, NULL, 0, 0, 0, NULL)\n\n#define PyArray_FROM_OF(m,flags) PyArray_CheckFromAny(m, NULL, 0, 0, flags, \\n NULL)\n\n#define PyArray_FROM_OT(m,type) PyArray_FromAny(m, \\n PyArray_DescrFromType(type), 0, 0, 0, NULL)\n\n#define PyArray_FROM_OTF(m, type, flags) \\n PyArray_FromAny(m, PyArray_DescrFromType(type), 0, 0, \\n (((flags) & NPY_ARRAY_ENSURECOPY) ? \\n ((flags) | NPY_ARRAY_DEFAULT) : (flags)), NULL)\n\n#define PyArray_FROMANY(m, type, min, max, flags) \\n PyArray_FromAny(m, PyArray_DescrFromType(type), min, max, \\n (((flags) & NPY_ARRAY_ENSURECOPY) ? 
\\n (flags) | NPY_ARRAY_DEFAULT : (flags)), NULL)\n\n#define PyArray_ZEROS(m, dims, type, is_f_order) \\n PyArray_Zeros(m, dims, PyArray_DescrFromType(type), is_f_order)\n\n#define PyArray_EMPTY(m, dims, type, is_f_order) \\n PyArray_Empty(m, dims, PyArray_DescrFromType(type), is_f_order)\n\n#define PyArray_FILLWBYTE(obj, val) memset(PyArray_DATA(obj), val, \\n PyArray_NBYTES(obj))\n\n#define PyArray_ContiguousFromAny(op, type, min_depth, max_depth) \\n PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \\n max_depth, NPY_ARRAY_DEFAULT, NULL)\n\n#define PyArray_EquivArrTypes(a1, a2) \\n PyArray_EquivTypes(PyArray_DESCR(a1), PyArray_DESCR(a2))\n\n#define PyArray_EquivByteorders(b1, b2) \\n (((b1) == (b2)) || (PyArray_ISNBO(b1) == PyArray_ISNBO(b2)))\n\n#define PyArray_SimpleNew(nd, dims, typenum) \\n PyArray_New(&PyArray_Type, nd, dims, typenum, NULL, NULL, 0, 0, NULL)\n\n#define PyArray_SimpleNewFromData(nd, dims, typenum, data) \\n PyArray_New(&PyArray_Type, nd, dims, typenum, NULL, \\n data, 0, NPY_ARRAY_CARRAY, NULL)\n\n#define PyArray_SimpleNewFromDescr(nd, dims, descr) \\n PyArray_NewFromDescr(&PyArray_Type, descr, nd, dims, \\n NULL, NULL, 0, NULL)\n\n#define PyArray_ToScalar(data, arr) \\n PyArray_Scalar(data, PyArray_DESCR(arr), (PyObject *)arr)\n\n\n/* These might be faster without the dereferencing of obj\n going on inside -- of course an optimizing compiler should\n inline the constants inside a for loop making it a moot point\n*/\n\n#define PyArray_GETPTR1(obj, i) ((void *)(PyArray_BYTES(obj) + \\n (i)*PyArray_STRIDES(obj)[0]))\n\n#define PyArray_GETPTR2(obj, i, j) ((void *)(PyArray_BYTES(obj) + \\n (i)*PyArray_STRIDES(obj)[0] + \\n (j)*PyArray_STRIDES(obj)[1]))\n\n#define PyArray_GETPTR3(obj, i, j, k) ((void *)(PyArray_BYTES(obj) + \\n (i)*PyArray_STRIDES(obj)[0] + \\n (j)*PyArray_STRIDES(obj)[1] + \\n (k)*PyArray_STRIDES(obj)[2]))\n\n#define PyArray_GETPTR4(obj, i, j, k, l) ((void *)(PyArray_BYTES(obj) + \\n (i)*PyArray_STRIDES(obj)[0] + \\n 
(j)*PyArray_STRIDES(obj)[1] + \\n (k)*PyArray_STRIDES(obj)[2] + \\n (l)*PyArray_STRIDES(obj)[3]))\n\nstatic inline void\nPyArray_DiscardWritebackIfCopy(PyArrayObject *arr)\n{\n PyArrayObject_fields *fa = (PyArrayObject_fields *)arr;\n if (fa && fa->base) {\n if (fa->flags & NPY_ARRAY_WRITEBACKIFCOPY) {\n PyArray_ENABLEFLAGS((PyArrayObject*)fa->base, NPY_ARRAY_WRITEABLE);\n Py_DECREF(fa->base);\n fa->base = NULL;\n PyArray_CLEARFLAGS(arr, NPY_ARRAY_WRITEBACKIFCOPY);\n }\n }\n}\n\n#define PyArray_DESCR_REPLACE(descr) do { \\n PyArray_Descr *_new_; \\n _new_ = PyArray_DescrNew(descr); \\n Py_XDECREF(descr); \\n descr = _new_; \\n } while(0)\n\n/* Copy should always return contiguous array */\n#define PyArray_Copy(obj) PyArray_NewCopy(obj, NPY_CORDER)\n\n#define PyArray_FromObject(op, type, min_depth, max_depth) \\n PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \\n max_depth, NPY_ARRAY_BEHAVED | \\n NPY_ARRAY_ENSUREARRAY, NULL)\n\n#define PyArray_ContiguousFromObject(op, type, min_depth, max_depth) \\n PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \\n max_depth, NPY_ARRAY_DEFAULT | \\n NPY_ARRAY_ENSUREARRAY, NULL)\n\n#define PyArray_CopyFromObject(op, type, min_depth, max_depth) \\n PyArray_FromAny(op, PyArray_DescrFromType(type), min_depth, \\n max_depth, NPY_ARRAY_ENSURECOPY | \\n NPY_ARRAY_DEFAULT | \\n NPY_ARRAY_ENSUREARRAY, NULL)\n\n#define PyArray_Cast(mp, type_num) \\n PyArray_CastToType(mp, PyArray_DescrFromType(type_num), 0)\n\n#define PyArray_Take(ap, items, axis) \\n PyArray_TakeFrom(ap, items, axis, NULL, NPY_RAISE)\n\n#define PyArray_Put(ap, items, values) \\n PyArray_PutTo(ap, items, values, NPY_RAISE)\n\n\n/*\n Check to see if this key in the dictionary is the "title"\n entry of the tuple (i.e. 
a duplicate dictionary entry in the fields\n dict).\n*/\n\nstatic inline int\nNPY_TITLE_KEY_check(PyObject *key, PyObject *value)\n{\n PyObject *title;\n if (PyTuple_Size(value) != 3) {\n return 0;\n }\n title = PyTuple_GetItem(value, 2);\n if (key == title) {\n return 1;\n }\n#ifdef PYPY_VERSION\n /*\n * On PyPy, dictionary keys do not always preserve object identity.\n * Fall back to comparison by value.\n */\n if (PyUnicode_Check(title) && PyUnicode_Check(key)) {\n return PyUnicode_Compare(title, key) == 0 ? 1 : 0;\n }\n#endif\n return 0;\n}\n\n/* Macro, for backward compat with "if NPY_TITLE_KEY(key, value) { ..." */\n#define NPY_TITLE_KEY(key, value) (NPY_TITLE_KEY_check((key), (value)))\n\n#define DEPRECATE(msg) PyErr_WarnEx(PyExc_DeprecationWarning,msg,1)\n#define DEPRECATE_FUTUREWARNING(msg) PyErr_WarnEx(PyExc_FutureWarning,msg,1)\n\n\n/*\n * These macros and functions unfortunately require runtime version checks\n * that are only defined in `npy_2_compat.h`. For that reasons they cannot be\n * part of `ndarraytypes.h` which tries to be self contained.\n */\n\nstatic inline npy_intp\nPyArray_ITEMSIZE(const PyArrayObject *arr)\n{\n return PyDataType_ELSIZE(((PyArrayObject_fields *)arr)->descr);\n}\n\n#define PyDataType_HASFIELDS(obj) (PyDataType_ISLEGACY((PyArray_Descr*)(obj)) && PyDataType_NAMES((PyArray_Descr*)(obj)) != NULL)\n#define PyDataType_HASSUBARRAY(dtype) (PyDataType_ISLEGACY(dtype) && PyDataType_SUBARRAY(dtype) != NULL)\n#define PyDataType_ISUNSIZED(dtype) ((dtype)->elsize == 0 && \\n !PyDataType_HASFIELDS(dtype))\n\n#define PyDataType_FLAGCHK(dtype, flag) \\n ((PyDataType_FLAGS(dtype) & (flag)) == (flag))\n\n#define PyDataType_REFCHK(dtype) \\n PyDataType_FLAGCHK(dtype, NPY_ITEM_REFCOUNT)\n\n#define NPY_BEGIN_THREADS_DESCR(dtype) \\n do {if (!(PyDataType_FLAGCHK((dtype), NPY_NEEDS_PYAPI))) \\n NPY_BEGIN_THREADS;} while (0);\n\n#define NPY_END_THREADS_DESCR(dtype) \\n do {if (!(PyDataType_FLAGCHK((dtype), NPY_NEEDS_PYAPI))) \\n NPY_END_THREADS; } 
while (0);\n\n#if !(defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD)\n/* The internal copy of this is now defined in `dtypemeta.h` */\n/*\n * `PyArray_Scalar` is the same as this function but converts will convert\n * most NumPy types to Python scalars.\n */\nstatic inline PyObject *\nPyArray_GETITEM(const PyArrayObject *arr, const char *itemptr)\n{\n return PyDataType_GetArrFuncs(((PyArrayObject_fields *)arr)->descr)->getitem(\n (void *)itemptr, (PyArrayObject *)arr);\n}\n\n/*\n * SETITEM should only be used if it is known that the value is a scalar\n * and of a type understood by the arrays dtype.\n * Use `PyArray_Pack` if the value may be of a different dtype.\n */\nstatic inline int\nPyArray_SETITEM(PyArrayObject *arr, char *itemptr, PyObject *v)\n{\n return PyDataType_GetArrFuncs(((PyArrayObject_fields *)arr)->descr)->setitem(v, itemptr, arr);\n}\n#endif /* not internal */\n\n\n#ifdef __cplusplus\n}\n#endif\n\n\n#endif /* NUMPY_CORE_INCLUDE_NUMPY_NDARRAYOBJECT_H_ */\n
.venv\Lib\site-packages\numpy\_core\include\numpy\ndarrayobject.h
ndarrayobject.h
C
12,361
0.95
0.069079
0.444915
react-lib
652
2024-08-03T21:21:29.455074
MIT
false
415a16200e35b304555a9929aa60141c
#ifndef NUMPY_CORE_INCLUDE_NUMPY_NDARRAYTYPES_H_\n#define NUMPY_CORE_INCLUDE_NUMPY_NDARRAYTYPES_H_\n\n#include "npy_common.h"\n#include "npy_endian.h"\n#include "npy_cpu.h"\n#include "utils.h"\n\n#ifdef __cplusplus\nextern "C" {\n#endif\n\n#define NPY_NO_EXPORT NPY_VISIBILITY_HIDDEN\n\n/* Always allow threading unless it was explicitly disabled at build time */\n#if !NPY_NO_SMP\n #define NPY_ALLOW_THREADS 1\n#else\n #define NPY_ALLOW_THREADS 0\n#endif\n\n#ifndef __has_extension\n#define __has_extension(x) 0\n#endif\n\n/*\n * There are several places in the code where an array of dimensions\n * is allocated statically. This is the size of that static\n * allocation.\n *\n * The array creation itself could have arbitrary dimensions but all\n * the places where static allocation is used would need to be changed\n * to dynamic (including inside of several structures)\n *\n * As of NumPy 2.0, we strongly discourage the downstream use of NPY_MAXDIMS,\n * but since auditing everything seems a big ask, define it as 64.\n * A future version could:\n * - Increase or remove the limit and require recompilation (like 2.0 did)\n * - Deprecate or remove the macro but keep the limit (at basically any time)\n */\n#define NPY_MAXDIMS 64\n/* We cannot change this as it would break ABI: */\n#define NPY_MAXDIMS_LEGACY_ITERS 32\n/* NPY_MAXARGS is version dependent and defined in npy_2_compat.h */\n\n/* Used for Converter Functions "O&" code in ParseTuple */\n#define NPY_FAIL 0\n#define NPY_SUCCEED 1\n\n\nenum NPY_TYPES { NPY_BOOL=0,\n NPY_BYTE, NPY_UBYTE,\n NPY_SHORT, NPY_USHORT,\n NPY_INT, NPY_UINT,\n NPY_LONG, NPY_ULONG,\n NPY_LONGLONG, NPY_ULONGLONG,\n NPY_FLOAT, NPY_DOUBLE, NPY_LONGDOUBLE,\n NPY_CFLOAT, NPY_CDOUBLE, NPY_CLONGDOUBLE,\n NPY_OBJECT=17,\n NPY_STRING, NPY_UNICODE,\n NPY_VOID,\n /*\n * New 1.6 types appended, may be integrated\n * into the above in 2.0.\n */\n NPY_DATETIME, NPY_TIMEDELTA, NPY_HALF,\n\n NPY_CHAR, /* Deprecated, will raise if used */\n\n /* The number of 
*legacy* dtypes */\n NPY_NTYPES_LEGACY=24,\n\n /* assign a high value to avoid changing this in the\n future when new dtypes are added */\n NPY_NOTYPE=25,\n\n NPY_USERDEF=256, /* leave room for characters */\n\n /* The number of types not including the new 1.6 types */\n NPY_NTYPES_ABI_COMPATIBLE=21,\n\n /*\n * New DTypes which do not share the legacy layout\n * (added after NumPy 2.0). VSTRING is the first of these\n * we may open up a block for user-defined dtypes in the\n * future.\n */\n NPY_VSTRING=2056,\n};\n\n\n/* basetype array priority */\n#define NPY_PRIORITY 0.0\n\n/* default subtype priority */\n#define NPY_SUBTYPE_PRIORITY 1.0\n\n/* default scalar priority */\n#define NPY_SCALAR_PRIORITY -1000000.0\n\n/* How many floating point types are there (excluding half) */\n#define NPY_NUM_FLOATTYPE 3\n\n/*\n * These characters correspond to the array type and the struct\n * module\n */\n\nenum NPY_TYPECHAR {\n NPY_BOOLLTR = '?',\n NPY_BYTELTR = 'b',\n NPY_UBYTELTR = 'B',\n NPY_SHORTLTR = 'h',\n NPY_USHORTLTR = 'H',\n NPY_INTLTR = 'i',\n NPY_UINTLTR = 'I',\n NPY_LONGLTR = 'l',\n NPY_ULONGLTR = 'L',\n NPY_LONGLONGLTR = 'q',\n NPY_ULONGLONGLTR = 'Q',\n NPY_HALFLTR = 'e',\n NPY_FLOATLTR = 'f',\n NPY_DOUBLELTR = 'd',\n NPY_LONGDOUBLELTR = 'g',\n NPY_CFLOATLTR = 'F',\n NPY_CDOUBLELTR = 'D',\n NPY_CLONGDOUBLELTR = 'G',\n NPY_OBJECTLTR = 'O',\n NPY_STRINGLTR = 'S',\n NPY_DEPRECATED_STRINGLTR2 = 'a',\n NPY_UNICODELTR = 'U',\n NPY_VOIDLTR = 'V',\n NPY_DATETIMELTR = 'M',\n NPY_TIMEDELTALTR = 'm',\n NPY_CHARLTR = 'c',\n\n /*\n * New non-legacy DTypes\n */\n NPY_VSTRINGLTR = 'T',\n\n /*\n * Note, we removed `NPY_INTPLTR` due to changing its definition\n * to 'n', rather than 'p'. On any typical platform this is the\n * same integer. 
'n' should be used for the `np.intp` with the same\n * size as `size_t` while 'p' remains pointer sized.\n *\n * 'p', 'P', 'n', and 'N' are valid and defined explicitly\n * in `arraytypes.c.src`.\n */\n\n /*\n * These are for dtype 'kinds', not dtype 'typecodes'\n * as the above are for.\n */\n NPY_GENBOOLLTR ='b',\n NPY_SIGNEDLTR = 'i',\n NPY_UNSIGNEDLTR = 'u',\n NPY_FLOATINGLTR = 'f',\n NPY_COMPLEXLTR = 'c',\n\n};\n\n/*\n * Changing this may break Numpy API compatibility\n * due to changing offsets in PyArray_ArrFuncs, so be\n * careful. Here we have reused the mergesort slot for\n * any kind of stable sort, the actual implementation will\n * depend on the data type.\n */\ntypedef enum {\n _NPY_SORT_UNDEFINED=-1,\n NPY_QUICKSORT=0,\n NPY_HEAPSORT=1,\n NPY_MERGESORT=2,\n NPY_STABLESORT=2,\n} NPY_SORTKIND;\n#define NPY_NSORTS (NPY_STABLESORT + 1)\n\n\ntypedef enum {\n NPY_INTROSELECT=0\n} NPY_SELECTKIND;\n#define NPY_NSELECTS (NPY_INTROSELECT + 1)\n\n\ntypedef enum {\n NPY_SEARCHLEFT=0,\n NPY_SEARCHRIGHT=1\n} NPY_SEARCHSIDE;\n#define NPY_NSEARCHSIDES (NPY_SEARCHRIGHT + 1)\n\n\ntypedef enum {\n NPY_NOSCALAR=-1,\n NPY_BOOL_SCALAR,\n NPY_INTPOS_SCALAR,\n NPY_INTNEG_SCALAR,\n NPY_FLOAT_SCALAR,\n NPY_COMPLEX_SCALAR,\n NPY_OBJECT_SCALAR\n} NPY_SCALARKIND;\n#define NPY_NSCALARKINDS (NPY_OBJECT_SCALAR + 1)\n\n/* For specifying array memory layout or iteration order */\ntypedef enum {\n /* Fortran order if inputs are all Fortran, C otherwise */\n NPY_ANYORDER=-1,\n /* C order */\n NPY_CORDER=0,\n /* Fortran order */\n NPY_FORTRANORDER=1,\n /* An order as close to the inputs as possible */\n NPY_KEEPORDER=2\n} NPY_ORDER;\n\n/* For specifying allowed casting in operations which support it */\ntypedef enum {\n _NPY_ERROR_OCCURRED_IN_CAST = -1,\n /* Only allow identical types */\n NPY_NO_CASTING=0,\n /* Allow identical and byte swapped types */\n NPY_EQUIV_CASTING=1,\n /* Only allow safe casts */\n NPY_SAFE_CASTING=2,\n /* Allow safe casts or casts within the same kind */\n 
NPY_SAME_KIND_CASTING=3,\n /* Allow any casts */\n NPY_UNSAFE_CASTING=4,\n} NPY_CASTING;\n\ntypedef enum {\n NPY_CLIP=0,\n NPY_WRAP=1,\n NPY_RAISE=2\n} NPY_CLIPMODE;\n\ntypedef enum {\n NPY_VALID=0,\n NPY_SAME=1,\n NPY_FULL=2\n} NPY_CORRELATEMODE;\n\n/* The special not-a-time (NaT) value */\n#define NPY_DATETIME_NAT NPY_MIN_INT64\n\n/*\n * Upper bound on the length of a DATETIME ISO 8601 string\n * YEAR: 21 (64-bit year)\n * MONTH: 3\n * DAY: 3\n * HOURS: 3\n * MINUTES: 3\n * SECONDS: 3\n * ATTOSECONDS: 1 + 3*6\n * TIMEZONE: 5\n * NULL TERMINATOR: 1\n */\n#define NPY_DATETIME_MAX_ISO8601_STRLEN (21 + 3*5 + 1 + 3*6 + 6 + 1)\n\n/* The FR in the unit names stands for frequency */\ntypedef enum {\n /* Force signed enum type, must be -1 for code compatibility */\n NPY_FR_ERROR = -1, /* error or undetermined */\n\n /* Start of valid units */\n NPY_FR_Y = 0, /* Years */\n NPY_FR_M = 1, /* Months */\n NPY_FR_W = 2, /* Weeks */\n /* Gap where 1.6 NPY_FR_B (value 3) was */\n NPY_FR_D = 4, /* Days */\n NPY_FR_h = 5, /* hours */\n NPY_FR_m = 6, /* minutes */\n NPY_FR_s = 7, /* seconds */\n NPY_FR_ms = 8, /* milliseconds */\n NPY_FR_us = 9, /* microseconds */\n NPY_FR_ns = 10, /* nanoseconds */\n NPY_FR_ps = 11, /* picoseconds */\n NPY_FR_fs = 12, /* femtoseconds */\n NPY_FR_as = 13, /* attoseconds */\n NPY_FR_GENERIC = 14 /* unbound units, can convert to anything */\n} NPY_DATETIMEUNIT;\n\n/*\n * NOTE: With the NPY_FR_B gap for 1.6 ABI compatibility, NPY_DATETIME_NUMUNITS\n * is technically one more than the actual number of units.\n */\n#define NPY_DATETIME_NUMUNITS (NPY_FR_GENERIC + 1)\n#define NPY_DATETIME_DEFAULTUNIT NPY_FR_GENERIC\n\n/*\n * Business day conventions for mapping invalid business\n * days to valid business days.\n */\ntypedef enum {\n /* Go forward in time to the following business day. */\n NPY_BUSDAY_FORWARD,\n NPY_BUSDAY_FOLLOWING = NPY_BUSDAY_FORWARD,\n /* Go backward in time to the preceding business day. 
*/\n NPY_BUSDAY_BACKWARD,\n NPY_BUSDAY_PRECEDING = NPY_BUSDAY_BACKWARD,\n /*\n * Go forward in time to the following business day, unless it\n * crosses a month boundary, in which case go backward\n */\n NPY_BUSDAY_MODIFIEDFOLLOWING,\n /*\n * Go backward in time to the preceding business day, unless it\n * crosses a month boundary, in which case go forward.\n */\n NPY_BUSDAY_MODIFIEDPRECEDING,\n /* Produce a NaT for non-business days. */\n NPY_BUSDAY_NAT,\n /* Raise an exception for non-business days. */\n NPY_BUSDAY_RAISE\n} NPY_BUSDAY_ROLL;\n\n\n/************************************************************\n * NumPy Auxiliary Data for inner loops, sort functions, etc.\n ************************************************************/\n\n/*\n * When creating an auxiliary data struct, this should always appear\n * as the first member, like this:\n *\n * typedef struct {\n * NpyAuxData base;\n * double constant;\n * } constant_multiplier_aux_data;\n */\ntypedef struct NpyAuxData_tag NpyAuxData;\n\n/* Function pointers for freeing or cloning auxiliary data */\ntypedef void (NpyAuxData_FreeFunc) (NpyAuxData *);\ntypedef NpyAuxData *(NpyAuxData_CloneFunc) (NpyAuxData *);\n\nstruct NpyAuxData_tag {\n NpyAuxData_FreeFunc *free;\n NpyAuxData_CloneFunc *clone;\n /* To allow for a bit of expansion without breaking the ABI */\n void *reserved[2];\n};\n\n/* Macros to use for freeing and cloning auxiliary data */\n#define NPY_AUXDATA_FREE(auxdata) \\n do { \\n if ((auxdata) != NULL) { \\n (auxdata)->free(auxdata); \\n } \\n } while(0)\n#define NPY_AUXDATA_CLONE(auxdata) \\n ((auxdata)->clone(auxdata))\n\n#define NPY_ERR(str) fprintf(stderr, #str); fflush(stderr);\n#define NPY_ERR2(str) fprintf(stderr, str); fflush(stderr);\n\n/*\n* Macros to define how array, and dimension/strides data is\n* allocated. 
These should be made private\n*/\n\n#define NPY_USE_PYMEM 1\n\n\n#if NPY_USE_PYMEM == 1\n/* use the Raw versions which are safe to call with the GIL released */\n#define PyArray_malloc PyMem_RawMalloc\n#define PyArray_free PyMem_RawFree\n#define PyArray_realloc PyMem_RawRealloc\n#else\n#define PyArray_malloc malloc\n#define PyArray_free free\n#define PyArray_realloc realloc\n#endif\n\n/* Dimensions and strides */\n#define PyDimMem_NEW(size) \\n ((npy_intp *)PyArray_malloc(size*sizeof(npy_intp)))\n\n#define PyDimMem_FREE(ptr) PyArray_free(ptr)\n\n#define PyDimMem_RENEW(ptr,size) \\n ((npy_intp *)PyArray_realloc(ptr,size*sizeof(npy_intp)))\n\n/* forward declaration */\nstruct _PyArray_Descr;\n\n/* These must deal with unaligned and swapped data if necessary */\ntypedef PyObject * (PyArray_GetItemFunc) (void *, void *);\ntypedef int (PyArray_SetItemFunc)(PyObject *, void *, void *);\n\ntypedef void (PyArray_CopySwapNFunc)(void *, npy_intp, void *, npy_intp,\n npy_intp, int, void *);\n\ntypedef void (PyArray_CopySwapFunc)(void *, void *, int, void *);\ntypedef npy_bool (PyArray_NonzeroFunc)(void *, void *);\n\n\n/*\n * These assume aligned and notswapped data -- a buffer will be used\n * before or contiguous data will be obtained\n */\n\ntypedef int (PyArray_CompareFunc)(const void *, const void *, void *);\ntypedef int (PyArray_ArgFunc)(void*, npy_intp, npy_intp*, void *);\n\ntypedef void (PyArray_DotFunc)(void *, npy_intp, void *, npy_intp, void *,\n npy_intp, void *);\n\ntypedef void (PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *,\n void *);\n\n/*\n * XXX the ignore argument should be removed next time the API version\n * is bumped. 
It used to be the separator.\n */\ntypedef int (PyArray_ScanFunc)(FILE *fp, void *dptr,\n char *ignore, struct _PyArray_Descr *);\ntypedef int (PyArray_FromStrFunc)(char *s, void *dptr, char **endptr,\n struct _PyArray_Descr *);\n\ntypedef int (PyArray_FillFunc)(void *, npy_intp, void *);\n\ntypedef int (PyArray_SortFunc)(void *, npy_intp, void *);\ntypedef int (PyArray_ArgSortFunc)(void *, npy_intp *, npy_intp, void *);\n\ntypedef int (PyArray_FillWithScalarFunc)(void *, npy_intp, void *, void *);\n\ntypedef int (PyArray_ScalarKindFunc)(void *);\n\ntypedef struct {\n npy_intp *ptr;\n int len;\n} PyArray_Dims;\n\ntypedef struct {\n /*\n * Functions to cast to most other standard types\n * Can have some NULL entries. The types\n * DATETIME, TIMEDELTA, and HALF go into the castdict\n * even though they are built-in.\n */\n PyArray_VectorUnaryFunc *cast[NPY_NTYPES_ABI_COMPATIBLE];\n\n /* The next four functions *cannot* be NULL */\n\n /*\n * Functions to get and set items with standard Python types\n * -- not array scalars\n */\n PyArray_GetItemFunc *getitem;\n PyArray_SetItemFunc *setitem;\n\n /*\n * Copy and/or swap data. 
Memory areas may not overlap\n * Use memmove first if they might\n */\n PyArray_CopySwapNFunc *copyswapn;\n PyArray_CopySwapFunc *copyswap;\n\n /*\n * Function to compare items\n * Can be NULL\n */\n PyArray_CompareFunc *compare;\n\n /*\n * Function to select largest\n * Can be NULL\n */\n PyArray_ArgFunc *argmax;\n\n /*\n * Function to compute dot product\n * Can be NULL\n */\n PyArray_DotFunc *dotfunc;\n\n /*\n * Function to scan an ASCII file and\n * place a single value plus possible separator\n * Can be NULL\n */\n PyArray_ScanFunc *scanfunc;\n\n /*\n * Function to read a single value from a string\n * and adjust the pointer; Can be NULL\n */\n PyArray_FromStrFunc *fromstr;\n\n /*\n * Function to determine if data is zero or not\n * If NULL a default version is\n * used at Registration time.\n */\n PyArray_NonzeroFunc *nonzero;\n\n /*\n * Used for arange. Should return 0 on success\n * and -1 on failure.\n * Can be NULL.\n */\n PyArray_FillFunc *fill;\n\n /*\n * Function to fill arrays with scalar values\n * Can be NULL\n */\n PyArray_FillWithScalarFunc *fillwithscalar;\n\n /*\n * Sorting functions\n * Can be NULL\n */\n PyArray_SortFunc *sort[NPY_NSORTS];\n PyArray_ArgSortFunc *argsort[NPY_NSORTS];\n\n /*\n * Dictionary of additional casting functions\n * PyArray_VectorUnaryFuncs\n * which can be populated to support casting\n * to other registered types. Can be NULL\n */\n PyObject *castdict;\n\n /*\n * Functions useful for generalizing\n * the casting rules.\n * Can be NULL;\n */\n PyArray_ScalarKindFunc *scalarkind;\n int **cancastscalarkindto;\n int *cancastto;\n\n void *_unused1;\n void *_unused2;\n void *_unused3;\n\n /*\n * Function to select smallest\n * Can be NULL\n */\n PyArray_ArgFunc *argmin;\n\n} PyArray_ArrFuncs;\n\n\n/* The item must be reference counted when it is inserted or extracted. 
*/\n#define NPY_ITEM_REFCOUNT 0x01\n/* Same as needing REFCOUNT */\n#define NPY_ITEM_HASOBJECT 0x01\n/* Convert to list for pickling */\n#define NPY_LIST_PICKLE 0x02\n/* The item is a POINTER */\n#define NPY_ITEM_IS_POINTER 0x04\n/* memory needs to be initialized for this data-type */\n#define NPY_NEEDS_INIT 0x08\n/* operations need Python C-API so don't give-up thread. */\n#define NPY_NEEDS_PYAPI 0x10\n/* Use f.getitem when extracting elements of this data-type */\n#define NPY_USE_GETITEM 0x20\n/* Use f.setitem when setting creating 0-d array from this data-type.*/\n#define NPY_USE_SETITEM 0x40\n/* A sticky flag specifically for structured arrays */\n#define NPY_ALIGNED_STRUCT 0x80\n\n/*\n *These are inherited for global data-type if any data-types in the\n * field have them\n */\n#define NPY_FROM_FIELDS (NPY_NEEDS_INIT | NPY_LIST_PICKLE | \\n NPY_ITEM_REFCOUNT | NPY_NEEDS_PYAPI)\n\n#define NPY_OBJECT_DTYPE_FLAGS (NPY_LIST_PICKLE | NPY_USE_GETITEM | \\n NPY_ITEM_IS_POINTER | NPY_ITEM_REFCOUNT | \\n NPY_NEEDS_INIT | NPY_NEEDS_PYAPI)\n\n#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION\n/*\n * Public version of the Descriptor struct as of 2.x\n */\ntypedef struct _PyArray_Descr {\n PyObject_HEAD\n /*\n * the type object representing an\n * instance of this type -- should not\n * be two type_numbers with the same type\n * object.\n */\n PyTypeObject *typeobj;\n /* kind for this type */\n char kind;\n /* unique-character representing this type */\n char type;\n /*\n * '>' (big), '<' (little), '|'\n * (not-applicable), or '=' (native).\n */\n char byteorder;\n /* Former flags flags space (unused) to ensure type_num is stable. */\n char _former_flags;\n /* number representing this type */\n int type_num;\n /* Space for dtype instance specific flags. 
*/\n npy_uint64 flags;\n /* element size (itemsize) for this type */\n npy_intp elsize;\n /* alignment needed for this type */\n npy_intp alignment;\n /* metadata dict or NULL */\n PyObject *metadata;\n /* Cached hash value (-1 if not yet computed). */\n npy_hash_t hash;\n /* Unused slot (must be initialized to NULL) for future use */\n void *reserved_null[2];\n} PyArray_Descr;\n\n#else /* 1.x and 2.x compatible version (only shared fields): */\n\ntypedef struct _PyArray_Descr {\n PyObject_HEAD\n PyTypeObject *typeobj;\n char kind;\n char type;\n char byteorder;\n char _former_flags;\n int type_num;\n} PyArray_Descr;\n\n/* To access modified fields, define the full 2.0 struct: */\ntypedef struct {\n PyObject_HEAD\n PyTypeObject *typeobj;\n char kind;\n char type;\n char byteorder;\n char _former_flags;\n int type_num;\n npy_uint64 flags;\n npy_intp elsize;\n npy_intp alignment;\n PyObject *metadata;\n npy_hash_t hash;\n void *reserved_null[2];\n} _PyArray_DescrNumPy2;\n\n#endif /* 1.x and 2.x compatible version */\n\n/*\n * Semi-private struct with additional field of legacy descriptors (must\n * check NPY_DT_is_legacy before casting/accessing). The struct is also not\n * valid when running on 1.x (i.e. in public API use).\n */\ntypedef struct {\n PyObject_HEAD\n PyTypeObject *typeobj;\n char kind;\n char type;\n char byteorder;\n char _former_flags;\n int type_num;\n npy_uint64 flags;\n npy_intp elsize;\n npy_intp alignment;\n PyObject *metadata;\n npy_hash_t hash;\n void *reserved_null[2];\n struct _arr_descr *subarray;\n PyObject *fields;\n PyObject *names;\n NpyAuxData *c_metadata;\n} _PyArray_LegacyDescr;\n\n\n/*\n * Umodified PyArray_Descr struct identical to NumPy 1.x. 
This struct is\n * used as a prototype for registering a new legacy DType.\n * It is also used to access the fields in user code running on 1.x.\n */\ntypedef struct {\n PyObject_HEAD\n PyTypeObject *typeobj;\n char kind;\n char type;\n char byteorder;\n char flags;\n int type_num;\n int elsize;\n int alignment;\n struct _arr_descr *subarray;\n PyObject *fields;\n PyObject *names;\n PyArray_ArrFuncs *f;\n PyObject *metadata;\n NpyAuxData *c_metadata;\n npy_hash_t hash;\n} PyArray_DescrProto;\n\n\ntypedef struct _arr_descr {\n PyArray_Descr *base;\n PyObject *shape; /* a tuple */\n} PyArray_ArrayDescr;\n\n/*\n * Memory handler structure for array data.\n */\n/* The declaration of free differs from PyMemAllocatorEx */\ntypedef struct {\n void *ctx;\n void* (*malloc) (void *ctx, size_t size);\n void* (*calloc) (void *ctx, size_t nelem, size_t elsize);\n void* (*realloc) (void *ctx, void *ptr, size_t new_size);\n void (*free) (void *ctx, void *ptr, size_t size);\n /*\n * This is the end of the version=1 struct. Only add new fields after\n * this line\n */\n} PyDataMemAllocator;\n\ntypedef struct {\n char name[127]; /* multiple of 64 to keep the struct aligned */\n uint8_t version; /* currently 1 */\n PyDataMemAllocator allocator;\n} PyDataMem_Handler;\n\n\n/*\n * The main array object structure.\n *\n * It has been recommended to use the inline functions defined below\n * (PyArray_DATA and friends) to access fields here for a number of\n * releases. 
Direct access to the members themselves is deprecated.\n * To ensure that your code does not use deprecated access,\n * #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION\n * (or NPY_1_8_API_VERSION or higher as required).\n */\n/* This struct will be moved to a private header in a future release */\ntypedef struct tagPyArrayObject_fields {\n PyObject_HEAD\n /* Pointer to the raw data buffer */\n char *data;\n /* The number of dimensions, also called 'ndim' */\n int nd;\n /* The size in each dimension, also called 'shape' */\n npy_intp *dimensions;\n /*\n * Number of bytes to jump to get to the\n * next element in each dimension\n */\n npy_intp *strides;\n /*\n * This object is decref'd upon\n * deletion of array. Except in the\n * case of WRITEBACKIFCOPY which has\n * special handling.\n *\n * For views it points to the original\n * array, collapsed so no chains of\n * views occur.\n *\n * For creation from buffer object it\n * points to an object that should be\n * decref'd on deletion\n *\n * For WRITEBACKIFCOPY flag this is an\n * array to-be-updated upon calling\n * PyArray_ResolveWritebackIfCopy\n */\n PyObject *base;\n /* Pointer to type structure */\n PyArray_Descr *descr;\n /* Flags describing array -- see below */\n int flags;\n /* For weak references */\n PyObject *weakreflist;\n#if NPY_FEATURE_VERSION >= NPY_1_20_API_VERSION\n void *_buffer_info; /* private buffer info, tagged to allow warning */\n#endif\n /*\n * For malloc/calloc/realloc/free per object\n */\n#if NPY_FEATURE_VERSION >= NPY_1_22_API_VERSION\n PyObject *mem_handler;\n#endif\n} PyArrayObject_fields;\n\n/*\n * To hide the implementation details, we only expose\n * the Python struct HEAD.\n */\n#if !defined(NPY_NO_DEPRECATED_API) || \\n (NPY_NO_DEPRECATED_API < NPY_1_7_API_VERSION)\n/*\n * Can't put this in npy_deprecated_api.h like the others.\n * PyArrayObject field access is deprecated as of NumPy 1.7.\n */\ntypedef PyArrayObject_fields PyArrayObject;\n#else\ntypedef struct 
tagPyArrayObject {\n PyObject_HEAD\n} PyArrayObject;\n#endif\n\n/*\n * Removed 2020-Nov-25, NumPy 1.20\n * #define NPY_SIZEOF_PYARRAYOBJECT (sizeof(PyArrayObject_fields))\n *\n * The above macro was removed as it gave a false sense of a stable ABI\n * with respect to the structures size. If you require a runtime constant,\n * you can use `PyArray_Type.tp_basicsize` instead. Otherwise, please\n * see the PyArrayObject documentation or ask the NumPy developers for\n * information on how to correctly replace the macro in a way that is\n * compatible with multiple NumPy versions.\n */\n\n/* Mirrors buffer object to ptr */\n\ntypedef struct {\n PyObject_HEAD\n PyObject *base;\n void *ptr;\n npy_intp len;\n int flags;\n} PyArray_Chunk;\n\ntypedef struct {\n NPY_DATETIMEUNIT base;\n int num;\n} PyArray_DatetimeMetaData;\n\ntypedef struct {\n NpyAuxData base;\n PyArray_DatetimeMetaData meta;\n} PyArray_DatetimeDTypeMetaData;\n\n/*\n * This structure contains an exploded view of a date-time value.\n * NaT is represented by year == NPY_DATETIME_NAT.\n */\ntypedef struct {\n npy_int64 year;\n npy_int32 month, day, hour, min, sec, us, ps, as;\n} npy_datetimestruct;\n\n/* This structure contains an exploded view of a timedelta value */\ntypedef struct {\n npy_int64 day;\n npy_int32 sec, us, ps, as;\n} npy_timedeltastruct;\n\ntypedef int (PyArray_FinalizeFunc)(PyArrayObject *, PyObject *);\n\n/*\n * Means c-style contiguous (last index varies the fastest). 
The data\n * elements right after each other.\n *\n * This flag may be requested in constructor functions.\n * This flag may be tested for in PyArray_FLAGS(arr).\n */\n#define NPY_ARRAY_C_CONTIGUOUS 0x0001\n\n/*\n * Set if array is a contiguous Fortran array: the first index varies\n * the fastest in memory (strides array is reverse of C-contiguous\n * array)\n *\n * This flag may be requested in constructor functions.\n * This flag may be tested for in PyArray_FLAGS(arr).\n */\n#define NPY_ARRAY_F_CONTIGUOUS 0x0002\n\n/*\n * Note: all 0-d arrays are C_CONTIGUOUS and F_CONTIGUOUS. If a\n * 1-d array is C_CONTIGUOUS it is also F_CONTIGUOUS. Arrays with\n * more then one dimension can be C_CONTIGUOUS and F_CONTIGUOUS\n * at the same time if they have either zero or one element.\n * A higher dimensional array always has the same contiguity flags as\n * `array.squeeze()`; dimensions with `array.shape[dimension] == 1` are\n * effectively ignored when checking for contiguity.\n */\n\n/*\n * If set, the array owns the data: it will be free'd when the array\n * is deleted.\n *\n * This flag may be tested for in PyArray_FLAGS(arr).\n */\n#define NPY_ARRAY_OWNDATA 0x0004\n\n/*\n * An array never has the next four set; they're only used as parameter\n * flags to the various FromAny functions\n *\n * This flag may be requested in constructor functions.\n */\n\n/* Cause a cast to occur regardless of whether or not it is safe. */\n#define NPY_ARRAY_FORCECAST 0x0010\n\n/*\n * Always copy the array. Returned arrays are always CONTIGUOUS,\n * ALIGNED, and WRITEABLE. 
See also: NPY_ARRAY_ENSURENOCOPY = 0x4000.\n *\n * This flag may be requested in constructor functions.\n */\n#define NPY_ARRAY_ENSURECOPY 0x0020\n\n/*\n * Make sure the returned array is a base-class ndarray\n *\n * This flag may be requested in constructor functions.\n */\n#define NPY_ARRAY_ENSUREARRAY 0x0040\n\n/*\n * Make sure that the strides are in units of the element size Needed\n * for some operations with record-arrays.\n *\n * This flag may be requested in constructor functions.\n */\n#define NPY_ARRAY_ELEMENTSTRIDES 0x0080\n\n/*\n * Array data is aligned on the appropriate memory address for the type\n * stored according to how the compiler would align things (e.g., an\n * array of integers (4 bytes each) starts on a memory address that's\n * a multiple of 4)\n *\n * This flag may be requested in constructor functions.\n * This flag may be tested for in PyArray_FLAGS(arr).\n */\n#define NPY_ARRAY_ALIGNED 0x0100\n\n/*\n * Array data has the native endianness\n *\n * This flag may be requested in constructor functions.\n */\n#define NPY_ARRAY_NOTSWAPPED 0x0200\n\n/*\n * Array data is writeable\n *\n * This flag may be requested in constructor functions.\n * This flag may be tested for in PyArray_FLAGS(arr).\n */\n#define NPY_ARRAY_WRITEABLE 0x0400\n\n/*\n * If this flag is set, then base contains a pointer to an array of\n * the same size that should be updated with the current contents of\n * this array when PyArray_ResolveWritebackIfCopy is called.\n *\n * This flag may be requested in constructor functions.\n * This flag may be tested for in PyArray_FLAGS(arr).\n */\n#define NPY_ARRAY_WRITEBACKIFCOPY 0x2000\n\n/*\n * No copy may be made while converting from an object/array (result is a view)\n *\n * This flag may be requested in constructor functions.\n */\n#define NPY_ARRAY_ENSURENOCOPY 0x4000\n\n/*\n * NOTE: there are also internal flags defined in multiarray/arrayobject.h,\n * which start at bit 31 and work down.\n */\n\n#define NPY_ARRAY_BEHAVED 
(NPY_ARRAY_ALIGNED | \\n NPY_ARRAY_WRITEABLE)\n#define NPY_ARRAY_BEHAVED_NS (NPY_ARRAY_ALIGNED | \\n NPY_ARRAY_WRITEABLE | \\n NPY_ARRAY_NOTSWAPPED)\n#define NPY_ARRAY_CARRAY (NPY_ARRAY_C_CONTIGUOUS | \\n NPY_ARRAY_BEHAVED)\n#define NPY_ARRAY_CARRAY_RO (NPY_ARRAY_C_CONTIGUOUS | \\n NPY_ARRAY_ALIGNED)\n#define NPY_ARRAY_FARRAY (NPY_ARRAY_F_CONTIGUOUS | \\n NPY_ARRAY_BEHAVED)\n#define NPY_ARRAY_FARRAY_RO (NPY_ARRAY_F_CONTIGUOUS | \\n NPY_ARRAY_ALIGNED)\n#define NPY_ARRAY_DEFAULT (NPY_ARRAY_CARRAY)\n#define NPY_ARRAY_IN_ARRAY (NPY_ARRAY_CARRAY_RO)\n#define NPY_ARRAY_OUT_ARRAY (NPY_ARRAY_CARRAY)\n#define NPY_ARRAY_INOUT_ARRAY (NPY_ARRAY_CARRAY)\n#define NPY_ARRAY_INOUT_ARRAY2 (NPY_ARRAY_CARRAY | \\n NPY_ARRAY_WRITEBACKIFCOPY)\n#define NPY_ARRAY_IN_FARRAY (NPY_ARRAY_FARRAY_RO)\n#define NPY_ARRAY_OUT_FARRAY (NPY_ARRAY_FARRAY)\n#define NPY_ARRAY_INOUT_FARRAY (NPY_ARRAY_FARRAY)\n#define NPY_ARRAY_INOUT_FARRAY2 (NPY_ARRAY_FARRAY | \\n NPY_ARRAY_WRITEBACKIFCOPY)\n\n#define NPY_ARRAY_UPDATE_ALL (NPY_ARRAY_C_CONTIGUOUS | \\n NPY_ARRAY_F_CONTIGUOUS | \\n NPY_ARRAY_ALIGNED)\n\n/* This flag is for the array interface, not PyArrayObject */\n#define NPY_ARR_HAS_DESCR 0x0800\n\n\n\n\n/*\n * Size of internal buffers used for alignment Make BUFSIZE a multiple\n * of sizeof(npy_cdouble) -- usually 16 so that ufunc buffers are aligned\n */\n#define NPY_MIN_BUFSIZE ((int)sizeof(npy_cdouble))\n#define NPY_MAX_BUFSIZE (((int)sizeof(npy_cdouble))*1000000)\n#define NPY_BUFSIZE 8192\n/* buffer stress test size: */\n/*#define NPY_BUFSIZE 17*/\n\n/*\n * C API: consists of Macros and functions. 
The MACROS are defined\n * here.\n */\n\n\n#define PyArray_ISCONTIGUOUS(m) PyArray_CHKFLAGS((m), NPY_ARRAY_C_CONTIGUOUS)\n#define PyArray_ISWRITEABLE(m) PyArray_CHKFLAGS((m), NPY_ARRAY_WRITEABLE)\n#define PyArray_ISALIGNED(m) PyArray_CHKFLAGS((m), NPY_ARRAY_ALIGNED)\n\n#define PyArray_IS_C_CONTIGUOUS(m) PyArray_CHKFLAGS((m), NPY_ARRAY_C_CONTIGUOUS)\n#define PyArray_IS_F_CONTIGUOUS(m) PyArray_CHKFLAGS((m), NPY_ARRAY_F_CONTIGUOUS)\n\n/* the variable is used in some places, so always define it */\n#define NPY_BEGIN_THREADS_DEF PyThreadState *_save=NULL;\n#if NPY_ALLOW_THREADS\n#define NPY_BEGIN_ALLOW_THREADS Py_BEGIN_ALLOW_THREADS\n#define NPY_END_ALLOW_THREADS Py_END_ALLOW_THREADS\n#define NPY_BEGIN_THREADS do {_save = PyEval_SaveThread();} while (0);\n#define NPY_END_THREADS do { if (_save) \\n { PyEval_RestoreThread(_save); _save = NULL;} } while (0);\n#define NPY_BEGIN_THREADS_THRESHOLDED(loop_size) do { if ((loop_size) > 500) \\n { _save = PyEval_SaveThread();} } while (0);\n\n\n#define NPY_ALLOW_C_API_DEF PyGILState_STATE __save__;\n#define NPY_ALLOW_C_API do {__save__ = PyGILState_Ensure();} while (0);\n#define NPY_DISABLE_C_API do {PyGILState_Release(__save__);} while (0);\n#else\n#define NPY_BEGIN_ALLOW_THREADS\n#define NPY_END_ALLOW_THREADS\n#define NPY_BEGIN_THREADS\n#define NPY_END_THREADS\n#define NPY_BEGIN_THREADS_THRESHOLDED(loop_size)\n#define NPY_BEGIN_THREADS_DESCR(dtype)\n#define NPY_END_THREADS_DESCR(dtype)\n#define NPY_ALLOW_C_API_DEF\n#define NPY_ALLOW_C_API\n#define NPY_DISABLE_C_API\n#endif\n\n/**********************************\n * The nditer object, added in 1.6\n **********************************/\n\n/* The actual structure of the iterator is an internal detail */\ntypedef struct NpyIter_InternalOnly NpyIter;\n\n/* Iterator function pointers that may be specialized */\ntypedef int (NpyIter_IterNextFunc)(NpyIter *iter);\ntypedef void (NpyIter_GetMultiIndexFunc)(NpyIter *iter,\n npy_intp *outcoords);\n\n/*** Global flags that may be passed to 
the iterator constructors ***/\n\n/* Track an index representing C order */\n#define NPY_ITER_C_INDEX 0x00000001\n/* Track an index representing Fortran order */\n#define NPY_ITER_F_INDEX 0x00000002\n/* Track a multi-index */\n#define NPY_ITER_MULTI_INDEX 0x00000004\n/* User code external to the iterator does the 1-dimensional innermost loop */\n#define NPY_ITER_EXTERNAL_LOOP 0x00000008\n/* Convert all the operands to a common data type */\n#define NPY_ITER_COMMON_DTYPE 0x00000010\n/* Operands may hold references, requiring API access during iteration */\n#define NPY_ITER_REFS_OK 0x00000020\n/* Zero-sized operands should be permitted, iteration checks IterSize for 0 */\n#define NPY_ITER_ZEROSIZE_OK 0x00000040\n/* Permits reductions (size-0 stride with dimension size > 1) */\n#define NPY_ITER_REDUCE_OK 0x00000080\n/* Enables sub-range iteration */\n#define NPY_ITER_RANGED 0x00000100\n/* Enables buffering */\n#define NPY_ITER_BUFFERED 0x00000200\n/* When buffering is enabled, grows the inner loop if possible */\n#define NPY_ITER_GROWINNER 0x00000400\n/* Delay allocation of buffers until first Reset* call */\n#define NPY_ITER_DELAY_BUFALLOC 0x00000800\n/* When NPY_KEEPORDER is specified, disable reversing negative-stride axes */\n#define NPY_ITER_DONT_NEGATE_STRIDES 0x00001000\n/*\n * If output operands overlap with other operands (based on heuristics that\n * has false positives but no false negatives), make temporary copies to\n * eliminate overlap.\n */\n#define NPY_ITER_COPY_IF_OVERLAP 0x00002000\n\n/*** Per-operand flags that may be passed to the iterator constructors ***/\n\n/* The operand will be read from and written to */\n#define NPY_ITER_READWRITE 0x00010000\n/* The operand will only be read from */\n#define NPY_ITER_READONLY 0x00020000\n/* The operand will only be written to */\n#define NPY_ITER_WRITEONLY 0x00040000\n/* The operand's data must be in native byte order */\n#define NPY_ITER_NBO 0x00080000\n/* The operand's data must be aligned */\n#define 
NPY_ITER_ALIGNED 0x00100000\n/* The operand's data must be contiguous (within the inner loop) */\n#define NPY_ITER_CONTIG 0x00200000\n/* The operand may be copied to satisfy requirements */\n#define NPY_ITER_COPY 0x00400000\n/* The operand may be copied with WRITEBACKIFCOPY to satisfy requirements */\n#define NPY_ITER_UPDATEIFCOPY 0x00800000\n/* Allocate the operand if it is NULL */\n#define NPY_ITER_ALLOCATE 0x01000000\n/* If an operand is allocated, don't use any subtype */\n#define NPY_ITER_NO_SUBTYPE 0x02000000\n/* This is a virtual array slot, operand is NULL but temporary data is there */\n#define NPY_ITER_VIRTUAL 0x04000000\n/* Require that the dimension match the iterator dimensions exactly */\n#define NPY_ITER_NO_BROADCAST 0x08000000\n/* A mask is being used on this array, affects buffer -> array copy */\n#define NPY_ITER_WRITEMASKED 0x10000000\n/* This array is the mask for all WRITEMASKED operands */\n#define NPY_ITER_ARRAYMASK 0x20000000\n/* Assume iterator order data access for COPY_IF_OVERLAP */\n#define NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE 0x40000000\n\n#define NPY_ITER_GLOBAL_FLAGS 0x0000ffff\n#define NPY_ITER_PER_OP_FLAGS 0xffff0000\n\n\n/*****************************\n * Basic iterator object\n *****************************/\n\n/* FWD declaration */\ntypedef struct PyArrayIterObject_tag PyArrayIterObject;\n\n/*\n * type of the function which translates a set of coordinates to a\n * pointer to the data\n */\ntypedef char* (*npy_iter_get_dataptr_t)(\n PyArrayIterObject* iter, const npy_intp*);\n\nstruct PyArrayIterObject_tag {\n PyObject_HEAD\n int nd_m1; /* number of dimensions - 1 */\n npy_intp index, size;\n npy_intp coordinates[NPY_MAXDIMS_LEGACY_ITERS];/* N-dimensional loop */\n npy_intp dims_m1[NPY_MAXDIMS_LEGACY_ITERS]; /* ao->dimensions - 1 */\n npy_intp strides[NPY_MAXDIMS_LEGACY_ITERS]; /* ao->strides or fake */\n npy_intp backstrides[NPY_MAXDIMS_LEGACY_ITERS];/* how far to jump back */\n npy_intp factors[NPY_MAXDIMS_LEGACY_ITERS]; /* shape 
factors */\n PyArrayObject *ao;\n char *dataptr; /* pointer to current item*/\n npy_bool contiguous;\n\n npy_intp bounds[NPY_MAXDIMS_LEGACY_ITERS][2];\n npy_intp limits[NPY_MAXDIMS_LEGACY_ITERS][2];\n npy_intp limits_sizes[NPY_MAXDIMS_LEGACY_ITERS];\n npy_iter_get_dataptr_t translate;\n} ;\n\n\n/* Iterator API */\n#define PyArrayIter_Check(op) PyObject_TypeCheck((op), &PyArrayIter_Type)\n\n#define _PyAIT(it) ((PyArrayIterObject *)(it))\n#define PyArray_ITER_RESET(it) do { \\n _PyAIT(it)->index = 0; \\n _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \\n memset(_PyAIT(it)->coordinates, 0, \\n (_PyAIT(it)->nd_m1+1)*sizeof(npy_intp)); \\n} while (0)\n\n#define _PyArray_ITER_NEXT1(it) do { \\n (it)->dataptr += _PyAIT(it)->strides[0]; \\n (it)->coordinates[0]++; \\n} while (0)\n\n#define _PyArray_ITER_NEXT2(it) do { \\n if ((it)->coordinates[1] < (it)->dims_m1[1]) { \\n (it)->coordinates[1]++; \\n (it)->dataptr += (it)->strides[1]; \\n } \\n else { \\n (it)->coordinates[1] = 0; \\n (it)->coordinates[0]++; \\n (it)->dataptr += (it)->strides[0] - \\n (it)->backstrides[1]; \\n } \\n} while (0)\n\n#define PyArray_ITER_NEXT(it) do { \\n _PyAIT(it)->index++; \\n if (_PyAIT(it)->nd_m1 == 0) { \\n _PyArray_ITER_NEXT1(_PyAIT(it)); \\n } \\n else if (_PyAIT(it)->contiguous) \\n _PyAIT(it)->dataptr += PyArray_ITEMSIZE(_PyAIT(it)->ao); \\n else if (_PyAIT(it)->nd_m1 == 1) { \\n _PyArray_ITER_NEXT2(_PyAIT(it)); \\n } \\n else { \\n int __npy_i; \\n for (__npy_i=_PyAIT(it)->nd_m1; __npy_i >= 0; __npy_i--) { \\n if (_PyAIT(it)->coordinates[__npy_i] < \\n _PyAIT(it)->dims_m1[__npy_i]) { \\n _PyAIT(it)->coordinates[__npy_i]++; \\n _PyAIT(it)->dataptr += \\n _PyAIT(it)->strides[__npy_i]; \\n break; \\n } \\n else { \\n _PyAIT(it)->coordinates[__npy_i] = 0; \\n _PyAIT(it)->dataptr -= \\n _PyAIT(it)->backstrides[__npy_i]; \\n } \\n } \\n } \\n} while (0)\n\n#define PyArray_ITER_GOTO(it, destination) do { \\n int __npy_i; \\n _PyAIT(it)->index = 0; \\n _PyAIT(it)->dataptr = 
PyArray_BYTES(_PyAIT(it)->ao); \\n for (__npy_i = _PyAIT(it)->nd_m1; __npy_i>=0; __npy_i--) { \\n if (destination[__npy_i] < 0) { \\n destination[__npy_i] += \\n _PyAIT(it)->dims_m1[__npy_i]+1; \\n } \\n _PyAIT(it)->dataptr += destination[__npy_i] * \\n _PyAIT(it)->strides[__npy_i]; \\n _PyAIT(it)->coordinates[__npy_i] = \\n destination[__npy_i]; \\n _PyAIT(it)->index += destination[__npy_i] * \\n ( __npy_i==_PyAIT(it)->nd_m1 ? 1 : \\n _PyAIT(it)->dims_m1[__npy_i+1]+1) ; \\n } \\n} while (0)\n\n#define PyArray_ITER_GOTO1D(it, ind) do { \\n int __npy_i; \\n npy_intp __npy_ind = (npy_intp)(ind); \\n if (__npy_ind < 0) __npy_ind += _PyAIT(it)->size; \\n _PyAIT(it)->index = __npy_ind; \\n if (_PyAIT(it)->nd_m1 == 0) { \\n _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao) + \\n __npy_ind * _PyAIT(it)->strides[0]; \\n } \\n else if (_PyAIT(it)->contiguous) \\n _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao) + \\n __npy_ind * PyArray_ITEMSIZE(_PyAIT(it)->ao); \\n else { \\n _PyAIT(it)->dataptr = PyArray_BYTES(_PyAIT(it)->ao); \\n for (__npy_i = 0; __npy_i<=_PyAIT(it)->nd_m1; \\n __npy_i++) { \\n _PyAIT(it)->coordinates[__npy_i] = \\n (__npy_ind / _PyAIT(it)->factors[__npy_i]); \\n _PyAIT(it)->dataptr += \\n (__npy_ind / _PyAIT(it)->factors[__npy_i]) \\n * _PyAIT(it)->strides[__npy_i]; \\n __npy_ind %= _PyAIT(it)->factors[__npy_i]; \\n } \\n } \\n} while (0)\n\n#define PyArray_ITER_DATA(it) ((void *)(_PyAIT(it)->dataptr))\n\n#define PyArray_ITER_NOTDONE(it) (_PyAIT(it)->index < _PyAIT(it)->size)\n\n\n/*\n * Any object passed to PyArray_Broadcast must be binary compatible\n * with this structure.\n */\n\ntypedef struct {\n PyObject_HEAD\n int numiter; /* number of iters */\n npy_intp size; /* broadcasted size */\n npy_intp index; /* current index */\n int nd; /* number of dims */\n npy_intp dimensions[NPY_MAXDIMS_LEGACY_ITERS]; /* dimensions */\n /*\n * Space for the individual iterators, do not specify size publicly\n * to allow changing it more easily.\n * One 
reason is that Cython uses this for checks and only allows\n * growing structs (as of Cython 3.0.6). It also allows NPY_MAXARGS\n * to be runtime dependent.\n */\n#if (defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD)\n PyArrayIterObject *iters[64];\n#elif defined(__cplusplus)\n /*\n * C++ doesn't strictly support flexible members and gives compilers\n * warnings (pedantic only), so we lie. We can't make it 64 because\n * then Cython is unhappy (larger struct at runtime is OK smaller not).\n */\n PyArrayIterObject *iters[32];\n#else\n PyArrayIterObject *iters[];\n#endif\n} PyArrayMultiIterObject;\n\n#define _PyMIT(m) ((PyArrayMultiIterObject *)(m))\n#define PyArray_MultiIter_RESET(multi) do { \\n int __npy_mi; \\n _PyMIT(multi)->index = 0; \\n for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \\n PyArray_ITER_RESET(_PyMIT(multi)->iters[__npy_mi]); \\n } \\n} while (0)\n\n#define PyArray_MultiIter_NEXT(multi) do { \\n int __npy_mi; \\n _PyMIT(multi)->index++; \\n for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \\n PyArray_ITER_NEXT(_PyMIT(multi)->iters[__npy_mi]); \\n } \\n} while (0)\n\n#define PyArray_MultiIter_GOTO(multi, dest) do { \\n int __npy_mi; \\n for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \\n PyArray_ITER_GOTO(_PyMIT(multi)->iters[__npy_mi], dest); \\n } \\n _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \\n} while (0)\n\n#define PyArray_MultiIter_GOTO1D(multi, ind) do { \\n int __npy_mi; \\n for (__npy_mi=0; __npy_mi < _PyMIT(multi)->numiter; __npy_mi++) { \\n PyArray_ITER_GOTO1D(_PyMIT(multi)->iters[__npy_mi], ind); \\n } \\n _PyMIT(multi)->index = _PyMIT(multi)->iters[0]->index; \\n} while (0)\n\n#define PyArray_MultiIter_DATA(multi, i) \\n ((void *)(_PyMIT(multi)->iters[i]->dataptr))\n\n#define PyArray_MultiIter_NEXTi(multi, i) \\n PyArray_ITER_NEXT(_PyMIT(multi)->iters[i])\n\n#define PyArray_MultiIter_NOTDONE(multi) \\n (_PyMIT(multi)->index < _PyMIT(multi)->size)\n\n\nstatic 
NPY_INLINE int\nPyArray_MultiIter_NUMITER(PyArrayMultiIterObject *multi)\n{\n return multi->numiter;\n}\n\n\nstatic NPY_INLINE npy_intp\nPyArray_MultiIter_SIZE(PyArrayMultiIterObject *multi)\n{\n return multi->size;\n}\n\n\nstatic NPY_INLINE npy_intp\nPyArray_MultiIter_INDEX(PyArrayMultiIterObject *multi)\n{\n return multi->index;\n}\n\n\nstatic NPY_INLINE int\nPyArray_MultiIter_NDIM(PyArrayMultiIterObject *multi)\n{\n return multi->nd;\n}\n\n\nstatic NPY_INLINE npy_intp *\nPyArray_MultiIter_DIMS(PyArrayMultiIterObject *multi)\n{\n return multi->dimensions;\n}\n\n\nstatic NPY_INLINE void **\nPyArray_MultiIter_ITERS(PyArrayMultiIterObject *multi)\n{\n return (void**)multi->iters;\n}\n\n\nenum {\n NPY_NEIGHBORHOOD_ITER_ZERO_PADDING,\n NPY_NEIGHBORHOOD_ITER_ONE_PADDING,\n NPY_NEIGHBORHOOD_ITER_CONSTANT_PADDING,\n NPY_NEIGHBORHOOD_ITER_CIRCULAR_PADDING,\n NPY_NEIGHBORHOOD_ITER_MIRROR_PADDING\n};\n\ntypedef struct {\n PyObject_HEAD\n\n /*\n * PyArrayIterObject part: keep this in this exact order\n */\n int nd_m1; /* number of dimensions - 1 */\n npy_intp index, size;\n npy_intp coordinates[NPY_MAXDIMS_LEGACY_ITERS];/* N-dimensional loop */\n npy_intp dims_m1[NPY_MAXDIMS_LEGACY_ITERS]; /* ao->dimensions - 1 */\n npy_intp strides[NPY_MAXDIMS_LEGACY_ITERS]; /* ao->strides or fake */\n npy_intp backstrides[NPY_MAXDIMS_LEGACY_ITERS];/* how far to jump back */\n npy_intp factors[NPY_MAXDIMS_LEGACY_ITERS]; /* shape factors */\n PyArrayObject *ao;\n char *dataptr; /* pointer to current item*/\n npy_bool contiguous;\n\n npy_intp bounds[NPY_MAXDIMS_LEGACY_ITERS][2];\n npy_intp limits[NPY_MAXDIMS_LEGACY_ITERS][2];\n npy_intp limits_sizes[NPY_MAXDIMS_LEGACY_ITERS];\n npy_iter_get_dataptr_t translate;\n\n /*\n * New members\n */\n npy_intp nd;\n\n /* Dimensions is the dimension of the array */\n npy_intp dimensions[NPY_MAXDIMS_LEGACY_ITERS];\n\n /*\n * Neighborhood points coordinates are computed relatively to the\n * point pointed by _internal_iter\n */\n PyArrayIterObject* 
_internal_iter;\n /*\n * To keep a reference to the representation of the constant value\n * for constant padding\n */\n char* constant;\n\n int mode;\n} PyArrayNeighborhoodIterObject;\n\n/*\n * Neighborhood iterator API\n */\n\n/* General: those work for any mode */\nstatic inline int\nPyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter);\nstatic inline int\nPyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter);\n#if 0\nstatic inline int\nPyArrayNeighborhoodIter_Next2D(PyArrayNeighborhoodIterObject* iter);\n#endif\n\n/*\n * Include inline implementations - functions defined there are not\n * considered public API\n */\n#define NUMPY_CORE_INCLUDE_NUMPY__NEIGHBORHOOD_IMP_H_\n#include "_neighborhood_iterator_imp.h"\n#undef NUMPY_CORE_INCLUDE_NUMPY__NEIGHBORHOOD_IMP_H_\n\n\n\n/* The default array type */\n#define NPY_DEFAULT_TYPE NPY_DOUBLE\n/* default integer type defined in npy_2_compat header */\n\n/*\n * All sorts of useful ways to look into a PyArrayObject. It is recommended\n * to use PyArrayObject * objects instead of always casting from PyObject *,\n * for improved type checking.\n *\n * In many cases here the macro versions of the accessors are deprecated,\n * but can't be immediately changed to inline functions because the\n * preexisting macros accept PyObject * and do automatic casts. Inline\n * functions accepting PyArrayObject * provides for some compile-time\n * checking of correctness when working with these objects in C.\n */\n\n#define PyArray_ISONESEGMENT(m) (PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS) || \\n PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS))\n\n#define PyArray_ISFORTRAN(m) (PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) && \\n (!PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS)))\n\n#define PyArray_FORTRAN_IF(m) ((PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS) ? 
\\n NPY_ARRAY_F_CONTIGUOUS : 0))\n\nstatic inline int\nPyArray_NDIM(const PyArrayObject *arr)\n{\n return ((PyArrayObject_fields *)arr)->nd;\n}\n\nstatic inline void *\nPyArray_DATA(const PyArrayObject *arr)\n{\n return ((PyArrayObject_fields *)arr)->data;\n}\n\nstatic inline char *\nPyArray_BYTES(const PyArrayObject *arr)\n{\n return ((PyArrayObject_fields *)arr)->data;\n}\n\nstatic inline npy_intp *\nPyArray_DIMS(const PyArrayObject *arr)\n{\n return ((PyArrayObject_fields *)arr)->dimensions;\n}\n\nstatic inline npy_intp *\nPyArray_STRIDES(const PyArrayObject *arr)\n{\n return ((PyArrayObject_fields *)arr)->strides;\n}\n\nstatic inline npy_intp\nPyArray_DIM(const PyArrayObject *arr, int idim)\n{\n return ((PyArrayObject_fields *)arr)->dimensions[idim];\n}\n\nstatic inline npy_intp\nPyArray_STRIDE(const PyArrayObject *arr, int istride)\n{\n return ((PyArrayObject_fields *)arr)->strides[istride];\n}\n\nstatic inline NPY_RETURNS_BORROWED_REF PyObject *\nPyArray_BASE(const PyArrayObject *arr)\n{\n return ((PyArrayObject_fields *)arr)->base;\n}\n\nstatic inline NPY_RETURNS_BORROWED_REF PyArray_Descr *\nPyArray_DESCR(const PyArrayObject *arr)\n{\n return ((PyArrayObject_fields *)arr)->descr;\n}\n\nstatic inline int\nPyArray_FLAGS(const PyArrayObject *arr)\n{\n return ((PyArrayObject_fields *)arr)->flags;\n}\n\n\nstatic inline int\nPyArray_TYPE(const PyArrayObject *arr)\n{\n return ((PyArrayObject_fields *)arr)->descr->type_num;\n}\n\nstatic inline int\nPyArray_CHKFLAGS(const PyArrayObject *arr, int flags)\n{\n return (PyArray_FLAGS(arr) & flags) == flags;\n}\n\nstatic inline PyArray_Descr *\nPyArray_DTYPE(const PyArrayObject *arr)\n{\n return ((PyArrayObject_fields *)arr)->descr;\n}\n\nstatic inline npy_intp *\nPyArray_SHAPE(const PyArrayObject *arr)\n{\n return ((PyArrayObject_fields *)arr)->dimensions;\n}\n\n/*\n * Enables the specified array flags. 
Does no checking,\n * assumes you know what you're doing.\n */\nstatic inline void\nPyArray_ENABLEFLAGS(PyArrayObject *arr, int flags)\n{\n ((PyArrayObject_fields *)arr)->flags |= flags;\n}\n\n/*\n * Clears the specified array flags. Does no checking,\n * assumes you know what you're doing.\n */\nstatic inline void\nPyArray_CLEARFLAGS(PyArrayObject *arr, int flags)\n{\n ((PyArrayObject_fields *)arr)->flags &= ~flags;\n}\n\n#if NPY_FEATURE_VERSION >= NPY_1_22_API_VERSION\n static inline NPY_RETURNS_BORROWED_REF PyObject *\n PyArray_HANDLER(PyArrayObject *arr)\n {\n return ((PyArrayObject_fields *)arr)->mem_handler;\n }\n#endif\n\n#define PyTypeNum_ISBOOL(type) ((type) == NPY_BOOL)\n\n#define PyTypeNum_ISUNSIGNED(type) (((type) == NPY_UBYTE) || \\n ((type) == NPY_USHORT) || \\n ((type) == NPY_UINT) || \\n ((type) == NPY_ULONG) || \\n ((type) == NPY_ULONGLONG))\n\n#define PyTypeNum_ISSIGNED(type) (((type) == NPY_BYTE) || \\n ((type) == NPY_SHORT) || \\n ((type) == NPY_INT) || \\n ((type) == NPY_LONG) || \\n ((type) == NPY_LONGLONG))\n\n#define PyTypeNum_ISINTEGER(type) (((type) >= NPY_BYTE) && \\n ((type) <= NPY_ULONGLONG))\n\n#define PyTypeNum_ISFLOAT(type) ((((type) >= NPY_FLOAT) && \\n ((type) <= NPY_LONGDOUBLE)) || \\n ((type) == NPY_HALF))\n\n#define PyTypeNum_ISNUMBER(type) (((type) <= NPY_CLONGDOUBLE) || \\n ((type) == NPY_HALF))\n\n#define PyTypeNum_ISSTRING(type) (((type) == NPY_STRING) || \\n ((type) == NPY_UNICODE))\n\n#define PyTypeNum_ISCOMPLEX(type) (((type) >= NPY_CFLOAT) && \\n ((type) <= NPY_CLONGDOUBLE))\n\n#define PyTypeNum_ISFLEXIBLE(type) (((type) >=NPY_STRING) && \\n ((type) <=NPY_VOID))\n\n#define PyTypeNum_ISDATETIME(type) (((type) >=NPY_DATETIME) && \\n ((type) <=NPY_TIMEDELTA))\n\n#define PyTypeNum_ISUSERDEF(type) (((type) >= NPY_USERDEF) && \\n ((type) < NPY_USERDEF+ \\n NPY_NUMUSERTYPES))\n\n#define PyTypeNum_ISEXTENDED(type) (PyTypeNum_ISFLEXIBLE(type) || \\n PyTypeNum_ISUSERDEF(type))\n\n#define PyTypeNum_ISOBJECT(type) ((type) == 
NPY_OBJECT)\n\n\n#define PyDataType_ISLEGACY(dtype) ((dtype)->type_num < NPY_VSTRING && ((dtype)->type_num >= 0))\n#define PyDataType_ISBOOL(obj) PyTypeNum_ISBOOL(((PyArray_Descr*)(obj))->type_num)\n#define PyDataType_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(((PyArray_Descr*)(obj))->type_num)\n#define PyDataType_ISSIGNED(obj) PyTypeNum_ISSIGNED(((PyArray_Descr*)(obj))->type_num)\n#define PyDataType_ISINTEGER(obj) PyTypeNum_ISINTEGER(((PyArray_Descr*)(obj))->type_num )\n#define PyDataType_ISFLOAT(obj) PyTypeNum_ISFLOAT(((PyArray_Descr*)(obj))->type_num)\n#define PyDataType_ISNUMBER(obj) PyTypeNum_ISNUMBER(((PyArray_Descr*)(obj))->type_num)\n#define PyDataType_ISSTRING(obj) PyTypeNum_ISSTRING(((PyArray_Descr*)(obj))->type_num)\n#define PyDataType_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(((PyArray_Descr*)(obj))->type_num)\n#define PyDataType_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(((PyArray_Descr*)(obj))->type_num)\n#define PyDataType_ISDATETIME(obj) PyTypeNum_ISDATETIME(((PyArray_Descr*)(obj))->type_num)\n#define PyDataType_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(((PyArray_Descr*)(obj))->type_num)\n#define PyDataType_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(((PyArray_Descr*)(obj))->type_num)\n#define PyDataType_ISOBJECT(obj) PyTypeNum_ISOBJECT(((PyArray_Descr*)(obj))->type_num)\n#define PyDataType_MAKEUNSIZED(dtype) ((dtype)->elsize = 0)\n/*\n * PyDataType_* FLAGS, FLACHK, REFCHK, HASFIELDS, HASSUBARRAY, UNSIZED,\n * SUBARRAY, NAMES, FIELDS, C_METADATA, and METADATA require version specific\n * lookup and are defined in npy_2_compat.h.\n */\n\n\n#define PyArray_ISBOOL(obj) PyTypeNum_ISBOOL(PyArray_TYPE(obj))\n#define PyArray_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(PyArray_TYPE(obj))\n#define PyArray_ISSIGNED(obj) PyTypeNum_ISSIGNED(PyArray_TYPE(obj))\n#define PyArray_ISINTEGER(obj) PyTypeNum_ISINTEGER(PyArray_TYPE(obj))\n#define PyArray_ISFLOAT(obj) PyTypeNum_ISFLOAT(PyArray_TYPE(obj))\n#define PyArray_ISNUMBER(obj) PyTypeNum_ISNUMBER(PyArray_TYPE(obj))\n#define PyArray_ISSTRING(obj) 
PyTypeNum_ISSTRING(PyArray_TYPE(obj))\n#define PyArray_ISCOMPLEX(obj) PyTypeNum_ISCOMPLEX(PyArray_TYPE(obj))\n#define PyArray_ISFLEXIBLE(obj) PyTypeNum_ISFLEXIBLE(PyArray_TYPE(obj))\n#define PyArray_ISDATETIME(obj) PyTypeNum_ISDATETIME(PyArray_TYPE(obj))\n#define PyArray_ISUSERDEF(obj) PyTypeNum_ISUSERDEF(PyArray_TYPE(obj))\n#define PyArray_ISEXTENDED(obj) PyTypeNum_ISEXTENDED(PyArray_TYPE(obj))\n#define PyArray_ISOBJECT(obj) PyTypeNum_ISOBJECT(PyArray_TYPE(obj))\n#define PyArray_HASFIELDS(obj) PyDataType_HASFIELDS(PyArray_DESCR(obj))\n\n /*\n * FIXME: This should check for a flag on the data-type that\n * states whether or not it is variable length. Because the\n * ISFLEXIBLE check is hard-coded to the built-in data-types.\n */\n#define PyArray_ISVARIABLE(obj) PyTypeNum_ISFLEXIBLE(PyArray_TYPE(obj))\n\n#define PyArray_SAFEALIGNEDCOPY(obj) (PyArray_ISALIGNED(obj) && !PyArray_ISVARIABLE(obj))\n\n\n#define NPY_LITTLE '<'\n#define NPY_BIG '>'\n#define NPY_NATIVE '='\n#define NPY_SWAP 's'\n#define NPY_IGNORE '|'\n\n#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN\n#define NPY_NATBYTE NPY_BIG\n#define NPY_OPPBYTE NPY_LITTLE\n#else\n#define NPY_NATBYTE NPY_LITTLE\n#define NPY_OPPBYTE NPY_BIG\n#endif\n\n#define PyArray_ISNBO(arg) ((arg) != NPY_OPPBYTE)\n#define PyArray_IsNativeByteOrder PyArray_ISNBO\n#define PyArray_ISNOTSWAPPED(m) PyArray_ISNBO(PyArray_DESCR(m)->byteorder)\n#define PyArray_ISBYTESWAPPED(m) (!PyArray_ISNOTSWAPPED(m))\n\n#define PyArray_FLAGSWAP(m, flags) (PyArray_CHKFLAGS(m, flags) && \\n PyArray_ISNOTSWAPPED(m))\n\n#define PyArray_ISCARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY)\n#define PyArray_ISCARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_CARRAY_RO)\n#define PyArray_ISFARRAY(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY)\n#define PyArray_ISFARRAY_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_FARRAY_RO)\n#define PyArray_ISBEHAVED(m) PyArray_FLAGSWAP(m, NPY_ARRAY_BEHAVED)\n#define PyArray_ISBEHAVED_RO(m) PyArray_FLAGSWAP(m, NPY_ARRAY_ALIGNED)\n\n\n#define 
PyDataType_ISNOTSWAPPED(d) PyArray_ISNBO(((PyArray_Descr *)(d))->byteorder)\n#define PyDataType_ISBYTESWAPPED(d) (!PyDataType_ISNOTSWAPPED(d))\n\n/************************************************************\n * A struct used by PyArray_CreateSortedStridePerm, new in 1.7.\n ************************************************************/\n\ntypedef struct {\n npy_intp perm, stride;\n} npy_stride_sort_item;\n\n/************************************************************\n * This is the form of the struct that's stored in the\n * PyCapsule returned by an array's __array_struct__ attribute. See\n * https://docs.scipy.org/doc/numpy/reference/arrays.interface.html for the full\n * documentation.\n ************************************************************/\ntypedef struct {\n int two; /*\n * contains the integer 2 as a sanity\n * check\n */\n\n int nd; /* number of dimensions */\n\n char typekind; /*\n * kind in array --- character code of\n * typestr\n */\n\n int itemsize; /* size of each element */\n\n int flags; /*\n * how should be data interpreted. Valid\n * flags are CONTIGUOUS (1), F_CONTIGUOUS (2),\n * ALIGNED (0x100), NOTSWAPPED (0x200), and\n * WRITEABLE (0x400). ARR_HAS_DESCR (0x800)\n * states that arrdescr field is present in\n * structure\n */\n\n npy_intp *shape; /*\n * A length-nd array of shape\n * information\n */\n\n npy_intp *strides; /* A length-nd array of stride information */\n\n void *data; /* A pointer to the first element of the array */\n\n PyObject *descr; /*\n * A list of fields or NULL (ignored if flags\n * does not have ARR_HAS_DESCR flag set)\n */\n} PyArrayInterface;\n\n\n/****************************************\n * NpyString\n *\n * Types used by the NpyString API.\n ****************************************/\n\n/*\n * A "packed" encoded string. 
The string data must be accessed by first unpacking the string.\n */\ntypedef struct npy_packed_static_string npy_packed_static_string;\n\n/*\n * An unpacked read-only view onto the data in a packed string\n */\ntypedef struct npy_unpacked_static_string {\n size_t size;\n const char *buf;\n} npy_static_string;\n\n/*\n * Handles heap allocations for static strings.\n */\ntypedef struct npy_string_allocator npy_string_allocator;\n\ntypedef struct {\n PyArray_Descr base;\n // The object representing a null value\n PyObject *na_object;\n // Flag indicating whether or not to coerce arbitrary objects to strings\n char coerce;\n // Flag indicating the na object is NaN-like\n char has_nan_na;\n // Flag indicating the na object is a string\n char has_string_na;\n // If nonzero, indicates that this instance is owned by an array already\n char array_owned;\n // The string data to use when a default string is needed\n npy_static_string default_string;\n // The name of the missing data object, if any\n npy_static_string na_name;\n // the allocator should only be directly accessed after\n // acquiring the allocator_lock and the lock should\n // be released immediately after the allocator is\n // no longer needed\n npy_string_allocator *allocator;\n} PyArray_StringDTypeObject;\n\n/*\n * PyArray_DTypeMeta related definitions.\n *\n * As of now, this API is preliminary and will be extended as necessary.\n */\n#if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD\n /*\n * The Structures defined in this block are currently considered\n * private API and may change without warning!\n * Part of this (at least the size) is expected to be public API without\n * further modifications.\n */\n /* TODO: Make this definition public in the API, as soon as its settled */\n NPY_NO_EXPORT extern PyTypeObject PyArrayDTypeMeta_Type;\n\n /*\n * While NumPy DTypes would not need to be heap types the plan is to\n * make DTypes available in Python at which point they will be heap types.\n * Since we 
also wish to add fields to the DType class, this looks like\n * a typical instance definition, but with PyHeapTypeObject instead of\n * only the PyObject_HEAD.\n * This must only be exposed very extremely careful consideration, since\n * it is a fairly complex construct which may be better to allow\n * refactoring of.\n */\n typedef struct {\n PyHeapTypeObject super;\n\n /*\n * Most DTypes will have a singleton default instance, for the\n * parametric legacy DTypes (bytes, string, void, datetime) this\n * may be a pointer to the *prototype* instance?\n */\n PyArray_Descr *singleton;\n /* Copy of the legacy DTypes type number, usually invalid. */\n int type_num;\n\n /* The type object of the scalar instances (may be NULL?) */\n PyTypeObject *scalar_type;\n /*\n * DType flags to signal legacy, parametric, or\n * abstract. But plenty of space for additional information/flags.\n */\n npy_uint64 flags;\n\n /*\n * Use indirection in order to allow a fixed size for this struct.\n * A stable ABI size makes creating a static DType less painful\n * while also ensuring flexibility for all opaque API (with one\n * indirection due the pointer lookup).\n */\n void *dt_slots;\n void *reserved[3];\n } PyArray_DTypeMeta;\n\n#endif /* NPY_INTERNAL_BUILD */\n\n\n/*\n * Use the keyword NPY_DEPRECATED_INCLUDES to ensure that the header files\n * npy_*_*_deprecated_api.h are only included from here and nowhere else.\n */\n#ifdef NPY_DEPRECATED_INCLUDES\n#error "Do not use the reserved keyword NPY_DEPRECATED_INCLUDES."\n#endif\n#define NPY_DEPRECATED_INCLUDES\n/*\n * There is no file npy_1_8_deprecated_api.h since there are no additional\n * deprecated API features in NumPy 1.8.\n *\n * Note to maintainers: insert code like the following in future NumPy\n * versions.\n *\n * #if !defined(NPY_NO_DEPRECATED_API) || \\n * (NPY_NO_DEPRECATED_API < NPY_1_9_API_VERSION)\n * #include "npy_1_9_deprecated_api.h"\n * #endif\n * Then in the npy_1_9_deprecated_api.h header add something like this\n 
* --------------------\n * #ifndef NPY_DEPRECATED_INCLUDES\n * #error "Should never include npy_*_*_deprecated_api directly."\n * #endif\n * #ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_1_7_DEPRECATED_API_H_\n * #define NUMPY_CORE_INCLUDE_NUMPY_NPY_1_7_DEPRECATED_API_H_\n * \n * #ifndef NPY_NO_DEPRECATED_API\n * #if defined(_WIN32)\n * #define _WARN___STR2__(x) #x\n * #define _WARN___STR1__(x) _WARN___STR2__(x)\n * #define _WARN___LOC__ __FILE__ "(" _WARN___STR1__(__LINE__) ") : Warning Msg: "\n * #pragma message(_WARN___LOC__"Using deprecated NumPy API, disable it with " \\n * "#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION")\n * #else\n * #warning "Using deprecated NumPy API, disable it with " \\n * "#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION"\n * #endif\n * #endif\n * --------------------\n */\n#undef NPY_DEPRECATED_INCLUDES\n\n#ifdef __cplusplus\n}\n#endif\n\n#endif /* NUMPY_CORE_INCLUDE_NUMPY_NDARRAYTYPES_H_ */\n
.venv\Lib\site-packages\numpy\_core\include\numpy\ndarraytypes.h
ndarraytypes.h
C
67,760
0.75
0.065641
0.574685
python-kit
381
2024-08-11T23:17:54.207200
BSD-3-Clause
false
185d4395a5f61b640db66856f8cd75f7
/*\n * This header file defines relevant features which:\n * - Require runtime inspection depending on the NumPy version.\n * - May be needed when compiling with an older version of NumPy to allow\n * a smooth transition.\n *\n * As such, it is shipped with NumPy 2.0, but designed to be vendored in full\n * or parts by downstream projects.\n *\n * It must be included after any other includes. `import_array()` must have\n * been called in the scope or version dependency will misbehave, even when\n * only `PyUFunc_` API is used.\n *\n * If required complicated defs (with inline functions) should be written as:\n *\n * #if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION\n * Simple definition when NumPy 2.0 API is guaranteed.\n * #else\n * static inline definition of a 1.x compatibility shim\n * #if NPY_ABI_VERSION < 0x02000000\n * Make 1.x compatibility shim the public API (1.x only branch)\n * #else\n * Runtime dispatched version (1.x or 2.x)\n * #endif\n * #endif\n *\n * An internal build always passes NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION\n */\n\n#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_2_COMPAT_H_\n#define NUMPY_CORE_INCLUDE_NUMPY_NPY_2_COMPAT_H_\n\n/*\n * New macros for accessing real and complex part of a complex number can be\n * found in "npy_2_complexcompat.h".\n */\n\n\n/*\n * This header is meant to be included by downstream directly for 1.x compat.\n * In that case we need to ensure that users first included the full headers\n * and not just `ndarraytypes.h`.\n */\n\n#ifndef NPY_FEATURE_VERSION\n #error "The NumPy 2 compat header requires `import_array()` for which " \\n "the `ndarraytypes.h` header include is not sufficient. 
Please " \\n "include it after `numpy/ndarrayobject.h` or similar.\n" \\n "To simplify inclusion, you may use `PyArray_ImportNumPy()` " \\n "which is defined in the compat header and is lightweight (can be)."\n#endif\n\n#if NPY_ABI_VERSION < 0x02000000\n /*\n * Define 2.0 feature version as it is needed below to decide whether we\n * compile for both 1.x and 2.x (defining it guarantees 1.x only).\n */\n #define NPY_2_0_API_VERSION 0x00000012\n /*\n * If we are compiling with NumPy 1.x, PyArray_RUNTIME_VERSION so we\n * pretend the `PyArray_RUNTIME_VERSION` is `NPY_FEATURE_VERSION`.\n * This allows downstream to use `PyArray_RUNTIME_VERSION` if they need to.\n */\n #define PyArray_RUNTIME_VERSION NPY_FEATURE_VERSION\n /* Compiling on NumPy 1.x where these are the same: */\n #define PyArray_DescrProto PyArray_Descr\n#endif\n\n\n/*\n * Define a better way to call `_import_array()` to simplify backporting as\n * we now require imports more often (necessary to make ABI flexible).\n */\n#ifdef import_array1\n\nstatic inline int\nPyArray_ImportNumPyAPI(void)\n{\n if (NPY_UNLIKELY(PyArray_API == NULL)) {\n import_array1(-1);\n }\n return 0;\n}\n\n#endif /* import_array1 */\n\n\n/*\n * NPY_DEFAULT_INT\n *\n * The default integer has changed, `NPY_DEFAULT_INT` is available at runtime\n * for use as type number, e.g. `PyArray_DescrFromType(NPY_DEFAULT_INT)`.\n *\n * NPY_RAVEL_AXIS\n *\n * This was introduced in NumPy 2.0 to allow indicating that an axis should be\n * raveled in an operation. 
Before NumPy 2.0, NPY_MAXDIMS was used for this purpose.\n *\n * NPY_MAXDIMS\n *\n * A constant indicating the maximum number dimensions allowed when creating\n * an ndarray.\n *\n * NPY_NTYPES_LEGACY\n *\n * The number of built-in NumPy dtypes.\n */\n#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION\n #define NPY_DEFAULT_INT NPY_INTP\n #define NPY_RAVEL_AXIS NPY_MIN_INT\n #define NPY_MAXARGS 64\n\n#elif NPY_ABI_VERSION < 0x02000000\n #define NPY_DEFAULT_INT NPY_LONG\n #define NPY_RAVEL_AXIS 32\n #define NPY_MAXARGS 32\n\n /* Aliases of 2.x names to 1.x only equivalent names */\n #define NPY_NTYPES NPY_NTYPES_LEGACY\n #define PyArray_DescrProto PyArray_Descr\n #define _PyArray_LegacyDescr PyArray_Descr\n /* NumPy 2 definition always works, but add it for 1.x only */\n #define PyDataType_ISLEGACY(dtype) (1)\n#else\n #define NPY_DEFAULT_INT \\n (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION ? NPY_INTP : NPY_LONG)\n #define NPY_RAVEL_AXIS \\n (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION ? NPY_MIN_INT : 32)\n #define NPY_MAXARGS \\n (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION ? 64 : 32)\n#endif\n\n\n/*\n * Access inline functions for descriptor fields. Except for the first\n * few fields, these needed to be moved (elsize, alignment) for\n * additional space. 
Or they are descriptor specific and are not generally\n * available anymore (metadata, c_metadata, subarray, names, fields).\n *\n * Most of these are defined via the `DESCR_ACCESSOR` macro helper.\n */\n#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION || NPY_ABI_VERSION < 0x02000000\n /* Compiling for 1.x or 2.x only, direct field access is OK: */\n\n static inline void\n PyDataType_SET_ELSIZE(PyArray_Descr *dtype, npy_intp size)\n {\n dtype->elsize = size;\n }\n\n static inline npy_uint64\n PyDataType_FLAGS(const PyArray_Descr *dtype)\n {\n #if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION\n return dtype->flags;\n #else\n return (unsigned char)dtype->flags; /* Need unsigned cast on 1.x */\n #endif\n }\n\n #define DESCR_ACCESSOR(FIELD, field, type, legacy_only) \\n static inline type \\n PyDataType_##FIELD(const PyArray_Descr *dtype) { \\n if (legacy_only && !PyDataType_ISLEGACY(dtype)) { \\n return (type)0; \\n } \\n return ((_PyArray_LegacyDescr *)dtype)->field; \\n }\n#else /* compiling for both 1.x and 2.x */\n\n static inline void\n PyDataType_SET_ELSIZE(PyArray_Descr *dtype, npy_intp size)\n {\n if (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION) {\n ((_PyArray_DescrNumPy2 *)dtype)->elsize = size;\n }\n else {\n ((PyArray_DescrProto *)dtype)->elsize = (int)size;\n }\n }\n\n static inline npy_uint64\n PyDataType_FLAGS(const PyArray_Descr *dtype)\n {\n if (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION) {\n return ((_PyArray_DescrNumPy2 *)dtype)->flags;\n }\n else {\n return (unsigned char)((PyArray_DescrProto *)dtype)->flags;\n }\n }\n\n /* Cast to LegacyDescr always fine but needed when `legacy_only` */\n #define DESCR_ACCESSOR(FIELD, field, type, legacy_only) \\n static inline type \\n PyDataType_##FIELD(const PyArray_Descr *dtype) { \\n if (legacy_only && !PyDataType_ISLEGACY(dtype)) { \\n return (type)0; \\n } \\n if (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION) { \\n return ((_PyArray_LegacyDescr *)dtype)->field; \\n } \\n else { \\n return 
((PyArray_DescrProto *)dtype)->field; \\n } \\n }\n#endif\n\nDESCR_ACCESSOR(ELSIZE, elsize, npy_intp, 0)\nDESCR_ACCESSOR(ALIGNMENT, alignment, npy_intp, 0)\nDESCR_ACCESSOR(METADATA, metadata, PyObject *, 1)\nDESCR_ACCESSOR(SUBARRAY, subarray, PyArray_ArrayDescr *, 1)\nDESCR_ACCESSOR(NAMES, names, PyObject *, 1)\nDESCR_ACCESSOR(FIELDS, fields, PyObject *, 1)\nDESCR_ACCESSOR(C_METADATA, c_metadata, NpyAuxData *, 1)\n\n#undef DESCR_ACCESSOR\n\n\n#if !(defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD)\n#if NPY_FEATURE_VERSION >= NPY_2_0_API_VERSION\n static inline PyArray_ArrFuncs *\n PyDataType_GetArrFuncs(const PyArray_Descr *descr)\n {\n return _PyDataType_GetArrFuncs(descr);\n }\n#elif NPY_ABI_VERSION < 0x02000000\n static inline PyArray_ArrFuncs *\n PyDataType_GetArrFuncs(const PyArray_Descr *descr)\n {\n return descr->f;\n }\n#else\n static inline PyArray_ArrFuncs *\n PyDataType_GetArrFuncs(const PyArray_Descr *descr)\n {\n if (PyArray_RUNTIME_VERSION >= NPY_2_0_API_VERSION) {\n return _PyDataType_GetArrFuncs(descr);\n }\n else {\n return ((PyArray_DescrProto *)descr)->f;\n }\n }\n#endif\n\n\n#endif /* not internal build */\n\n#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_2_COMPAT_H_ */\n
.venv\Lib\site-packages\numpy\_core\include\numpy\npy_2_compat.h
npy_2_compat.h
C
8,795
0.95
0.11245
0.581818
awesome-app
878
2025-05-15T19:15:18.136419
MIT
false
ba47551381833f768631cd5132d16b16
/* This header is designed to be copy-pasted into downstream packages, since it provides\n a compatibility layer between the old C struct complex types and the new native C99\n complex types. The new macros are in numpy/npy_math.h, which is why it is included here. */\n#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_2_COMPLEXCOMPAT_H_\n#define NUMPY_CORE_INCLUDE_NUMPY_NPY_2_COMPLEXCOMPAT_H_\n\n#include <numpy/npy_math.h>\n\n#ifndef NPY_CSETREALF\n#define NPY_CSETREALF(c, r) (c)->real = (r)\n#endif\n#ifndef NPY_CSETIMAGF\n#define NPY_CSETIMAGF(c, i) (c)->imag = (i)\n#endif\n#ifndef NPY_CSETREAL\n#define NPY_CSETREAL(c, r) (c)->real = (r)\n#endif\n#ifndef NPY_CSETIMAG\n#define NPY_CSETIMAG(c, i) (c)->imag = (i)\n#endif\n#ifndef NPY_CSETREALL\n#define NPY_CSETREALL(c, r) (c)->real = (r)\n#endif\n#ifndef NPY_CSETIMAGL\n#define NPY_CSETIMAGL(c, i) (c)->imag = (i)\n#endif\n\n#endif\n
.venv\Lib\site-packages\numpy\_core\include\numpy\npy_2_complexcompat.h
npy_2_complexcompat.h
C
885
0.95
0
0.92
node-utils
233
2025-05-05T22:12:32.810445
BSD-3-Clause
false
6aeabbf500c8b675aef5c6e6fcbf2296
/*\n * This is a convenience header file providing compatibility utilities\n * for supporting different minor versions of Python 3.\n * It was originally used to support the transition from Python 2,\n * hence the "3k" naming.\n *\n * If you want to use this for your own projects, it's recommended to make a\n * copy of it. We don't provide backwards compatibility guarantees.\n */\n\n#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_3KCOMPAT_H_\n#define NUMPY_CORE_INCLUDE_NUMPY_NPY_3KCOMPAT_H_\n\n#include <Python.h>\n#include <stdio.h>\n\n#include "npy_common.h"\n\n#ifdef __cplusplus\nextern "C" {\n#endif\n\n/* Python13 removes _PyLong_AsInt */\nstatic inline int\nNpy__PyLong_AsInt(PyObject *obj)\n{\n int overflow;\n long result = PyLong_AsLongAndOverflow(obj, &overflow);\n\n /* INT_MAX and INT_MIN are defined in Python.h */\n if (overflow || result > INT_MAX || result < INT_MIN) {\n /* XXX: could be cute and give a different\n message for overflow == -1 */\n PyErr_SetString(PyExc_OverflowError,\n "Python int too large to convert to C int");\n return -1;\n }\n return (int)result;\n}\n\n#if defined _MSC_VER && _MSC_VER >= 1900\n\n#include <stdlib.h>\n\n/*\n * Macros to protect CRT calls against instant termination when passed an\n * invalid parameter (https://bugs.python.org/issue23524).\n */\nextern _invalid_parameter_handler _Py_silent_invalid_parameter_handler;\n#define NPY_BEGIN_SUPPRESS_IPH { _invalid_parameter_handler _Py_old_handler = \\n _set_thread_local_invalid_parameter_handler(_Py_silent_invalid_parameter_handler);\n#define NPY_END_SUPPRESS_IPH _set_thread_local_invalid_parameter_handler(_Py_old_handler); }\n\n#else\n\n#define NPY_BEGIN_SUPPRESS_IPH\n#define NPY_END_SUPPRESS_IPH\n\n#endif /* _MSC_VER >= 1900 */\n\n/*\n * PyFile_* compatibility\n */\n\n/*\n * Get a FILE* handle to the file represented by the Python object\n */\nstatic inline FILE*\nnpy_PyFile_Dup2(PyObject *file, char *mode, npy_off_t *orig_pos)\n{\n int fd, fd2, unbuf;\n Py_ssize_t fd2_tmp;\n PyObject 
*ret, *os, *io, *io_raw;\n npy_off_t pos;\n FILE *handle;\n\n /* Flush first to ensure things end up in the file in the correct order */\n ret = PyObject_CallMethod(file, "flush", "");\n if (ret == NULL) {\n return NULL;\n }\n Py_DECREF(ret);\n fd = PyObject_AsFileDescriptor(file);\n if (fd == -1) {\n return NULL;\n }\n\n /*\n * The handle needs to be dup'd because we have to call fclose\n * at the end\n */\n os = PyImport_ImportModule("os");\n if (os == NULL) {\n return NULL;\n }\n ret = PyObject_CallMethod(os, "dup", "i", fd);\n Py_DECREF(os);\n if (ret == NULL) {\n return NULL;\n }\n fd2_tmp = PyNumber_AsSsize_t(ret, PyExc_IOError);\n Py_DECREF(ret);\n if (fd2_tmp == -1 && PyErr_Occurred()) {\n return NULL;\n }\n if (fd2_tmp < INT_MIN || fd2_tmp > INT_MAX) {\n PyErr_SetString(PyExc_IOError,\n "Getting an 'int' from os.dup() failed");\n return NULL;\n }\n fd2 = (int)fd2_tmp;\n\n /* Convert to FILE* handle */\n#ifdef _WIN32\n NPY_BEGIN_SUPPRESS_IPH\n handle = _fdopen(fd2, mode);\n NPY_END_SUPPRESS_IPH\n#else\n handle = fdopen(fd2, mode);\n#endif\n if (handle == NULL) {\n PyErr_SetString(PyExc_IOError,\n "Getting a FILE* from a Python file object via "\n "_fdopen failed. 
If you built NumPy, you probably "\n "linked with the wrong debug/release runtime");\n return NULL;\n }\n\n /* Record the original raw file handle position */\n *orig_pos = npy_ftell(handle);\n if (*orig_pos == -1) {\n /* The io module is needed to determine if buffering is used */\n io = PyImport_ImportModule("io");\n if (io == NULL) {\n fclose(handle);\n return NULL;\n }\n /* File object instances of RawIOBase are unbuffered */\n io_raw = PyObject_GetAttrString(io, "RawIOBase");\n Py_DECREF(io);\n if (io_raw == NULL) {\n fclose(handle);\n return NULL;\n }\n unbuf = PyObject_IsInstance(file, io_raw);\n Py_DECREF(io_raw);\n if (unbuf == 1) {\n /* Succeed if the IO is unbuffered */\n return handle;\n }\n else {\n PyErr_SetString(PyExc_IOError, "obtaining file position failed");\n fclose(handle);\n return NULL;\n }\n }\n\n /* Seek raw handle to the Python-side position */\n ret = PyObject_CallMethod(file, "tell", "");\n if (ret == NULL) {\n fclose(handle);\n return NULL;\n }\n pos = PyLong_AsLongLong(ret);\n Py_DECREF(ret);\n if (PyErr_Occurred()) {\n fclose(handle);\n return NULL;\n }\n if (npy_fseek(handle, pos, SEEK_SET) == -1) {\n PyErr_SetString(PyExc_IOError, "seeking file failed");\n fclose(handle);\n return NULL;\n }\n return handle;\n}\n\n/*\n * Close the dup-ed file handle, and seek the Python one to the current position\n */\nstatic inline int\nnpy_PyFile_DupClose2(PyObject *file, FILE* handle, npy_off_t orig_pos)\n{\n int fd, unbuf;\n PyObject *ret, *io, *io_raw;\n npy_off_t position;\n\n position = npy_ftell(handle);\n\n /* Close the FILE* handle */\n fclose(handle);\n\n /*\n * Restore original file handle position, in order to not confuse\n * Python-side data structures\n */\n fd = PyObject_AsFileDescriptor(file);\n if (fd == -1) {\n return -1;\n }\n\n if (npy_lseek(fd, orig_pos, SEEK_SET) == -1) {\n\n /* The io module is needed to determine if buffering is used */\n io = PyImport_ImportModule("io");\n if (io == NULL) {\n return -1;\n }\n /* File object 
instances of RawIOBase are unbuffered */\n io_raw = PyObject_GetAttrString(io, "RawIOBase");\n Py_DECREF(io);\n if (io_raw == NULL) {\n return -1;\n }\n unbuf = PyObject_IsInstance(file, io_raw);\n Py_DECREF(io_raw);\n if (unbuf == 1) {\n /* Succeed if the IO is unbuffered */\n return 0;\n }\n else {\n PyErr_SetString(PyExc_IOError, "seeking file failed");\n return -1;\n }\n }\n\n if (position == -1) {\n PyErr_SetString(PyExc_IOError, "obtaining file position failed");\n return -1;\n }\n\n /* Seek Python-side handle to the FILE* handle position */\n ret = PyObject_CallMethod(file, "seek", NPY_OFF_T_PYFMT "i", position, 0);\n if (ret == NULL) {\n return -1;\n }\n Py_DECREF(ret);\n return 0;\n}\n\nstatic inline PyObject*\nnpy_PyFile_OpenFile(PyObject *filename, const char *mode)\n{\n PyObject *open;\n open = PyDict_GetItemString(PyEval_GetBuiltins(), "open");\n if (open == NULL) {\n return NULL;\n }\n return PyObject_CallFunction(open, "Os", filename, mode);\n}\n\nstatic inline int\nnpy_PyFile_CloseFile(PyObject *file)\n{\n PyObject *ret;\n\n ret = PyObject_CallMethod(file, "close", NULL);\n if (ret == NULL) {\n return -1;\n }\n Py_DECREF(ret);\n return 0;\n}\n\n/* This is a copy of _PyErr_ChainExceptions, which\n * is no longer exported from Python3.12\n */\nstatic inline void\nnpy_PyErr_ChainExceptions(PyObject *exc, PyObject *val, PyObject *tb)\n{\n if (exc == NULL)\n return;\n\n if (PyErr_Occurred()) {\n PyObject *exc2, *val2, *tb2;\n PyErr_Fetch(&exc2, &val2, &tb2);\n PyErr_NormalizeException(&exc, &val, &tb);\n if (tb != NULL) {\n PyException_SetTraceback(val, tb);\n Py_DECREF(tb);\n }\n Py_DECREF(exc);\n PyErr_NormalizeException(&exc2, &val2, &tb2);\n PyException_SetContext(val2, val);\n PyErr_Restore(exc2, val2, tb2);\n }\n else {\n PyErr_Restore(exc, val, tb);\n }\n}\n\n/* This is a copy of _PyErr_ChainExceptions, with:\n * __cause__ used instead of __context__\n */\nstatic inline void\nnpy_PyErr_ChainExceptionsCause(PyObject *exc, PyObject *val, PyObject 
*tb)\n{\n if (exc == NULL)\n return;\n\n if (PyErr_Occurred()) {\n PyObject *exc2, *val2, *tb2;\n PyErr_Fetch(&exc2, &val2, &tb2);\n PyErr_NormalizeException(&exc, &val, &tb);\n if (tb != NULL) {\n PyException_SetTraceback(val, tb);\n Py_DECREF(tb);\n }\n Py_DECREF(exc);\n PyErr_NormalizeException(&exc2, &val2, &tb2);\n PyException_SetCause(val2, val);\n PyErr_Restore(exc2, val2, tb2);\n }\n else {\n PyErr_Restore(exc, val, tb);\n }\n}\n\n/*\n * PyCObject functions adapted to PyCapsules.\n *\n * The main job here is to get rid of the improved error handling\n * of PyCapsules. It's a shame...\n */\nstatic inline PyObject *\nNpyCapsule_FromVoidPtr(void *ptr, void (*dtor)(PyObject *))\n{\n PyObject *ret = PyCapsule_New(ptr, NULL, dtor);\n if (ret == NULL) {\n PyErr_Clear();\n }\n return ret;\n}\n\nstatic inline PyObject *\nNpyCapsule_FromVoidPtrAndDesc(void *ptr, void* context, void (*dtor)(PyObject *))\n{\n PyObject *ret = NpyCapsule_FromVoidPtr(ptr, dtor);\n if (ret != NULL && PyCapsule_SetContext(ret, context) != 0) {\n PyErr_Clear();\n Py_DECREF(ret);\n ret = NULL;\n }\n return ret;\n}\n\nstatic inline void *\nNpyCapsule_AsVoidPtr(PyObject *obj)\n{\n void *ret = PyCapsule_GetPointer(obj, NULL);\n if (ret == NULL) {\n PyErr_Clear();\n }\n return ret;\n}\n\nstatic inline void *\nNpyCapsule_GetDesc(PyObject *obj)\n{\n return PyCapsule_GetContext(obj);\n}\n\nstatic inline int\nNpyCapsule_Check(PyObject *ptr)\n{\n return PyCapsule_CheckExact(ptr);\n}\n\n#ifdef __cplusplus\n}\n#endif\n\n\n#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_3KCOMPAT_H_ */\n
.venv\Lib\site-packages\numpy\_core\include\numpy\npy_3kcompat.h
npy_3kcompat.h
C
10,022
0.95
0.109626
0.237952
python-kit
748
2025-04-30T07:06:08.115392
MIT
false
21a649152e66026b2667b478d5e8f6c3
#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_COMMON_H_\n#define NUMPY_CORE_INCLUDE_NUMPY_NPY_COMMON_H_\n\n/* need Python.h for npy_intp, npy_uintp */\n#include <Python.h>\n\n/* numpconfig.h is auto-generated */\n#include "numpyconfig.h"\n#ifdef HAVE_NPY_CONFIG_H\n#include <npy_config.h>\n#endif\n\n/*\n * using static inline modifiers when defining npy_math functions\n * allows the compiler to make optimizations when possible\n */\n#ifndef NPY_INLINE_MATH\n#if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD\n #define NPY_INLINE_MATH 1\n#else\n #define NPY_INLINE_MATH 0\n#endif\n#endif\n\n/*\n * gcc does not unroll even with -O3\n * use with care, unrolling on modern cpus rarely speeds things up\n */\n#ifdef HAVE_ATTRIBUTE_OPTIMIZE_UNROLL_LOOPS\n#define NPY_GCC_UNROLL_LOOPS \\n __attribute__((optimize("unroll-loops")))\n#else\n#define NPY_GCC_UNROLL_LOOPS\n#endif\n\n/* highest gcc optimization level, enabled autovectorizer */\n#ifdef HAVE_ATTRIBUTE_OPTIMIZE_OPT_3\n#define NPY_GCC_OPT_3 __attribute__((optimize("O3")))\n#else\n#define NPY_GCC_OPT_3\n#endif\n\n/*\n * mark an argument (starting from 1) that must not be NULL and is not checked\n * DO NOT USE IF FUNCTION CHECKS FOR NULL!! the compiler will remove the check\n */\n#ifdef HAVE_ATTRIBUTE_NONNULL\n#define NPY_GCC_NONNULL(n) __attribute__((nonnull(n)))\n#else\n#define NPY_GCC_NONNULL(n)\n#endif\n\n/*\n * give a hint to the compiler which branch is more likely or unlikely\n * to occur, e.g. rare error cases:\n *\n * if (NPY_UNLIKELY(failure == 0))\n * return NULL;\n *\n * the double !! is to cast the expression (e.g. 
NULL) to a boolean required by\n * the intrinsic\n */\n#ifdef HAVE___BUILTIN_EXPECT\n#define NPY_LIKELY(x) __builtin_expect(!!(x), 1)\n#define NPY_UNLIKELY(x) __builtin_expect(!!(x), 0)\n#else\n#define NPY_LIKELY(x) (x)\n#define NPY_UNLIKELY(x) (x)\n#endif\n\n#ifdef HAVE___BUILTIN_PREFETCH\n/* unlike _mm_prefetch also works on non-x86 */\n#define NPY_PREFETCH(x, rw, loc) __builtin_prefetch((x), (rw), (loc))\n#else\n#ifdef NPY_HAVE_SSE\n/* _MM_HINT_ET[01] (rw = 1) unsupported, only available in gcc >= 4.9 */\n#define NPY_PREFETCH(x, rw, loc) _mm_prefetch((x), loc == 0 ? _MM_HINT_NTA : \\n (loc == 1 ? _MM_HINT_T2 : \\n (loc == 2 ? _MM_HINT_T1 : \\n (loc == 3 ? _MM_HINT_T0 : -1))))\n#else\n#define NPY_PREFETCH(x, rw,loc)\n#endif\n#endif\n\n/* `NPY_INLINE` kept for backwards compatibility; use `inline` instead */\n#if defined(_MSC_VER) && !defined(__clang__)\n #define NPY_INLINE __inline\n/* clang included here to handle clang-cl on Windows */\n#elif defined(__GNUC__) || defined(__clang__)\n #if defined(__STRICT_ANSI__)\n #define NPY_INLINE __inline__\n #else\n #define NPY_INLINE inline\n #endif\n#else\n #define NPY_INLINE\n#endif\n\n#ifdef _MSC_VER\n #define NPY_FINLINE static __forceinline\n#elif defined(__GNUC__)\n #define NPY_FINLINE static inline __attribute__((always_inline))\n#else\n #define NPY_FINLINE static\n#endif\n\n#if defined(_MSC_VER)\n #define NPY_NOINLINE static __declspec(noinline)\n#elif defined(__GNUC__) || defined(__clang__)\n #define NPY_NOINLINE static __attribute__((noinline))\n#else\n #define NPY_NOINLINE static\n#endif\n\n#ifdef __cplusplus\n #define NPY_TLS thread_local\n#elif defined(HAVE_THREAD_LOCAL)\n #define NPY_TLS thread_local\n#elif defined(HAVE__THREAD_LOCAL)\n #define NPY_TLS _Thread_local\n#elif defined(HAVE___THREAD)\n #define NPY_TLS __thread\n#elif defined(HAVE___DECLSPEC_THREAD_)\n #define NPY_TLS __declspec(thread)\n#else\n #define NPY_TLS\n#endif\n\n#ifdef WITH_CPYCHECKER_RETURNS_BORROWED_REF_ATTRIBUTE\n #define 
NPY_RETURNS_BORROWED_REF \\n __attribute__((cpychecker_returns_borrowed_ref))\n#else\n #define NPY_RETURNS_BORROWED_REF\n#endif\n\n#ifdef WITH_CPYCHECKER_STEALS_REFERENCE_TO_ARG_ATTRIBUTE\n #define NPY_STEALS_REF_TO_ARG(n) \\n __attribute__((cpychecker_steals_reference_to_arg(n)))\n#else\n #define NPY_STEALS_REF_TO_ARG(n)\n#endif\n\n/* 64 bit file position support, also on win-amd64. Issue gh-2256 */\n#if defined(_MSC_VER) && defined(_WIN64) && (_MSC_VER > 1400) || \\n defined(__MINGW32__) || defined(__MINGW64__)\n #include <io.h>\n\n #define npy_fseek _fseeki64\n #define npy_ftell _ftelli64\n #define npy_lseek _lseeki64\n #define npy_off_t npy_int64\n\n #if NPY_SIZEOF_INT == 8\n #define NPY_OFF_T_PYFMT "i"\n #elif NPY_SIZEOF_LONG == 8\n #define NPY_OFF_T_PYFMT "l"\n #elif NPY_SIZEOF_LONGLONG == 8\n #define NPY_OFF_T_PYFMT "L"\n #else\n #error Unsupported size for type off_t\n #endif\n#else\n#ifdef HAVE_FSEEKO\n #define npy_fseek fseeko\n#else\n #define npy_fseek fseek\n#endif\n#ifdef HAVE_FTELLO\n #define npy_ftell ftello\n#else\n #define npy_ftell ftell\n#endif\n #include <sys/types.h>\n #ifndef _WIN32\n #include <unistd.h>\n #endif\n #define npy_lseek lseek\n #define npy_off_t off_t\n\n #if NPY_SIZEOF_OFF_T == NPY_SIZEOF_SHORT\n #define NPY_OFF_T_PYFMT "h"\n #elif NPY_SIZEOF_OFF_T == NPY_SIZEOF_INT\n #define NPY_OFF_T_PYFMT "i"\n #elif NPY_SIZEOF_OFF_T == NPY_SIZEOF_LONG\n #define NPY_OFF_T_PYFMT "l"\n #elif NPY_SIZEOF_OFF_T == NPY_SIZEOF_LONGLONG\n #define NPY_OFF_T_PYFMT "L"\n #else\n #error Unsupported size for type off_t\n #endif\n#endif\n\n/* enums for detected endianness */\nenum {\n NPY_CPU_UNKNOWN_ENDIAN,\n NPY_CPU_LITTLE,\n NPY_CPU_BIG\n};\n\n/*\n * This is to typedef npy_intp to the appropriate size for Py_ssize_t.\n * (Before NumPy 2.0 we used Py_intptr_t and Py_uintptr_t from `pyport.h`.)\n */\ntypedef Py_ssize_t npy_intp;\ntypedef size_t npy_uintp;\n\n/*\n * Define sizes that were not defined in numpyconfig.h.\n */\n#define NPY_SIZEOF_CHAR 
1\n#define NPY_SIZEOF_BYTE 1\n#define NPY_SIZEOF_DATETIME 8\n#define NPY_SIZEOF_TIMEDELTA 8\n#define NPY_SIZEOF_HALF 2\n#define NPY_SIZEOF_CFLOAT NPY_SIZEOF_COMPLEX_FLOAT\n#define NPY_SIZEOF_CDOUBLE NPY_SIZEOF_COMPLEX_DOUBLE\n#define NPY_SIZEOF_CLONGDOUBLE NPY_SIZEOF_COMPLEX_LONGDOUBLE\n\n#ifdef constchar\n#undef constchar\n#endif\n\n#define NPY_SSIZE_T_PYFMT "n"\n#define constchar char\n\n/* NPY_INTP_FMT Note:\n * Unlike the other NPY_*_FMT macros, which are used with PyOS_snprintf,\n * NPY_INTP_FMT is used with PyErr_Format and PyUnicode_FromFormat. Those\n * functions use different formatting codes that are portably specified\n * according to the Python documentation. See issue gh-2388.\n */\n#if NPY_SIZEOF_INTP == NPY_SIZEOF_LONG\n #define NPY_INTP NPY_LONG\n #define NPY_UINTP NPY_ULONG\n #define PyIntpArrType_Type PyLongArrType_Type\n #define PyUIntpArrType_Type PyULongArrType_Type\n #define NPY_MAX_INTP NPY_MAX_LONG\n #define NPY_MIN_INTP NPY_MIN_LONG\n #define NPY_MAX_UINTP NPY_MAX_ULONG\n #define NPY_INTP_FMT "ld"\n#elif NPY_SIZEOF_INTP == NPY_SIZEOF_INT\n #define NPY_INTP NPY_INT\n #define NPY_UINTP NPY_UINT\n #define PyIntpArrType_Type PyIntArrType_Type\n #define PyUIntpArrType_Type PyUIntArrType_Type\n #define NPY_MAX_INTP NPY_MAX_INT\n #define NPY_MIN_INTP NPY_MIN_INT\n #define NPY_MAX_UINTP NPY_MAX_UINT\n #define NPY_INTP_FMT "d"\n#elif defined(PY_LONG_LONG) && (NPY_SIZEOF_INTP == NPY_SIZEOF_LONGLONG)\n #define NPY_INTP NPY_LONGLONG\n #define NPY_UINTP NPY_ULONGLONG\n #define PyIntpArrType_Type PyLongLongArrType_Type\n #define PyUIntpArrType_Type PyULongLongArrType_Type\n #define NPY_MAX_INTP NPY_MAX_LONGLONG\n #define NPY_MIN_INTP NPY_MIN_LONGLONG\n #define NPY_MAX_UINTP NPY_MAX_ULONGLONG\n #define NPY_INTP_FMT "lld"\n#else\n #error "Failed to correctly define NPY_INTP and NPY_UINTP"\n#endif\n\n\n/*\n * Some platforms don't define bool, long long, or long double.\n * Handle that here.\n */\n#define NPY_BYTE_FMT "hhd"\n#define NPY_UBYTE_FMT 
"hhu"\n#define NPY_SHORT_FMT "hd"\n#define NPY_USHORT_FMT "hu"\n#define NPY_INT_FMT "d"\n#define NPY_UINT_FMT "u"\n#define NPY_LONG_FMT "ld"\n#define NPY_ULONG_FMT "lu"\n#define NPY_HALF_FMT "g"\n#define NPY_FLOAT_FMT "g"\n#define NPY_DOUBLE_FMT "g"\n\n\n#ifdef PY_LONG_LONG\ntypedef PY_LONG_LONG npy_longlong;\ntypedef unsigned PY_LONG_LONG npy_ulonglong;\n# ifdef _MSC_VER\n# define NPY_LONGLONG_FMT "I64d"\n# define NPY_ULONGLONG_FMT "I64u"\n# else\n# define NPY_LONGLONG_FMT "lld"\n# define NPY_ULONGLONG_FMT "llu"\n# endif\n# ifdef _MSC_VER\n# define NPY_LONGLONG_SUFFIX(x) (x##i64)\n# define NPY_ULONGLONG_SUFFIX(x) (x##Ui64)\n# else\n# define NPY_LONGLONG_SUFFIX(x) (x##LL)\n# define NPY_ULONGLONG_SUFFIX(x) (x##ULL)\n# endif\n#else\ntypedef long npy_longlong;\ntypedef unsigned long npy_ulonglong;\n# define NPY_LONGLONG_SUFFIX(x) (x##L)\n# define NPY_ULONGLONG_SUFFIX(x) (x##UL)\n#endif\n\n\ntypedef unsigned char npy_bool;\n#define NPY_FALSE 0\n#define NPY_TRUE 1\n/*\n * `NPY_SIZEOF_LONGDOUBLE` isn't usually equal to sizeof(long double).\n * In some certain cases, it may forced to be equal to sizeof(double)\n * even against the compiler implementation and the same goes for\n * `complex long double`.\n *\n * Therefore, avoid `long double`, use `npy_longdouble` instead,\n * and when it comes to standard math functions make sure of using\n * the double version when `NPY_SIZEOF_LONGDOUBLE` == `NPY_SIZEOF_DOUBLE`.\n * For example:\n * npy_longdouble *ptr, x;\n * #if NPY_SIZEOF_LONGDOUBLE == NPY_SIZEOF_DOUBLE\n * npy_longdouble r = modf(x, ptr);\n * #else\n * npy_longdouble r = modfl(x, ptr);\n * #endif\n *\n * See https://github.com/numpy/numpy/issues/20348\n */\n#if NPY_SIZEOF_LONGDOUBLE == NPY_SIZEOF_DOUBLE\n #define NPY_LONGDOUBLE_FMT "g"\n #define longdouble_t double\n typedef double npy_longdouble;\n#else\n #define NPY_LONGDOUBLE_FMT "Lg"\n #define longdouble_t long double\n typedef long double npy_longdouble;\n#endif\n\n#ifndef Py_USING_UNICODE\n#error Must use Python 
with unicode enabled.\n#endif\n\n\ntypedef signed char npy_byte;\ntypedef unsigned char npy_ubyte;\ntypedef unsigned short npy_ushort;\ntypedef unsigned int npy_uint;\ntypedef unsigned long npy_ulong;\n\n/* These are for completeness */\ntypedef char npy_char;\ntypedef short npy_short;\ntypedef int npy_int;\ntypedef long npy_long;\ntypedef float npy_float;\ntypedef double npy_double;\n\ntypedef Py_hash_t npy_hash_t;\n#define NPY_SIZEOF_HASH_T NPY_SIZEOF_INTP\n\n#if defined(__cplusplus)\n\ntypedef struct\n{\n double _Val[2];\n} npy_cdouble;\n\ntypedef struct\n{\n float _Val[2];\n} npy_cfloat;\n\ntypedef struct\n{\n long double _Val[2];\n} npy_clongdouble;\n\n#else\n\n#include <complex.h>\n\n\n#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)\ntypedef _Dcomplex npy_cdouble;\ntypedef _Fcomplex npy_cfloat;\ntypedef _Lcomplex npy_clongdouble;\n#else /* !defined(_MSC_VER) || defined(__INTEL_COMPILER) */\ntypedef double _Complex npy_cdouble;\ntypedef float _Complex npy_cfloat;\ntypedef longdouble_t _Complex npy_clongdouble;\n#endif\n\n#endif\n\n/*\n * numarray-style bit-width typedefs\n */\n#define NPY_MAX_INT8 127\n#define NPY_MIN_INT8 -128\n#define NPY_MAX_UINT8 255\n#define NPY_MAX_INT16 32767\n#define NPY_MIN_INT16 -32768\n#define NPY_MAX_UINT16 65535\n#define NPY_MAX_INT32 2147483647\n#define NPY_MIN_INT32 (-NPY_MAX_INT32 - 1)\n#define NPY_MAX_UINT32 4294967295U\n#define NPY_MAX_INT64 NPY_LONGLONG_SUFFIX(9223372036854775807)\n#define NPY_MIN_INT64 (-NPY_MAX_INT64 - NPY_LONGLONG_SUFFIX(1))\n#define NPY_MAX_UINT64 NPY_ULONGLONG_SUFFIX(18446744073709551615)\n#define NPY_MAX_INT128 NPY_LONGLONG_SUFFIX(85070591730234615865843651857942052864)\n#define NPY_MIN_INT128 (-NPY_MAX_INT128 - NPY_LONGLONG_SUFFIX(1))\n#define NPY_MAX_UINT128 NPY_ULONGLONG_SUFFIX(170141183460469231731687303715884105728)\n#define NPY_MIN_DATETIME NPY_MIN_INT64\n#define NPY_MAX_DATETIME NPY_MAX_INT64\n#define NPY_MIN_TIMEDELTA NPY_MIN_INT64\n#define NPY_MAX_TIMEDELTA NPY_MAX_INT64\n\n /* Need to 
find the number of bits for each type and\n make definitions accordingly.\n\n C states that sizeof(char) == 1 by definition\n\n So, just using the sizeof keyword won't help.\n\n It also looks like Python itself uses sizeof(char) quite a\n bit, which by definition should be 1 all the time.\n\n Idea: Make Use of CHAR_BIT which should tell us how many\n BITS per CHARACTER\n */\n\n /* Include platform definitions -- These are in the C89/90 standard */\n#include <limits.h>\n#define NPY_MAX_BYTE SCHAR_MAX\n#define NPY_MIN_BYTE SCHAR_MIN\n#define NPY_MAX_UBYTE UCHAR_MAX\n#define NPY_MAX_SHORT SHRT_MAX\n#define NPY_MIN_SHORT SHRT_MIN\n#define NPY_MAX_USHORT USHRT_MAX\n#define NPY_MAX_INT INT_MAX\n#ifndef INT_MIN\n#define INT_MIN (-INT_MAX - 1)\n#endif\n#define NPY_MIN_INT INT_MIN\n#define NPY_MAX_UINT UINT_MAX\n#define NPY_MAX_LONG LONG_MAX\n#define NPY_MIN_LONG LONG_MIN\n#define NPY_MAX_ULONG ULONG_MAX\n\n#define NPY_BITSOF_BOOL (sizeof(npy_bool) * CHAR_BIT)\n#define NPY_BITSOF_CHAR CHAR_BIT\n#define NPY_BITSOF_BYTE (NPY_SIZEOF_BYTE * CHAR_BIT)\n#define NPY_BITSOF_SHORT (NPY_SIZEOF_SHORT * CHAR_BIT)\n#define NPY_BITSOF_INT (NPY_SIZEOF_INT * CHAR_BIT)\n#define NPY_BITSOF_LONG (NPY_SIZEOF_LONG * CHAR_BIT)\n#define NPY_BITSOF_LONGLONG (NPY_SIZEOF_LONGLONG * CHAR_BIT)\n#define NPY_BITSOF_INTP (NPY_SIZEOF_INTP * CHAR_BIT)\n#define NPY_BITSOF_HALF (NPY_SIZEOF_HALF * CHAR_BIT)\n#define NPY_BITSOF_FLOAT (NPY_SIZEOF_FLOAT * CHAR_BIT)\n#define NPY_BITSOF_DOUBLE (NPY_SIZEOF_DOUBLE * CHAR_BIT)\n#define NPY_BITSOF_LONGDOUBLE (NPY_SIZEOF_LONGDOUBLE * CHAR_BIT)\n#define NPY_BITSOF_CFLOAT (NPY_SIZEOF_CFLOAT * CHAR_BIT)\n#define NPY_BITSOF_CDOUBLE (NPY_SIZEOF_CDOUBLE * CHAR_BIT)\n#define NPY_BITSOF_CLONGDOUBLE (NPY_SIZEOF_CLONGDOUBLE * CHAR_BIT)\n#define NPY_BITSOF_DATETIME (NPY_SIZEOF_DATETIME * CHAR_BIT)\n#define NPY_BITSOF_TIMEDELTA (NPY_SIZEOF_TIMEDELTA * CHAR_BIT)\n\n#if NPY_BITSOF_LONG == 8\n#define NPY_INT8 NPY_LONG\n#define NPY_UINT8 NPY_ULONG\n typedef long npy_int8;\n typedef 
unsigned long npy_uint8;\n#define PyInt8ScalarObject PyLongScalarObject\n#define PyInt8ArrType_Type PyLongArrType_Type\n#define PyUInt8ScalarObject PyULongScalarObject\n#define PyUInt8ArrType_Type PyULongArrType_Type\n#define NPY_INT8_FMT NPY_LONG_FMT\n#define NPY_UINT8_FMT NPY_ULONG_FMT\n#elif NPY_BITSOF_LONG == 16\n#define NPY_INT16 NPY_LONG\n#define NPY_UINT16 NPY_ULONG\n typedef long npy_int16;\n typedef unsigned long npy_uint16;\n#define PyInt16ScalarObject PyLongScalarObject\n#define PyInt16ArrType_Type PyLongArrType_Type\n#define PyUInt16ScalarObject PyULongScalarObject\n#define PyUInt16ArrType_Type PyULongArrType_Type\n#define NPY_INT16_FMT NPY_LONG_FMT\n#define NPY_UINT16_FMT NPY_ULONG_FMT\n#elif NPY_BITSOF_LONG == 32\n#define NPY_INT32 NPY_LONG\n#define NPY_UINT32 NPY_ULONG\n typedef long npy_int32;\n typedef unsigned long npy_uint32;\n typedef unsigned long npy_ucs4;\n#define PyInt32ScalarObject PyLongScalarObject\n#define PyInt32ArrType_Type PyLongArrType_Type\n#define PyUInt32ScalarObject PyULongScalarObject\n#define PyUInt32ArrType_Type PyULongArrType_Type\n#define NPY_INT32_FMT NPY_LONG_FMT\n#define NPY_UINT32_FMT NPY_ULONG_FMT\n#elif NPY_BITSOF_LONG == 64\n#define NPY_INT64 NPY_LONG\n#define NPY_UINT64 NPY_ULONG\n typedef long npy_int64;\n typedef unsigned long npy_uint64;\n#define PyInt64ScalarObject PyLongScalarObject\n#define PyInt64ArrType_Type PyLongArrType_Type\n#define PyUInt64ScalarObject PyULongScalarObject\n#define PyUInt64ArrType_Type PyULongArrType_Type\n#define NPY_INT64_FMT NPY_LONG_FMT\n#define NPY_UINT64_FMT NPY_ULONG_FMT\n#define MyPyLong_FromInt64 PyLong_FromLong\n#define MyPyLong_AsInt64 PyLong_AsLong\n#endif\n\n#if NPY_BITSOF_LONGLONG == 8\n# ifndef NPY_INT8\n# define NPY_INT8 NPY_LONGLONG\n# define NPY_UINT8 NPY_ULONGLONG\n typedef npy_longlong npy_int8;\n typedef npy_ulonglong npy_uint8;\n# define PyInt8ScalarObject PyLongLongScalarObject\n# define PyInt8ArrType_Type PyLongLongArrType_Type\n# define PyUInt8ScalarObject 
PyULongLongScalarObject\n# define PyUInt8ArrType_Type PyULongLongArrType_Type\n#define NPY_INT8_FMT NPY_LONGLONG_FMT\n#define NPY_UINT8_FMT NPY_ULONGLONG_FMT\n# endif\n# define NPY_MAX_LONGLONG NPY_MAX_INT8\n# define NPY_MIN_LONGLONG NPY_MIN_INT8\n# define NPY_MAX_ULONGLONG NPY_MAX_UINT8\n#elif NPY_BITSOF_LONGLONG == 16\n# ifndef NPY_INT16\n# define NPY_INT16 NPY_LONGLONG\n# define NPY_UINT16 NPY_ULONGLONG\n typedef npy_longlong npy_int16;\n typedef npy_ulonglong npy_uint16;\n# define PyInt16ScalarObject PyLongLongScalarObject\n# define PyInt16ArrType_Type PyLongLongArrType_Type\n# define PyUInt16ScalarObject PyULongLongScalarObject\n# define PyUInt16ArrType_Type PyULongLongArrType_Type\n#define NPY_INT16_FMT NPY_LONGLONG_FMT\n#define NPY_UINT16_FMT NPY_ULONGLONG_FMT\n# endif\n# define NPY_MAX_LONGLONG NPY_MAX_INT16\n# define NPY_MIN_LONGLONG NPY_MIN_INT16\n# define NPY_MAX_ULONGLONG NPY_MAX_UINT16\n#elif NPY_BITSOF_LONGLONG == 32\n# ifndef NPY_INT32\n# define NPY_INT32 NPY_LONGLONG\n# define NPY_UINT32 NPY_ULONGLONG\n typedef npy_longlong npy_int32;\n typedef npy_ulonglong npy_uint32;\n typedef npy_ulonglong npy_ucs4;\n# define PyInt32ScalarObject PyLongLongScalarObject\n# define PyInt32ArrType_Type PyLongLongArrType_Type\n# define PyUInt32ScalarObject PyULongLongScalarObject\n# define PyUInt32ArrType_Type PyULongLongArrType_Type\n#define NPY_INT32_FMT NPY_LONGLONG_FMT\n#define NPY_UINT32_FMT NPY_ULONGLONG_FMT\n# endif\n# define NPY_MAX_LONGLONG NPY_MAX_INT32\n# define NPY_MIN_LONGLONG NPY_MIN_INT32\n# define NPY_MAX_ULONGLONG NPY_MAX_UINT32\n#elif NPY_BITSOF_LONGLONG == 64\n# ifndef NPY_INT64\n# define NPY_INT64 NPY_LONGLONG\n# define NPY_UINT64 NPY_ULONGLONG\n typedef npy_longlong npy_int64;\n typedef npy_ulonglong npy_uint64;\n# define PyInt64ScalarObject PyLongLongScalarObject\n# define PyInt64ArrType_Type PyLongLongArrType_Type\n# define PyUInt64ScalarObject PyULongLongScalarObject\n# define PyUInt64ArrType_Type PyULongLongArrType_Type\n#define NPY_INT64_FMT 
NPY_LONGLONG_FMT\n#define NPY_UINT64_FMT NPY_ULONGLONG_FMT\n# define MyPyLong_FromInt64 PyLong_FromLongLong\n# define MyPyLong_AsInt64 PyLong_AsLongLong\n# endif\n# define NPY_MAX_LONGLONG NPY_MAX_INT64\n# define NPY_MIN_LONGLONG NPY_MIN_INT64\n# define NPY_MAX_ULONGLONG NPY_MAX_UINT64\n#endif\n\n#if NPY_BITSOF_INT == 8\n#ifndef NPY_INT8\n#define NPY_INT8 NPY_INT\n#define NPY_UINT8 NPY_UINT\n typedef int npy_int8;\n typedef unsigned int npy_uint8;\n# define PyInt8ScalarObject PyIntScalarObject\n# define PyInt8ArrType_Type PyIntArrType_Type\n# define PyUInt8ScalarObject PyUIntScalarObject\n# define PyUInt8ArrType_Type PyUIntArrType_Type\n#define NPY_INT8_FMT NPY_INT_FMT\n#define NPY_UINT8_FMT NPY_UINT_FMT\n#endif\n#elif NPY_BITSOF_INT == 16\n#ifndef NPY_INT16\n#define NPY_INT16 NPY_INT\n#define NPY_UINT16 NPY_UINT\n typedef int npy_int16;\n typedef unsigned int npy_uint16;\n# define PyInt16ScalarObject PyIntScalarObject\n# define PyInt16ArrType_Type PyIntArrType_Type\n# define PyUInt16ScalarObject PyIntUScalarObject\n# define PyUInt16ArrType_Type PyIntUArrType_Type\n#define NPY_INT16_FMT NPY_INT_FMT\n#define NPY_UINT16_FMT NPY_UINT_FMT\n#endif\n#elif NPY_BITSOF_INT == 32\n#ifndef NPY_INT32\n#define NPY_INT32 NPY_INT\n#define NPY_UINT32 NPY_UINT\n typedef int npy_int32;\n typedef unsigned int npy_uint32;\n typedef unsigned int npy_ucs4;\n# define PyInt32ScalarObject PyIntScalarObject\n# define PyInt32ArrType_Type PyIntArrType_Type\n# define PyUInt32ScalarObject PyUIntScalarObject\n# define PyUInt32ArrType_Type PyUIntArrType_Type\n#define NPY_INT32_FMT NPY_INT_FMT\n#define NPY_UINT32_FMT NPY_UINT_FMT\n#endif\n#elif NPY_BITSOF_INT == 64\n#ifndef NPY_INT64\n#define NPY_INT64 NPY_INT\n#define NPY_UINT64 NPY_UINT\n typedef int npy_int64;\n typedef unsigned int npy_uint64;\n# define PyInt64ScalarObject PyIntScalarObject\n# define PyInt64ArrType_Type PyIntArrType_Type\n# define PyUInt64ScalarObject PyUIntScalarObject\n# define PyUInt64ArrType_Type 
PyUIntArrType_Type\n#define NPY_INT64_FMT NPY_INT_FMT\n#define NPY_UINT64_FMT NPY_UINT_FMT\n# define MyPyLong_FromInt64 PyLong_FromLong\n# define MyPyLong_AsInt64 PyLong_AsLong\n#endif\n#endif\n\n#if NPY_BITSOF_SHORT == 8\n#ifndef NPY_INT8\n#define NPY_INT8 NPY_SHORT\n#define NPY_UINT8 NPY_USHORT\n typedef short npy_int8;\n typedef unsigned short npy_uint8;\n# define PyInt8ScalarObject PyShortScalarObject\n# define PyInt8ArrType_Type PyShortArrType_Type\n# define PyUInt8ScalarObject PyUShortScalarObject\n# define PyUInt8ArrType_Type PyUShortArrType_Type\n#define NPY_INT8_FMT NPY_SHORT_FMT\n#define NPY_UINT8_FMT NPY_USHORT_FMT\n#endif\n#elif NPY_BITSOF_SHORT == 16\n#ifndef NPY_INT16\n#define NPY_INT16 NPY_SHORT\n#define NPY_UINT16 NPY_USHORT\n typedef short npy_int16;\n typedef unsigned short npy_uint16;\n# define PyInt16ScalarObject PyShortScalarObject\n# define PyInt16ArrType_Type PyShortArrType_Type\n# define PyUInt16ScalarObject PyUShortScalarObject\n# define PyUInt16ArrType_Type PyUShortArrType_Type\n#define NPY_INT16_FMT NPY_SHORT_FMT\n#define NPY_UINT16_FMT NPY_USHORT_FMT\n#endif\n#elif NPY_BITSOF_SHORT == 32\n#ifndef NPY_INT32\n#define NPY_INT32 NPY_SHORT\n#define NPY_UINT32 NPY_USHORT\n typedef short npy_int32;\n typedef unsigned short npy_uint32;\n typedef unsigned short npy_ucs4;\n# define PyInt32ScalarObject PyShortScalarObject\n# define PyInt32ArrType_Type PyShortArrType_Type\n# define PyUInt32ScalarObject PyUShortScalarObject\n# define PyUInt32ArrType_Type PyUShortArrType_Type\n#define NPY_INT32_FMT NPY_SHORT_FMT\n#define NPY_UINT32_FMT NPY_USHORT_FMT\n#endif\n#elif NPY_BITSOF_SHORT == 64\n#ifndef NPY_INT64\n#define NPY_INT64 NPY_SHORT\n#define NPY_UINT64 NPY_USHORT\n typedef short npy_int64;\n typedef unsigned short npy_uint64;\n# define PyInt64ScalarObject PyShortScalarObject\n# define PyInt64ArrType_Type PyShortArrType_Type\n# define PyUInt64ScalarObject PyUShortScalarObject\n# define PyUInt64ArrType_Type PyUShortArrType_Type\n#define NPY_INT64_FMT 
NPY_SHORT_FMT\n#define NPY_UINT64_FMT NPY_USHORT_FMT\n# define MyPyLong_FromInt64 PyLong_FromLong\n# define MyPyLong_AsInt64 PyLong_AsLong\n#endif\n#endif\n\n\n#if NPY_BITSOF_CHAR == 8\n#ifndef NPY_INT8\n#define NPY_INT8 NPY_BYTE\n#define NPY_UINT8 NPY_UBYTE\n typedef signed char npy_int8;\n typedef unsigned char npy_uint8;\n# define PyInt8ScalarObject PyByteScalarObject\n# define PyInt8ArrType_Type PyByteArrType_Type\n# define PyUInt8ScalarObject PyUByteScalarObject\n# define PyUInt8ArrType_Type PyUByteArrType_Type\n#define NPY_INT8_FMT NPY_BYTE_FMT\n#define NPY_UINT8_FMT NPY_UBYTE_FMT\n#endif\n#elif NPY_BITSOF_CHAR == 16\n#ifndef NPY_INT16\n#define NPY_INT16 NPY_BYTE\n#define NPY_UINT16 NPY_UBYTE\n typedef signed char npy_int16;\n typedef unsigned char npy_uint16;\n# define PyInt16ScalarObject PyByteScalarObject\n# define PyInt16ArrType_Type PyByteArrType_Type\n# define PyUInt16ScalarObject PyUByteScalarObject\n# define PyUInt16ArrType_Type PyUByteArrType_Type\n#define NPY_INT16_FMT NPY_BYTE_FMT\n#define NPY_UINT16_FMT NPY_UBYTE_FMT\n#endif\n#elif NPY_BITSOF_CHAR == 32\n#ifndef NPY_INT32\n#define NPY_INT32 NPY_BYTE\n#define NPY_UINT32 NPY_UBYTE\n typedef signed char npy_int32;\n typedef unsigned char npy_uint32;\n typedef unsigned char npy_ucs4;\n# define PyInt32ScalarObject PyByteScalarObject\n# define PyInt32ArrType_Type PyByteArrType_Type\n# define PyUInt32ScalarObject PyUByteScalarObject\n# define PyUInt32ArrType_Type PyUByteArrType_Type\n#define NPY_INT32_FMT NPY_BYTE_FMT\n#define NPY_UINT32_FMT NPY_UBYTE_FMT\n#endif\n#elif NPY_BITSOF_CHAR == 64\n#ifndef NPY_INT64\n#define NPY_INT64 NPY_BYTE\n#define NPY_UINT64 NPY_UBYTE\n typedef signed char npy_int64;\n typedef unsigned char npy_uint64;\n# define PyInt64ScalarObject PyByteScalarObject\n# define PyInt64ArrType_Type PyByteArrType_Type\n# define PyUInt64ScalarObject PyUByteScalarObject\n# define PyUInt64ArrType_Type PyUByteArrType_Type\n#define NPY_INT64_FMT NPY_BYTE_FMT\n#define NPY_UINT64_FMT 
NPY_UBYTE_FMT\n# define MyPyLong_FromInt64 PyLong_FromLong\n# define MyPyLong_AsInt64 PyLong_AsLong\n#endif\n#elif NPY_BITSOF_CHAR == 128\n#endif\n\n\n\n#if NPY_BITSOF_DOUBLE == 32\n#ifndef NPY_FLOAT32\n#define NPY_FLOAT32 NPY_DOUBLE\n#define NPY_COMPLEX64 NPY_CDOUBLE\n typedef double npy_float32;\n typedef npy_cdouble npy_complex64;\n# define PyFloat32ScalarObject PyDoubleScalarObject\n# define PyComplex64ScalarObject PyCDoubleScalarObject\n# define PyFloat32ArrType_Type PyDoubleArrType_Type\n# define PyComplex64ArrType_Type PyCDoubleArrType_Type\n#define NPY_FLOAT32_FMT NPY_DOUBLE_FMT\n#define NPY_COMPLEX64_FMT NPY_CDOUBLE_FMT\n#endif\n#elif NPY_BITSOF_DOUBLE == 64\n#ifndef NPY_FLOAT64\n#define NPY_FLOAT64 NPY_DOUBLE\n#define NPY_COMPLEX128 NPY_CDOUBLE\n typedef double npy_float64;\n typedef npy_cdouble npy_complex128;\n# define PyFloat64ScalarObject PyDoubleScalarObject\n# define PyComplex128ScalarObject PyCDoubleScalarObject\n# define PyFloat64ArrType_Type PyDoubleArrType_Type\n# define PyComplex128ArrType_Type PyCDoubleArrType_Type\n#define NPY_FLOAT64_FMT NPY_DOUBLE_FMT\n#define NPY_COMPLEX128_FMT NPY_CDOUBLE_FMT\n#endif\n#elif NPY_BITSOF_DOUBLE == 80\n#ifndef NPY_FLOAT80\n#define NPY_FLOAT80 NPY_DOUBLE\n#define NPY_COMPLEX160 NPY_CDOUBLE\n typedef double npy_float80;\n typedef npy_cdouble npy_complex160;\n# define PyFloat80ScalarObject PyDoubleScalarObject\n# define PyComplex160ScalarObject PyCDoubleScalarObject\n# define PyFloat80ArrType_Type PyDoubleArrType_Type\n# define PyComplex160ArrType_Type PyCDoubleArrType_Type\n#define NPY_FLOAT80_FMT NPY_DOUBLE_FMT\n#define NPY_COMPLEX160_FMT NPY_CDOUBLE_FMT\n#endif\n#elif NPY_BITSOF_DOUBLE == 96\n#ifndef NPY_FLOAT96\n#define NPY_FLOAT96 NPY_DOUBLE\n#define NPY_COMPLEX192 NPY_CDOUBLE\n typedef double npy_float96;\n typedef npy_cdouble npy_complex192;\n# define PyFloat96ScalarObject PyDoubleScalarObject\n# define PyComplex192ScalarObject PyCDoubleScalarObject\n# define PyFloat96ArrType_Type PyDoubleArrType_Type\n# 
define PyComplex192ArrType_Type PyCDoubleArrType_Type\n#define NPY_FLOAT96_FMT NPY_DOUBLE_FMT\n#define NPY_COMPLEX192_FMT NPY_CDOUBLE_FMT\n#endif\n#elif NPY_BITSOF_DOUBLE == 128\n#ifndef NPY_FLOAT128\n#define NPY_FLOAT128 NPY_DOUBLE\n#define NPY_COMPLEX256 NPY_CDOUBLE\n typedef double npy_float128;\n typedef npy_cdouble npy_complex256;\n# define PyFloat128ScalarObject PyDoubleScalarObject\n# define PyComplex256ScalarObject PyCDoubleScalarObject\n# define PyFloat128ArrType_Type PyDoubleArrType_Type\n# define PyComplex256ArrType_Type PyCDoubleArrType_Type\n#define NPY_FLOAT128_FMT NPY_DOUBLE_FMT\n#define NPY_COMPLEX256_FMT NPY_CDOUBLE_FMT\n#endif\n#endif\n\n\n\n#if NPY_BITSOF_FLOAT == 32\n#ifndef NPY_FLOAT32\n#define NPY_FLOAT32 NPY_FLOAT\n#define NPY_COMPLEX64 NPY_CFLOAT\n typedef float npy_float32;\n typedef npy_cfloat npy_complex64;\n# define PyFloat32ScalarObject PyFloatScalarObject\n# define PyComplex64ScalarObject PyCFloatScalarObject\n# define PyFloat32ArrType_Type PyFloatArrType_Type\n# define PyComplex64ArrType_Type PyCFloatArrType_Type\n#define NPY_FLOAT32_FMT NPY_FLOAT_FMT\n#define NPY_COMPLEX64_FMT NPY_CFLOAT_FMT\n#endif\n#elif NPY_BITSOF_FLOAT == 64\n#ifndef NPY_FLOAT64\n#define NPY_FLOAT64 NPY_FLOAT\n#define NPY_COMPLEX128 NPY_CFLOAT\n typedef float npy_float64;\n typedef npy_cfloat npy_complex128;\n# define PyFloat64ScalarObject PyFloatScalarObject\n# define PyComplex128ScalarObject PyCFloatScalarObject\n# define PyFloat64ArrType_Type PyFloatArrType_Type\n# define PyComplex128ArrType_Type PyCFloatArrType_Type\n#define NPY_FLOAT64_FMT NPY_FLOAT_FMT\n#define NPY_COMPLEX128_FMT NPY_CFLOAT_FMT\n#endif\n#elif NPY_BITSOF_FLOAT == 80\n#ifndef NPY_FLOAT80\n#define NPY_FLOAT80 NPY_FLOAT\n#define NPY_COMPLEX160 NPY_CFLOAT\n typedef float npy_float80;\n typedef npy_cfloat npy_complex160;\n# define PyFloat80ScalarObject PyFloatScalarObject\n# define PyComplex160ScalarObject PyCFloatScalarObject\n# define PyFloat80ArrType_Type PyFloatArrType_Type\n# define 
PyComplex160ArrType_Type PyCFloatArrType_Type\n#define NPY_FLOAT80_FMT NPY_FLOAT_FMT\n#define NPY_COMPLEX160_FMT NPY_CFLOAT_FMT\n#endif\n#elif NPY_BITSOF_FLOAT == 96\n#ifndef NPY_FLOAT96\n#define NPY_FLOAT96 NPY_FLOAT\n#define NPY_COMPLEX192 NPY_CFLOAT\n typedef float npy_float96;\n typedef npy_cfloat npy_complex192;\n# define PyFloat96ScalarObject PyFloatScalarObject\n# define PyComplex192ScalarObject PyCFloatScalarObject\n# define PyFloat96ArrType_Type PyFloatArrType_Type\n# define PyComplex192ArrType_Type PyCFloatArrType_Type\n#define NPY_FLOAT96_FMT NPY_FLOAT_FMT\n#define NPY_COMPLEX192_FMT NPY_CFLOAT_FMT\n#endif\n#elif NPY_BITSOF_FLOAT == 128\n#ifndef NPY_FLOAT128\n#define NPY_FLOAT128 NPY_FLOAT\n#define NPY_COMPLEX256 NPY_CFLOAT\n typedef float npy_float128;\n typedef npy_cfloat npy_complex256;\n# define PyFloat128ScalarObject PyFloatScalarObject\n# define PyComplex256ScalarObject PyCFloatScalarObject\n# define PyFloat128ArrType_Type PyFloatArrType_Type\n# define PyComplex256ArrType_Type PyCFloatArrType_Type\n#define NPY_FLOAT128_FMT NPY_FLOAT_FMT\n#define NPY_COMPLEX256_FMT NPY_CFLOAT_FMT\n#endif\n#endif\n\n/* half/float16 isn't a floating-point type in C */\n#define NPY_FLOAT16 NPY_HALF\ntypedef npy_uint16 npy_half;\ntypedef npy_half npy_float16;\n\n#if NPY_BITSOF_LONGDOUBLE == 32\n#ifndef NPY_FLOAT32\n#define NPY_FLOAT32 NPY_LONGDOUBLE\n#define NPY_COMPLEX64 NPY_CLONGDOUBLE\n typedef npy_longdouble npy_float32;\n typedef npy_clongdouble npy_complex64;\n# define PyFloat32ScalarObject PyLongDoubleScalarObject\n# define PyComplex64ScalarObject PyCLongDoubleScalarObject\n# define PyFloat32ArrType_Type PyLongDoubleArrType_Type\n# define PyComplex64ArrType_Type PyCLongDoubleArrType_Type\n#define NPY_FLOAT32_FMT NPY_LONGDOUBLE_FMT\n#define NPY_COMPLEX64_FMT NPY_CLONGDOUBLE_FMT\n#endif\n#elif NPY_BITSOF_LONGDOUBLE == 64\n#ifndef NPY_FLOAT64\n#define NPY_FLOAT64 NPY_LONGDOUBLE\n#define NPY_COMPLEX128 NPY_CLONGDOUBLE\n typedef npy_longdouble npy_float64;\n typedef 
npy_clongdouble npy_complex128;\n# define PyFloat64ScalarObject PyLongDoubleScalarObject\n# define PyComplex128ScalarObject PyCLongDoubleScalarObject\n# define PyFloat64ArrType_Type PyLongDoubleArrType_Type\n# define PyComplex128ArrType_Type PyCLongDoubleArrType_Type\n#define NPY_FLOAT64_FMT NPY_LONGDOUBLE_FMT\n#define NPY_COMPLEX128_FMT NPY_CLONGDOUBLE_FMT\n#endif\n#elif NPY_BITSOF_LONGDOUBLE == 80\n#ifndef NPY_FLOAT80\n#define NPY_FLOAT80 NPY_LONGDOUBLE\n#define NPY_COMPLEX160 NPY_CLONGDOUBLE\n typedef npy_longdouble npy_float80;\n typedef npy_clongdouble npy_complex160;\n# define PyFloat80ScalarObject PyLongDoubleScalarObject\n# define PyComplex160ScalarObject PyCLongDoubleScalarObject\n# define PyFloat80ArrType_Type PyLongDoubleArrType_Type\n# define PyComplex160ArrType_Type PyCLongDoubleArrType_Type\n#define NPY_FLOAT80_FMT NPY_LONGDOUBLE_FMT\n#define NPY_COMPLEX160_FMT NPY_CLONGDOUBLE_FMT\n#endif\n#elif NPY_BITSOF_LONGDOUBLE == 96\n#ifndef NPY_FLOAT96\n#define NPY_FLOAT96 NPY_LONGDOUBLE\n#define NPY_COMPLEX192 NPY_CLONGDOUBLE\n typedef npy_longdouble npy_float96;\n typedef npy_clongdouble npy_complex192;\n# define PyFloat96ScalarObject PyLongDoubleScalarObject\n# define PyComplex192ScalarObject PyCLongDoubleScalarObject\n# define PyFloat96ArrType_Type PyLongDoubleArrType_Type\n# define PyComplex192ArrType_Type PyCLongDoubleArrType_Type\n#define NPY_FLOAT96_FMT NPY_LONGDOUBLE_FMT\n#define NPY_COMPLEX192_FMT NPY_CLONGDOUBLE_FMT\n#endif\n#elif NPY_BITSOF_LONGDOUBLE == 128\n#ifndef NPY_FLOAT128\n#define NPY_FLOAT128 NPY_LONGDOUBLE\n#define NPY_COMPLEX256 NPY_CLONGDOUBLE\n typedef npy_longdouble npy_float128;\n typedef npy_clongdouble npy_complex256;\n# define PyFloat128ScalarObject PyLongDoubleScalarObject\n# define PyComplex256ScalarObject PyCLongDoubleScalarObject\n# define PyFloat128ArrType_Type PyLongDoubleArrType_Type\n# define PyComplex256ArrType_Type PyCLongDoubleArrType_Type\n#define NPY_FLOAT128_FMT NPY_LONGDOUBLE_FMT\n#define NPY_COMPLEX256_FMT 
NPY_CLONGDOUBLE_FMT\n#endif\n#endif\n\n/* datetime typedefs */\ntypedef npy_int64 npy_timedelta;\ntypedef npy_int64 npy_datetime;\n#define NPY_DATETIME_FMT NPY_INT64_FMT\n#define NPY_TIMEDELTA_FMT NPY_INT64_FMT\n\n/* End of typedefs for numarray style bit-width names */\n\n#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_COMMON_H_ */\n
.venv\Lib\site-packages\numpy\_core\include\numpy\npy_common.h
npy_common.h
C
33,563
0.95
0.03173
0.849119
vue-tools
386
2024-07-23T06:24:53.023642
BSD-3-Clause
false
4fecb0c4aa3a61894dbeabbfe870e12d
/*\n * This set (target) cpu specific macros:\n * - Possible values:\n * NPY_CPU_X86\n * NPY_CPU_AMD64\n * NPY_CPU_PPC\n * NPY_CPU_PPC64\n * NPY_CPU_PPC64LE\n * NPY_CPU_SPARC\n * NPY_CPU_S390\n * NPY_CPU_IA64\n * NPY_CPU_HPPA\n * NPY_CPU_ALPHA\n * NPY_CPU_ARMEL\n * NPY_CPU_ARMEB\n * NPY_CPU_SH_LE\n * NPY_CPU_SH_BE\n * NPY_CPU_ARCEL\n * NPY_CPU_ARCEB\n * NPY_CPU_RISCV64\n * NPY_CPU_RISCV32\n * NPY_CPU_LOONGARCH\n * NPY_CPU_WASM\n */\n#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_CPU_H_\n#define NUMPY_CORE_INCLUDE_NUMPY_NPY_CPU_H_\n\n#include "numpyconfig.h"\n\n#if defined( __i386__ ) || defined(i386) || defined(_M_IX86)\n /*\n * __i386__ is defined by gcc and Intel compiler on Linux,\n * _M_IX86 by VS compiler,\n * i386 by Sun compilers on opensolaris at least\n */\n #define NPY_CPU_X86\n#elif defined(__x86_64__) || defined(__amd64__) || defined(__x86_64) || defined(_M_AMD64)\n /*\n * both __x86_64__ and __amd64__ are defined by gcc\n * __x86_64 defined by sun compiler on opensolaris at least\n * _M_AMD64 defined by MS compiler\n */\n #define NPY_CPU_AMD64\n#elif defined(__powerpc64__) && defined(__LITTLE_ENDIAN__)\n #define NPY_CPU_PPC64LE\n#elif defined(__powerpc64__) && defined(__BIG_ENDIAN__)\n #define NPY_CPU_PPC64\n#elif defined(__ppc__) || defined(__powerpc__) || defined(_ARCH_PPC)\n /*\n * __ppc__ is defined by gcc, I remember having seen __powerpc__ once,\n * but can't find it ATM\n * _ARCH_PPC is used by at least gcc on AIX\n * As __powerpc__ and _ARCH_PPC are also defined by PPC64 check\n * for those specifically first before defaulting to ppc\n */\n #define NPY_CPU_PPC\n#elif defined(__sparc__) || defined(__sparc)\n /* __sparc__ is defined by gcc and Forte (e.g. 
Sun) compilers */\n #define NPY_CPU_SPARC\n#elif defined(__s390__)\n #define NPY_CPU_S390\n#elif defined(__ia64)\n #define NPY_CPU_IA64\n#elif defined(__hppa)\n #define NPY_CPU_HPPA\n#elif defined(__alpha__)\n #define NPY_CPU_ALPHA\n#elif defined(__arm__) || defined(__aarch64__) || defined(_M_ARM64)\n /* _M_ARM64 is defined in MSVC for ARM64 compilation on Windows */\n #if defined(__ARMEB__) || defined(__AARCH64EB__)\n #if defined(__ARM_32BIT_STATE)\n #define NPY_CPU_ARMEB_AARCH32\n #elif defined(__ARM_64BIT_STATE)\n #define NPY_CPU_ARMEB_AARCH64\n #else\n #define NPY_CPU_ARMEB\n #endif\n #elif defined(__ARMEL__) || defined(__AARCH64EL__) || defined(_M_ARM64)\n #if defined(__ARM_32BIT_STATE)\n #define NPY_CPU_ARMEL_AARCH32\n #elif defined(__ARM_64BIT_STATE) || defined(_M_ARM64) || defined(__AARCH64EL__)\n #define NPY_CPU_ARMEL_AARCH64\n #else\n #define NPY_CPU_ARMEL\n #endif\n #else\n # error Unknown ARM CPU, please report this to numpy maintainers with \\n information about your platform (OS, CPU and compiler)\n #endif\n#elif defined(__sh__) && defined(__LITTLE_ENDIAN__)\n #define NPY_CPU_SH_LE\n#elif defined(__sh__) && defined(__BIG_ENDIAN__)\n #define NPY_CPU_SH_BE\n#elif defined(__MIPSEL__)\n #define NPY_CPU_MIPSEL\n#elif defined(__MIPSEB__)\n #define NPY_CPU_MIPSEB\n#elif defined(__or1k__)\n #define NPY_CPU_OR1K\n#elif defined(__mc68000__)\n #define NPY_CPU_M68K\n#elif defined(__arc__) && defined(__LITTLE_ENDIAN__)\n #define NPY_CPU_ARCEL\n#elif defined(__arc__) && defined(__BIG_ENDIAN__)\n #define NPY_CPU_ARCEB\n#elif defined(__riscv)\n #if __riscv_xlen == 64\n #define NPY_CPU_RISCV64\n #elif __riscv_xlen == 32\n #define NPY_CPU_RISCV32\n #endif\n#elif defined(__loongarch_lp64)\n #define NPY_CPU_LOONGARCH64\n#elif defined(__EMSCRIPTEN__)\n /* __EMSCRIPTEN__ is defined by emscripten: an LLVM-to-Web compiler */\n #define NPY_CPU_WASM\n#else\n #error Unknown CPU, please report this to numpy maintainers with \\n information about your platform (OS, CPU and 
compiler)\n#endif\n\n#define NPY_ALIGNMENT_REQUIRED 1\n\n#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_CPU_H_ */\n
.venv\Lib\site-packages\numpy\_core\include\numpy\npy_cpu.h
npy_cpu.h
C
4,349
0.95
0.056452
0.983333
vue-tools
505
2024-06-20T17:46:13.034579
Apache-2.0
false
de54f9ac02af7b10a1098bd3dada1d6e
#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_ENDIAN_H_\n#define NUMPY_CORE_INCLUDE_NUMPY_NPY_ENDIAN_H_\n\n/*\n * NPY_BYTE_ORDER is set to the same value as BYTE_ORDER set by glibc in\n * endian.h\n */\n\n#if defined(NPY_HAVE_ENDIAN_H) || defined(NPY_HAVE_SYS_ENDIAN_H)\n /* Use endian.h if available */\n\n #if defined(NPY_HAVE_ENDIAN_H)\n #include <endian.h>\n #elif defined(NPY_HAVE_SYS_ENDIAN_H)\n #include <sys/endian.h>\n #endif\n\n #if defined(BYTE_ORDER) && defined(BIG_ENDIAN) && defined(LITTLE_ENDIAN)\n #define NPY_BYTE_ORDER BYTE_ORDER\n #define NPY_LITTLE_ENDIAN LITTLE_ENDIAN\n #define NPY_BIG_ENDIAN BIG_ENDIAN\n #elif defined(_BYTE_ORDER) && defined(_BIG_ENDIAN) && defined(_LITTLE_ENDIAN)\n #define NPY_BYTE_ORDER _BYTE_ORDER\n #define NPY_LITTLE_ENDIAN _LITTLE_ENDIAN\n #define NPY_BIG_ENDIAN _BIG_ENDIAN\n #elif defined(__BYTE_ORDER) && defined(__BIG_ENDIAN) && defined(__LITTLE_ENDIAN)\n #define NPY_BYTE_ORDER __BYTE_ORDER\n #define NPY_LITTLE_ENDIAN __LITTLE_ENDIAN\n #define NPY_BIG_ENDIAN __BIG_ENDIAN\n #endif\n#endif\n\n#ifndef NPY_BYTE_ORDER\n /* Set endianness info using target CPU */\n #include "npy_cpu.h"\n\n #define NPY_LITTLE_ENDIAN 1234\n #define NPY_BIG_ENDIAN 4321\n\n #if defined(NPY_CPU_X86) \\n || defined(NPY_CPU_AMD64) \\n || defined(NPY_CPU_IA64) \\n || defined(NPY_CPU_ALPHA) \\n || defined(NPY_CPU_ARMEL) \\n || defined(NPY_CPU_ARMEL_AARCH32) \\n || defined(NPY_CPU_ARMEL_AARCH64) \\n || defined(NPY_CPU_SH_LE) \\n || defined(NPY_CPU_MIPSEL) \\n || defined(NPY_CPU_PPC64LE) \\n || defined(NPY_CPU_ARCEL) \\n || defined(NPY_CPU_RISCV64) \\n || defined(NPY_CPU_RISCV32) \\n || defined(NPY_CPU_LOONGARCH) \\n || defined(NPY_CPU_WASM)\n #define NPY_BYTE_ORDER NPY_LITTLE_ENDIAN\n\n #elif defined(NPY_CPU_PPC) \\n || defined(NPY_CPU_SPARC) \\n || defined(NPY_CPU_S390) \\n || defined(NPY_CPU_HPPA) \\n || defined(NPY_CPU_PPC64) \\n || defined(NPY_CPU_ARMEB) \\n || defined(NPY_CPU_ARMEB_AARCH32) \\n || defined(NPY_CPU_ARMEB_AARCH64) \\n || defined(NPY_CPU_SH_BE) \\n 
|| defined(NPY_CPU_MIPSEB) \\n || defined(NPY_CPU_OR1K) \\n || defined(NPY_CPU_M68K) \\n || defined(NPY_CPU_ARCEB)\n #define NPY_BYTE_ORDER NPY_BIG_ENDIAN\n\n #else\n #error Unknown CPU: can not set endianness\n #endif\n\n#endif\n\n#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_ENDIAN_H_ */\n
.venv\Lib\site-packages\numpy\_core\include\numpy\npy_endian.h
npy_endian.h
C
2,912
0.95
0.064103
0.61194
awesome-app
513
2024-04-17T03:08:09.316170
Apache-2.0
false
46484be9ec6b29b73f863017eb51e8fd
#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_MATH_H_\n#define NUMPY_CORE_INCLUDE_NUMPY_NPY_MATH_H_\n\n#include <numpy/npy_common.h>\n\n#include <math.h>\n\n/* By adding static inline specifiers to npy_math function definitions when\n appropriate, compiler is given the opportunity to optimize */\n#if NPY_INLINE_MATH\n#define NPY_INPLACE static inline\n#else\n#define NPY_INPLACE\n#endif\n\n\n#ifdef __cplusplus\nextern "C" {\n#endif\n\n#define PyArray_MAX(a,b) (((a)>(b))?(a):(b))\n#define PyArray_MIN(a,b) (((a)<(b))?(a):(b))\n\n/*\n * NAN and INFINITY like macros (same behavior as glibc for NAN, same as C99\n * for INFINITY)\n *\n * XXX: I should test whether INFINITY and NAN are available on the platform\n */\nstatic inline float __npy_inff(void)\n{\n const union { npy_uint32 __i; float __f;} __bint = {0x7f800000UL};\n return __bint.__f;\n}\n\nstatic inline float __npy_nanf(void)\n{\n const union { npy_uint32 __i; float __f;} __bint = {0x7fc00000UL};\n return __bint.__f;\n}\n\nstatic inline float __npy_pzerof(void)\n{\n const union { npy_uint32 __i; float __f;} __bint = {0x00000000UL};\n return __bint.__f;\n}\n\nstatic inline float __npy_nzerof(void)\n{\n const union { npy_uint32 __i; float __f;} __bint = {0x80000000UL};\n return __bint.__f;\n}\n\n#define NPY_INFINITYF __npy_inff()\n#define NPY_NANF __npy_nanf()\n#define NPY_PZEROF __npy_pzerof()\n#define NPY_NZEROF __npy_nzerof()\n\n#define NPY_INFINITY ((npy_double)NPY_INFINITYF)\n#define NPY_NAN ((npy_double)NPY_NANF)\n#define NPY_PZERO ((npy_double)NPY_PZEROF)\n#define NPY_NZERO ((npy_double)NPY_NZEROF)\n\n#define NPY_INFINITYL ((npy_longdouble)NPY_INFINITYF)\n#define NPY_NANL ((npy_longdouble)NPY_NANF)\n#define NPY_PZEROL ((npy_longdouble)NPY_PZEROF)\n#define NPY_NZEROL ((npy_longdouble)NPY_NZEROF)\n\n/*\n * Useful constants\n */\n#define NPY_E 2.718281828459045235360287471352662498 /* e */\n#define NPY_LOG2E 1.442695040888963407359924681001892137 /* log_2 e */\n#define NPY_LOG10E 0.434294481903251827651128918916605082 
/* log_10 e */\n#define NPY_LOGE2 0.693147180559945309417232121458176568 /* log_e 2 */\n#define NPY_LOGE10 2.302585092994045684017991454684364208 /* log_e 10 */\n#define NPY_PI 3.141592653589793238462643383279502884 /* pi */\n#define NPY_PI_2 1.570796326794896619231321691639751442 /* pi/2 */\n#define NPY_PI_4 0.785398163397448309615660845819875721 /* pi/4 */\n#define NPY_1_PI 0.318309886183790671537767526745028724 /* 1/pi */\n#define NPY_2_PI 0.636619772367581343075535053490057448 /* 2/pi */\n#define NPY_EULER 0.577215664901532860606512090082402431 /* Euler constant */\n#define NPY_SQRT2 1.414213562373095048801688724209698079 /* sqrt(2) */\n#define NPY_SQRT1_2 0.707106781186547524400844362104849039 /* 1/sqrt(2) */\n\n#define NPY_Ef 2.718281828459045235360287471352662498F /* e */\n#define NPY_LOG2Ef 1.442695040888963407359924681001892137F /* log_2 e */\n#define NPY_LOG10Ef 0.434294481903251827651128918916605082F /* log_10 e */\n#define NPY_LOGE2f 0.693147180559945309417232121458176568F /* log_e 2 */\n#define NPY_LOGE10f 2.302585092994045684017991454684364208F /* log_e 10 */\n#define NPY_PIf 3.141592653589793238462643383279502884F /* pi */\n#define NPY_PI_2f 1.570796326794896619231321691639751442F /* pi/2 */\n#define NPY_PI_4f 0.785398163397448309615660845819875721F /* pi/4 */\n#define NPY_1_PIf 0.318309886183790671537767526745028724F /* 1/pi */\n#define NPY_2_PIf 0.636619772367581343075535053490057448F /* 2/pi */\n#define NPY_EULERf 0.577215664901532860606512090082402431F /* Euler constant */\n#define NPY_SQRT2f 1.414213562373095048801688724209698079F /* sqrt(2) */\n#define NPY_SQRT1_2f 0.707106781186547524400844362104849039F /* 1/sqrt(2) */\n\n#define NPY_El 2.718281828459045235360287471352662498L /* e */\n#define NPY_LOG2El 1.442695040888963407359924681001892137L /* log_2 e */\n#define NPY_LOG10El 0.434294481903251827651128918916605082L /* log_10 e */\n#define NPY_LOGE2l 0.693147180559945309417232121458176568L /* log_e 2 */\n#define NPY_LOGE10l 
2.302585092994045684017991454684364208L /* log_e 10 */\n#define NPY_PIl 3.141592653589793238462643383279502884L /* pi */\n#define NPY_PI_2l 1.570796326794896619231321691639751442L /* pi/2 */\n#define NPY_PI_4l 0.785398163397448309615660845819875721L /* pi/4 */\n#define NPY_1_PIl 0.318309886183790671537767526745028724L /* 1/pi */\n#define NPY_2_PIl 0.636619772367581343075535053490057448L /* 2/pi */\n#define NPY_EULERl 0.577215664901532860606512090082402431L /* Euler constant */\n#define NPY_SQRT2l 1.414213562373095048801688724209698079L /* sqrt(2) */\n#define NPY_SQRT1_2l 0.707106781186547524400844362104849039L /* 1/sqrt(2) */\n\n/*\n * Integer functions.\n */\nNPY_INPLACE npy_uint npy_gcdu(npy_uint a, npy_uint b);\nNPY_INPLACE npy_uint npy_lcmu(npy_uint a, npy_uint b);\nNPY_INPLACE npy_ulong npy_gcdul(npy_ulong a, npy_ulong b);\nNPY_INPLACE npy_ulong npy_lcmul(npy_ulong a, npy_ulong b);\nNPY_INPLACE npy_ulonglong npy_gcdull(npy_ulonglong a, npy_ulonglong b);\nNPY_INPLACE npy_ulonglong npy_lcmull(npy_ulonglong a, npy_ulonglong b);\n\nNPY_INPLACE npy_int npy_gcd(npy_int a, npy_int b);\nNPY_INPLACE npy_int npy_lcm(npy_int a, npy_int b);\nNPY_INPLACE npy_long npy_gcdl(npy_long a, npy_long b);\nNPY_INPLACE npy_long npy_lcml(npy_long a, npy_long b);\nNPY_INPLACE npy_longlong npy_gcdll(npy_longlong a, npy_longlong b);\nNPY_INPLACE npy_longlong npy_lcmll(npy_longlong a, npy_longlong b);\n\nNPY_INPLACE npy_ubyte npy_rshiftuhh(npy_ubyte a, npy_ubyte b);\nNPY_INPLACE npy_ubyte npy_lshiftuhh(npy_ubyte a, npy_ubyte b);\nNPY_INPLACE npy_ushort npy_rshiftuh(npy_ushort a, npy_ushort b);\nNPY_INPLACE npy_ushort npy_lshiftuh(npy_ushort a, npy_ushort b);\nNPY_INPLACE npy_uint npy_rshiftu(npy_uint a, npy_uint b);\nNPY_INPLACE npy_uint npy_lshiftu(npy_uint a, npy_uint b);\nNPY_INPLACE npy_ulong npy_rshiftul(npy_ulong a, npy_ulong b);\nNPY_INPLACE npy_ulong npy_lshiftul(npy_ulong a, npy_ulong b);\nNPY_INPLACE npy_ulonglong npy_rshiftull(npy_ulonglong a, npy_ulonglong b);\nNPY_INPLACE 
npy_ulonglong npy_lshiftull(npy_ulonglong a, npy_ulonglong b);\n\nNPY_INPLACE npy_byte npy_rshifthh(npy_byte a, npy_byte b);\nNPY_INPLACE npy_byte npy_lshifthh(npy_byte a, npy_byte b);\nNPY_INPLACE npy_short npy_rshifth(npy_short a, npy_short b);\nNPY_INPLACE npy_short npy_lshifth(npy_short a, npy_short b);\nNPY_INPLACE npy_int npy_rshift(npy_int a, npy_int b);\nNPY_INPLACE npy_int npy_lshift(npy_int a, npy_int b);\nNPY_INPLACE npy_long npy_rshiftl(npy_long a, npy_long b);\nNPY_INPLACE npy_long npy_lshiftl(npy_long a, npy_long b);\nNPY_INPLACE npy_longlong npy_rshiftll(npy_longlong a, npy_longlong b);\nNPY_INPLACE npy_longlong npy_lshiftll(npy_longlong a, npy_longlong b);\n\nNPY_INPLACE uint8_t npy_popcountuhh(npy_ubyte a);\nNPY_INPLACE uint8_t npy_popcountuh(npy_ushort a);\nNPY_INPLACE uint8_t npy_popcountu(npy_uint a);\nNPY_INPLACE uint8_t npy_popcountul(npy_ulong a);\nNPY_INPLACE uint8_t npy_popcountull(npy_ulonglong a);\nNPY_INPLACE uint8_t npy_popcounthh(npy_byte a);\nNPY_INPLACE uint8_t npy_popcounth(npy_short a);\nNPY_INPLACE uint8_t npy_popcount(npy_int a);\nNPY_INPLACE uint8_t npy_popcountl(npy_long a);\nNPY_INPLACE uint8_t npy_popcountll(npy_longlong a);\n\n/*\n * C99 double math funcs that need fixups or are blocklist-able\n */\nNPY_INPLACE double npy_sin(double x);\nNPY_INPLACE double npy_cos(double x);\nNPY_INPLACE double npy_tan(double x);\nNPY_INPLACE double npy_hypot(double x, double y);\nNPY_INPLACE double npy_log2(double x);\nNPY_INPLACE double npy_atan2(double x, double y);\n\n/* Mandatory C99 double math funcs, no blocklisting or fixups */\n/* defined for legacy reasons, should be deprecated at some point */\n#define npy_sinh sinh\n#define npy_cosh cosh\n#define npy_tanh tanh\n#define npy_asin asin\n#define npy_acos acos\n#define npy_atan atan\n#define npy_log log\n#define npy_log10 log10\n#define npy_cbrt cbrt\n#define npy_fabs fabs\n#define npy_ceil ceil\n#define npy_fmod fmod\n#define npy_floor floor\n#define npy_expm1 expm1\n#define 
npy_log1p log1p\n#define npy_acosh acosh\n#define npy_asinh asinh\n#define npy_atanh atanh\n#define npy_rint rint\n#define npy_trunc trunc\n#define npy_exp2 exp2\n#define npy_frexp frexp\n#define npy_ldexp ldexp\n#define npy_copysign copysign\n#define npy_exp exp\n#define npy_sqrt sqrt\n#define npy_pow pow\n#define npy_modf modf\n#define npy_nextafter nextafter\n\ndouble npy_spacing(double x);\n\n/*\n * IEEE 754 fpu handling\n */\n\n/* use builtins to avoid function calls in tight loops\n * only available if npy_config.h is available (= numpys own build) */\n#ifdef HAVE___BUILTIN_ISNAN\n #define npy_isnan(x) __builtin_isnan(x)\n#else\n #define npy_isnan(x) isnan(x)\n#endif\n\n\n/* only available if npy_config.h is available (= numpys own build) */\n#ifdef HAVE___BUILTIN_ISFINITE\n #define npy_isfinite(x) __builtin_isfinite(x)\n#else\n #define npy_isfinite(x) isfinite((x))\n#endif\n\n/* only available if npy_config.h is available (= numpys own build) */\n#ifdef HAVE___BUILTIN_ISINF\n #define npy_isinf(x) __builtin_isinf(x)\n#else\n #define npy_isinf(x) isinf((x))\n#endif\n\n#define npy_signbit(x) signbit((x))\n\n/*\n * float C99 math funcs that need fixups or are blocklist-able\n */\nNPY_INPLACE float npy_sinf(float x);\nNPY_INPLACE float npy_cosf(float x);\nNPY_INPLACE float npy_tanf(float x);\nNPY_INPLACE float npy_expf(float x);\nNPY_INPLACE float npy_sqrtf(float x);\nNPY_INPLACE float npy_hypotf(float x, float y);\nNPY_INPLACE float npy_log2f(float x);\nNPY_INPLACE float npy_atan2f(float x, float y);\nNPY_INPLACE float npy_powf(float x, float y);\nNPY_INPLACE float npy_modff(float x, float* y);\n\n/* Mandatory C99 float math funcs, no blocklisting or fixups */\n/* defined for legacy reasons, should be deprecated at some point */\n\n#define npy_sinhf sinhf\n#define npy_coshf coshf\n#define npy_tanhf tanhf\n#define npy_asinf asinf\n#define npy_acosf acosf\n#define npy_atanf atanf\n#define npy_logf logf\n#define npy_log10f log10f\n#define npy_cbrtf cbrtf\n#define 
npy_fabsf fabsf\n#define npy_ceilf ceilf\n#define npy_fmodf fmodf\n#define npy_floorf floorf\n#define npy_expm1f expm1f\n#define npy_log1pf log1pf\n#define npy_asinhf asinhf\n#define npy_acoshf acoshf\n#define npy_atanhf atanhf\n#define npy_rintf rintf\n#define npy_truncf truncf\n#define npy_exp2f exp2f\n#define npy_frexpf frexpf\n#define npy_ldexpf ldexpf\n#define npy_copysignf copysignf\n#define npy_nextafterf nextafterf\n\nfloat npy_spacingf(float x);\n\n/*\n * long double C99 double math funcs that need fixups or are blocklist-able\n */\nNPY_INPLACE npy_longdouble npy_sinl(npy_longdouble x);\nNPY_INPLACE npy_longdouble npy_cosl(npy_longdouble x);\nNPY_INPLACE npy_longdouble npy_tanl(npy_longdouble x);\nNPY_INPLACE npy_longdouble npy_expl(npy_longdouble x);\nNPY_INPLACE npy_longdouble npy_sqrtl(npy_longdouble x);\nNPY_INPLACE npy_longdouble npy_hypotl(npy_longdouble x, npy_longdouble y);\nNPY_INPLACE npy_longdouble npy_log2l(npy_longdouble x);\nNPY_INPLACE npy_longdouble npy_atan2l(npy_longdouble x, npy_longdouble y);\nNPY_INPLACE npy_longdouble npy_powl(npy_longdouble x, npy_longdouble y);\nNPY_INPLACE npy_longdouble npy_modfl(npy_longdouble x, npy_longdouble* y);\n\n/* Mandatory C99 double math funcs, no blocklisting or fixups */\n/* defined for legacy reasons, should be deprecated at some point */\n#define npy_sinhl sinhl\n#define npy_coshl coshl\n#define npy_tanhl tanhl\n#define npy_fabsl fabsl\n#define npy_floorl floorl\n#define npy_ceill ceill\n#define npy_rintl rintl\n#define npy_truncl truncl\n#define npy_cbrtl cbrtl\n#define npy_log10l log10l\n#define npy_logl logl\n#define npy_expm1l expm1l\n#define npy_asinl asinl\n#define npy_acosl acosl\n#define npy_atanl atanl\n#define npy_asinhl asinhl\n#define npy_acoshl acoshl\n#define npy_atanhl atanhl\n#define npy_log1pl log1pl\n#define npy_exp2l exp2l\n#define npy_fmodl fmodl\n#define npy_frexpl frexpl\n#define npy_ldexpl ldexpl\n#define npy_copysignl copysignl\n#define npy_nextafterl 
nextafterl\n\nnpy_longdouble npy_spacingl(npy_longdouble x);\n\n/*\n * Non standard functions\n */\nNPY_INPLACE double npy_deg2rad(double x);\nNPY_INPLACE double npy_rad2deg(double x);\nNPY_INPLACE double npy_logaddexp(double x, double y);\nNPY_INPLACE double npy_logaddexp2(double x, double y);\nNPY_INPLACE double npy_divmod(double x, double y, double *modulus);\nNPY_INPLACE double npy_heaviside(double x, double h0);\n\nNPY_INPLACE float npy_deg2radf(float x);\nNPY_INPLACE float npy_rad2degf(float x);\nNPY_INPLACE float npy_logaddexpf(float x, float y);\nNPY_INPLACE float npy_logaddexp2f(float x, float y);\nNPY_INPLACE float npy_divmodf(float x, float y, float *modulus);\nNPY_INPLACE float npy_heavisidef(float x, float h0);\n\nNPY_INPLACE npy_longdouble npy_deg2radl(npy_longdouble x);\nNPY_INPLACE npy_longdouble npy_rad2degl(npy_longdouble x);\nNPY_INPLACE npy_longdouble npy_logaddexpl(npy_longdouble x, npy_longdouble y);\nNPY_INPLACE npy_longdouble npy_logaddexp2l(npy_longdouble x, npy_longdouble y);\nNPY_INPLACE npy_longdouble npy_divmodl(npy_longdouble x, npy_longdouble y,\n npy_longdouble *modulus);\nNPY_INPLACE npy_longdouble npy_heavisidel(npy_longdouble x, npy_longdouble h0);\n\n#define npy_degrees npy_rad2deg\n#define npy_degreesf npy_rad2degf\n#define npy_degreesl npy_rad2degl\n\n#define npy_radians npy_deg2rad\n#define npy_radiansf npy_deg2radf\n#define npy_radiansl npy_deg2radl\n\n/*\n * Complex declarations\n */\n\nstatic inline double npy_creal(const npy_cdouble z)\n{\n#if defined(__cplusplus)\n return z._Val[0];\n#else\n return creal(z);\n#endif\n}\n\nstatic inline void npy_csetreal(npy_cdouble *z, const double r)\n{\n ((double *) z)[0] = r;\n}\n\nstatic inline double npy_cimag(const npy_cdouble z)\n{\n#if defined(__cplusplus)\n return z._Val[1];\n#else\n return cimag(z);\n#endif\n}\n\nstatic inline void npy_csetimag(npy_cdouble *z, const double i)\n{\n ((double *) z)[1] = i;\n}\n\nstatic inline float npy_crealf(const npy_cfloat z)\n{\n#if 
defined(__cplusplus)\n return z._Val[0];\n#else\n return crealf(z);\n#endif\n}\n\nstatic inline void npy_csetrealf(npy_cfloat *z, const float r)\n{\n ((float *) z)[0] = r;\n}\n\nstatic inline float npy_cimagf(const npy_cfloat z)\n{\n#if defined(__cplusplus)\n return z._Val[1];\n#else\n return cimagf(z);\n#endif\n}\n\nstatic inline void npy_csetimagf(npy_cfloat *z, const float i)\n{\n ((float *) z)[1] = i;\n}\n\nstatic inline npy_longdouble npy_creall(const npy_clongdouble z)\n{\n#if defined(__cplusplus)\n return (npy_longdouble)z._Val[0];\n#else\n return creall(z);\n#endif\n}\n\nstatic inline void npy_csetreall(npy_clongdouble *z, const longdouble_t r)\n{\n ((longdouble_t *) z)[0] = r;\n}\n\nstatic inline npy_longdouble npy_cimagl(const npy_clongdouble z)\n{\n#if defined(__cplusplus)\n return (npy_longdouble)z._Val[1];\n#else\n return cimagl(z);\n#endif\n}\n\nstatic inline void npy_csetimagl(npy_clongdouble *z, const longdouble_t i)\n{\n ((longdouble_t *) z)[1] = i;\n}\n\n#define NPY_CSETREAL(z, r) npy_csetreal(z, r)\n#define NPY_CSETIMAG(z, i) npy_csetimag(z, i)\n#define NPY_CSETREALF(z, r) npy_csetrealf(z, r)\n#define NPY_CSETIMAGF(z, i) npy_csetimagf(z, i)\n#define NPY_CSETREALL(z, r) npy_csetreall(z, r)\n#define NPY_CSETIMAGL(z, i) npy_csetimagl(z, i)\n\nstatic inline npy_cdouble npy_cpack(double x, double y)\n{\n npy_cdouble z;\n npy_csetreal(&z, x);\n npy_csetimag(&z, y);\n return z;\n}\n\nstatic inline npy_cfloat npy_cpackf(float x, float y)\n{\n npy_cfloat z;\n npy_csetrealf(&z, x);\n npy_csetimagf(&z, y);\n return z;\n}\n\nstatic inline npy_clongdouble npy_cpackl(npy_longdouble x, npy_longdouble y)\n{\n npy_clongdouble z;\n npy_csetreall(&z, x);\n npy_csetimagl(&z, y);\n return z;\n}\n\n/*\n * Double precision complex functions\n */\ndouble npy_cabs(npy_cdouble z);\ndouble npy_carg(npy_cdouble z);\n\nnpy_cdouble npy_cexp(npy_cdouble z);\nnpy_cdouble npy_clog(npy_cdouble z);\nnpy_cdouble npy_cpow(npy_cdouble x, npy_cdouble y);\n\nnpy_cdouble 
npy_csqrt(npy_cdouble z);\n\nnpy_cdouble npy_ccos(npy_cdouble z);\nnpy_cdouble npy_csin(npy_cdouble z);\nnpy_cdouble npy_ctan(npy_cdouble z);\n\nnpy_cdouble npy_ccosh(npy_cdouble z);\nnpy_cdouble npy_csinh(npy_cdouble z);\nnpy_cdouble npy_ctanh(npy_cdouble z);\n\nnpy_cdouble npy_cacos(npy_cdouble z);\nnpy_cdouble npy_casin(npy_cdouble z);\nnpy_cdouble npy_catan(npy_cdouble z);\n\nnpy_cdouble npy_cacosh(npy_cdouble z);\nnpy_cdouble npy_casinh(npy_cdouble z);\nnpy_cdouble npy_catanh(npy_cdouble z);\n\n/*\n * Single precision complex functions\n */\nfloat npy_cabsf(npy_cfloat z);\nfloat npy_cargf(npy_cfloat z);\n\nnpy_cfloat npy_cexpf(npy_cfloat z);\nnpy_cfloat npy_clogf(npy_cfloat z);\nnpy_cfloat npy_cpowf(npy_cfloat x, npy_cfloat y);\n\nnpy_cfloat npy_csqrtf(npy_cfloat z);\n\nnpy_cfloat npy_ccosf(npy_cfloat z);\nnpy_cfloat npy_csinf(npy_cfloat z);\nnpy_cfloat npy_ctanf(npy_cfloat z);\n\nnpy_cfloat npy_ccoshf(npy_cfloat z);\nnpy_cfloat npy_csinhf(npy_cfloat z);\nnpy_cfloat npy_ctanhf(npy_cfloat z);\n\nnpy_cfloat npy_cacosf(npy_cfloat z);\nnpy_cfloat npy_casinf(npy_cfloat z);\nnpy_cfloat npy_catanf(npy_cfloat z);\n\nnpy_cfloat npy_cacoshf(npy_cfloat z);\nnpy_cfloat npy_casinhf(npy_cfloat z);\nnpy_cfloat npy_catanhf(npy_cfloat z);\n\n\n/*\n * Extended precision complex functions\n */\nnpy_longdouble npy_cabsl(npy_clongdouble z);\nnpy_longdouble npy_cargl(npy_clongdouble z);\n\nnpy_clongdouble npy_cexpl(npy_clongdouble z);\nnpy_clongdouble npy_clogl(npy_clongdouble z);\nnpy_clongdouble npy_cpowl(npy_clongdouble x, npy_clongdouble y);\n\nnpy_clongdouble npy_csqrtl(npy_clongdouble z);\n\nnpy_clongdouble npy_ccosl(npy_clongdouble z);\nnpy_clongdouble npy_csinl(npy_clongdouble z);\nnpy_clongdouble npy_ctanl(npy_clongdouble z);\n\nnpy_clongdouble npy_ccoshl(npy_clongdouble z);\nnpy_clongdouble npy_csinhl(npy_clongdouble z);\nnpy_clongdouble npy_ctanhl(npy_clongdouble z);\n\nnpy_clongdouble npy_cacosl(npy_clongdouble z);\nnpy_clongdouble npy_casinl(npy_clongdouble 
z);\nnpy_clongdouble npy_catanl(npy_clongdouble z);\n\nnpy_clongdouble npy_cacoshl(npy_clongdouble z);\nnpy_clongdouble npy_casinhl(npy_clongdouble z);\nnpy_clongdouble npy_catanhl(npy_clongdouble z);\n\n\n/*\n * Functions that set the floating point error\n * status word.\n */\n\n/*\n * platform-dependent code translates floating point\n * status to an integer sum of these values\n */\n#define NPY_FPE_DIVIDEBYZERO 1\n#define NPY_FPE_OVERFLOW 2\n#define NPY_FPE_UNDERFLOW 4\n#define NPY_FPE_INVALID 8\n\nint npy_clear_floatstatus_barrier(char*);\nint npy_get_floatstatus_barrier(char*);\n/*\n * use caution with these - clang and gcc8.1 are known to reorder calls\n * to this form of the function which can defeat the check. The _barrier\n * form of the call is preferable, where the argument is\n * (char*)&local_variable\n */\nint npy_clear_floatstatus(void);\nint npy_get_floatstatus(void);\n\nvoid npy_set_floatstatus_divbyzero(void);\nvoid npy_set_floatstatus_overflow(void);\nvoid npy_set_floatstatus_underflow(void);\nvoid npy_set_floatstatus_invalid(void);\n\n#ifdef __cplusplus\n}\n#endif\n\n#if NPY_INLINE_MATH\n#include "npy_math_internal.h"\n#endif\n\n#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_MATH_H_ */\n
.venv\Lib\site-packages\numpy\_core\include\numpy\npy_math.h
npy_math.h
C
19,460
0.95
0.031561
0.512671
node-utils
69
2024-08-27T01:12:50.706523
MIT
false
85ca6cd78afd023523342f9b941be867
/*\n * This include file is provided for inclusion in Cython *.pyd files where\n * one would like to define the NPY_NO_DEPRECATED_API macro. It can be\n * included by\n *\n * cdef extern from "npy_no_deprecated_api.h": pass\n *\n */\n#ifndef NPY_NO_DEPRECATED_API\n\n/* put this check here since there may be multiple includes in C extensions. */\n#if defined(NUMPY_CORE_INCLUDE_NUMPY_NDARRAYTYPES_H_) || \\n defined(NUMPY_CORE_INCLUDE_NUMPY_NPY_DEPRECATED_API_H) || \\n defined(NUMPY_CORE_INCLUDE_NUMPY_OLD_DEFINES_H_)\n#error "npy_no_deprecated_api.h" must be first among numpy includes.\n#else\n#define NPY_NO_DEPRECATED_API NPY_API_VERSION\n#endif\n\n#endif /* NPY_NO_DEPRECATED_API */\n
.venv\Lib\site-packages\numpy\_core\include\numpy\npy_no_deprecated_api.h
npy_no_deprecated_api.h
C
698
0.95
0.1
0.888889
react-lib
665
2025-04-11T00:03:38.799130
Apache-2.0
false
c735cd1c24e6c99ab591477f76f9d138
#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_OS_H_\n#define NUMPY_CORE_INCLUDE_NUMPY_NPY_OS_H_\n\n#if defined(linux) || defined(__linux) || defined(__linux__)\n #define NPY_OS_LINUX\n#elif defined(__FreeBSD__) || defined(__NetBSD__) || \\n defined(__OpenBSD__) || defined(__DragonFly__)\n #define NPY_OS_BSD\n #ifdef __FreeBSD__\n #define NPY_OS_FREEBSD\n #elif defined(__NetBSD__)\n #define NPY_OS_NETBSD\n #elif defined(__OpenBSD__)\n #define NPY_OS_OPENBSD\n #elif defined(__DragonFly__)\n #define NPY_OS_DRAGONFLY\n #endif\n#elif defined(sun) || defined(__sun)\n #define NPY_OS_SOLARIS\n#elif defined(__CYGWIN__)\n #define NPY_OS_CYGWIN\n/* We are on Windows.*/\n#elif defined(_WIN32)\n /* We are using MinGW (64-bit or 32-bit)*/\n #if defined(__MINGW32__) || defined(__MINGW64__)\n #define NPY_OS_MINGW\n /* Otherwise, if _WIN64 is defined, we are targeting 64-bit Windows*/\n #elif defined(_WIN64)\n #define NPY_OS_WIN64\n /* Otherwise assume we are targeting 32-bit Windows*/\n #else\n #define NPY_OS_WIN32\n #endif\n#elif defined(__APPLE__)\n #define NPY_OS_DARWIN\n#elif defined(__HAIKU__)\n #define NPY_OS_HAIKU\n#else\n #define NPY_OS_UNKNOWN\n#endif\n\n#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_OS_H_ */\n
.venv\Lib\site-packages\numpy\_core\include\numpy\npy_os.h
npy_os.h
C
1,298
0.95
0.071429
0.975
awesome-app
421
2024-01-08T18:46:15.464624
GPL-3.0
false
6543fdc6b9b24cef66312b10f96ad9c8
#ifndef NUMPY_CORE_INCLUDE_NUMPY_NPY_NUMPYCONFIG_H_\n#define NUMPY_CORE_INCLUDE_NUMPY_NPY_NUMPYCONFIG_H_\n\n#include "_numpyconfig.h"\n\n/*\n * On Mac OS X, because there is only one configuration stage for all the archs\n * in universal builds, any macro which depends on the arch needs to be\n * hardcoded.\n *\n * Note that distutils/pip will attempt a universal2 build when Python itself\n * is built as universal2, hence this hardcoding is needed even if we do not\n * support universal2 wheels anymore (see gh-22796).\n * This code block can be removed after we have dropped the setup.py based\n * build completely.\n */\n#ifdef __APPLE__\n #undef NPY_SIZEOF_LONG\n\n #ifdef __LP64__\n #define NPY_SIZEOF_LONG 8\n #else\n #define NPY_SIZEOF_LONG 4\n #endif\n\n #undef NPY_SIZEOF_LONGDOUBLE\n #undef NPY_SIZEOF_COMPLEX_LONGDOUBLE\n #ifdef HAVE_LDOUBLE_IEEE_DOUBLE_LE\n #undef HAVE_LDOUBLE_IEEE_DOUBLE_LE\n #endif\n #ifdef HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE\n #undef HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE\n #endif\n\n #if defined(__arm64__)\n #define NPY_SIZEOF_LONGDOUBLE 8\n #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 16\n #define HAVE_LDOUBLE_IEEE_DOUBLE_LE 1\n #elif defined(__x86_64)\n #define NPY_SIZEOF_LONGDOUBLE 16\n #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32\n #define HAVE_LDOUBLE_INTEL_EXTENDED_16_BYTES_LE 1\n #elif defined (__i386)\n #define NPY_SIZEOF_LONGDOUBLE 12\n #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 24\n #elif defined(__ppc__) || defined (__ppc64__)\n #define NPY_SIZEOF_LONGDOUBLE 16\n #define NPY_SIZEOF_COMPLEX_LONGDOUBLE 32\n #else\n #error "unknown architecture"\n #endif\n#endif\n\n\n/**\n * To help with both NPY_TARGET_VERSION and the NPY_NO_DEPRECATED_API macro,\n * we include API version numbers for specific versions of NumPy.\n * To exclude all API that was deprecated as of 1.7, add the following before\n * #including any NumPy headers:\n * #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION\n * The same is true for NPY_TARGET_VERSION, although NumPy will 
default to\n * a backwards compatible build anyway.\n */\n#define NPY_1_7_API_VERSION 0x00000007\n#define NPY_1_8_API_VERSION 0x00000008\n#define NPY_1_9_API_VERSION 0x00000009\n#define NPY_1_10_API_VERSION 0x0000000a\n#define NPY_1_11_API_VERSION 0x0000000a\n#define NPY_1_12_API_VERSION 0x0000000a\n#define NPY_1_13_API_VERSION 0x0000000b\n#define NPY_1_14_API_VERSION 0x0000000c\n#define NPY_1_15_API_VERSION 0x0000000c\n#define NPY_1_16_API_VERSION 0x0000000d\n#define NPY_1_17_API_VERSION 0x0000000d\n#define NPY_1_18_API_VERSION 0x0000000d\n#define NPY_1_19_API_VERSION 0x0000000d\n#define NPY_1_20_API_VERSION 0x0000000e\n#define NPY_1_21_API_VERSION 0x0000000e\n#define NPY_1_22_API_VERSION 0x0000000f\n#define NPY_1_23_API_VERSION 0x00000010\n#define NPY_1_24_API_VERSION 0x00000010\n#define NPY_1_25_API_VERSION 0x00000011\n#define NPY_2_0_API_VERSION 0x00000012\n#define NPY_2_1_API_VERSION 0x00000013\n#define NPY_2_2_API_VERSION 0x00000013\n#define NPY_2_3_API_VERSION 0x00000014\n\n\n/*\n * Binary compatibility version number. This number is increased\n * whenever the C-API is changed such that binary compatibility is\n * broken, i.e. whenever a recompile of extension modules is needed.\n */\n#define NPY_VERSION NPY_ABI_VERSION\n\n/*\n * Minor API version we are compiling to be compatible with. The version\n * Number is always increased when the API changes via: `NPY_API_VERSION`\n * (and should maybe just track the NumPy version).\n *\n * If we have an internal build, we always target the current version of\n * course.\n *\n * For downstream users, we default to an older version to provide them with\n * maximum compatibility by default. Downstream can choose to extend that\n * default, or narrow it down if they wish to use newer API. 
If you adjust\n * this, consider the Python version support (example for 1.25.x):\n *\n * NumPy 1.25.x supports Python: 3.9 3.10 3.11 (3.12)\n * NumPy 1.19.x supports Python: 3.6 3.7 3.8 3.9\n * NumPy 1.17.x supports Python: 3.5 3.6 3.7 3.8\n * NumPy 1.15.x supports Python: ... 3.6 3.7\n *\n * Users of the stable ABI may wish to target the last Python that is not\n * end of life. This would be 3.8 at NumPy 1.25 release time.\n * 1.17 as default was the choice of oldest-support-numpy at the time and\n * has in practice no limit (compared to 1.19). Even earlier becomes legacy.\n */\n#if defined(NPY_INTERNAL_BUILD) && NPY_INTERNAL_BUILD\n /* NumPy internal build, always use current version. */\n #define NPY_FEATURE_VERSION NPY_API_VERSION\n#elif defined(NPY_TARGET_VERSION) && NPY_TARGET_VERSION\n /* user provided a target version, use it */\n #define NPY_FEATURE_VERSION NPY_TARGET_VERSION\n#else\n /* Use the default (increase when dropping Python 3.11 support) */\n #define NPY_FEATURE_VERSION NPY_1_23_API_VERSION\n#endif\n\n/* Sanity check the (requested) feature version */\n#if NPY_FEATURE_VERSION > NPY_API_VERSION\n #error "NPY_TARGET_VERSION higher than NumPy headers!"\n#elif NPY_FEATURE_VERSION < NPY_1_15_API_VERSION\n /* No support for irrelevant old targets, no need for error, but warn. 
*/\n #ifndef _MSC_VER\n #warning "Requested NumPy target lower than supported NumPy 1.15."\n #else\n #define _WARN___STR2__(x) #x\n #define _WARN___STR1__(x) _WARN___STR2__(x)\n #define _WARN___LOC__ __FILE__ "(" _WARN___STR1__(__LINE__) ") : Warning Msg: "\n #pragma message(_WARN___LOC__"Requested NumPy target lower than supported NumPy 1.15.")\n #endif\n#endif\n\n/*\n * We define a human readable translation to the Python version of NumPy\n * for error messages (and also to allow grepping the binaries for conda).\n */\n#if NPY_FEATURE_VERSION == NPY_1_7_API_VERSION\n #define NPY_FEATURE_VERSION_STRING "1.7"\n#elif NPY_FEATURE_VERSION == NPY_1_8_API_VERSION\n #define NPY_FEATURE_VERSION_STRING "1.8"\n#elif NPY_FEATURE_VERSION == NPY_1_9_API_VERSION\n #define NPY_FEATURE_VERSION_STRING "1.9"\n#elif NPY_FEATURE_VERSION == NPY_1_10_API_VERSION /* also 1.11, 1.12 */\n #define NPY_FEATURE_VERSION_STRING "1.10"\n#elif NPY_FEATURE_VERSION == NPY_1_13_API_VERSION\n #define NPY_FEATURE_VERSION_STRING "1.13"\n#elif NPY_FEATURE_VERSION == NPY_1_14_API_VERSION /* also 1.15 */\n #define NPY_FEATURE_VERSION_STRING "1.14"\n#elif NPY_FEATURE_VERSION == NPY_1_16_API_VERSION /* also 1.17, 1.18, 1.19 */\n #define NPY_FEATURE_VERSION_STRING "1.16"\n#elif NPY_FEATURE_VERSION == NPY_1_20_API_VERSION /* also 1.21 */\n #define NPY_FEATURE_VERSION_STRING "1.20"\n#elif NPY_FEATURE_VERSION == NPY_1_22_API_VERSION\n #define NPY_FEATURE_VERSION_STRING "1.22"\n#elif NPY_FEATURE_VERSION == NPY_1_23_API_VERSION /* also 1.24 */\n #define NPY_FEATURE_VERSION_STRING "1.23"\n#elif NPY_FEATURE_VERSION == NPY_1_25_API_VERSION\n #define NPY_FEATURE_VERSION_STRING "1.25"\n#elif NPY_FEATURE_VERSION == NPY_2_0_API_VERSION\n #define NPY_FEATURE_VERSION_STRING "2.0"\n#elif NPY_FEATURE_VERSION == NPY_2_1_API_VERSION\n #define NPY_FEATURE_VERSION_STRING "2.1"\n#elif NPY_FEATURE_VERSION == NPY_2_3_API_VERSION\n #define NPY_FEATURE_VERSION_STRING "2.3"\n#else\n #error "Missing version string define for new 
NumPy version."\n#endif\n\n\n#endif /* NUMPY_CORE_INCLUDE_NUMPY_NPY_NUMPYCONFIG_H_ */\n
.venv\Lib\site-packages\numpy\_core\include\numpy\numpyconfig.h
numpyconfig.h
C
7,515
0.95
0.082418
1
awesome-app
789
2023-12-01T16:47:07.665732
Apache-2.0
false
d0bb4ca911d6d656cc651b4ab7a76a5c
#ifndef NUMPY_CORE_INCLUDE_NUMPY_UFUNCOBJECT_H_\n#define NUMPY_CORE_INCLUDE_NUMPY_UFUNCOBJECT_H_\n\n#include <numpy/npy_math.h>\n#include <numpy/npy_common.h>\n\n#ifdef __cplusplus\nextern "C" {\n#endif\n\n/*\n * The legacy generic inner loop for a standard element-wise or\n * generalized ufunc.\n */\ntypedef void (*PyUFuncGenericFunction)\n (char **args,\n npy_intp const *dimensions,\n npy_intp const *strides,\n void *innerloopdata);\n\n/*\n * The most generic one-dimensional inner loop for\n * a masked standard element-wise ufunc. "Masked" here means that it skips\n * doing calculations on any items for which the maskptr array has a true\n * value.\n */\ntypedef void (PyUFunc_MaskedStridedInnerLoopFunc)(\n char **dataptrs, npy_intp *strides,\n char *maskptr, npy_intp mask_stride,\n npy_intp count,\n NpyAuxData *innerloopdata);\n\n/* Forward declaration for the type resolver and loop selector typedefs */\nstruct _tagPyUFuncObject;\n\n/*\n * Given the operands for calling a ufunc, should determine the\n * calculation input and output data types and return an inner loop function.\n * This function should validate that the casting rule is being followed,\n * and fail if it is not.\n *\n * For backwards compatibility, the regular type resolution function does not\n * support auxiliary data with object semantics. The type resolution call\n * which returns a masked generic function returns a standard NpyAuxData\n * object, for which the NPY_AUXDATA_FREE and NPY_AUXDATA_CLONE macros\n * work.\n *\n * ufunc: The ufunc object.\n * casting: The 'casting' parameter provided to the ufunc.\n * operands: An array of length (ufunc->nin + ufunc->nout),\n * with the output parameters possibly NULL.\n * type_tup: Either NULL, or the type_tup passed to the ufunc.\n * out_dtypes: An array which should be populated with new\n * references to (ufunc->nin + ufunc->nout) new\n * dtypes, one for each input and output. 
These
 * dtypes should all be in native-endian format.
 *
 * Should return 0 on success, -1 on failure (with exception set),
 * or -2 if Py_NotImplemented should be returned.
 */
typedef int (PyUFunc_TypeResolutionFunc)(
                        struct _tagPyUFuncObject *ufunc,
                        NPY_CASTING casting,
                        PyArrayObject **operands,
                        PyObject *type_tup,
                        PyArray_Descr **out_dtypes);

/*
 * This is the signature for the functions that may be assigned to the
 * `process_core_dims_func` field of the PyUFuncObject structure.
 * Implementation of this function is optional.  This function is only used
 * by generalized ufuncs (i.e. those with the field `core_enabled` set to 1).
 * The function is called by the ufunc during the processing of the arguments
 * of a call of the ufunc.  The function can check the core dimensions of the
 * input and output arrays and return -1 with an exception set if any
 * requirements are not satisfied.  If the caller of the ufunc didn't provide
 * output arrays, the core dimensions associated with the output arrays (i.e.
 * those that are not also used in input arrays) will have the value -1 in
 * `core_dim_sizes`.  This function can replace any output core dimensions
 * that are -1 with a value that is appropriate for the ufunc.
 *
 * Parameter        Description
 * ---------------  ------------------------------------------------------
 * ufunc            The ufunc object
 * core_dim_sizes   An array with length `ufunc->core_num_dim_ix`.
 *                  The core dimensions of the arrays passed to the ufunc
 *                  will have been set.  If the caller of the ufunc didn't
 *                  provide the output array(s), the output-only core
 *                  dimensions will have the value -1.
 *
 * The function must not change any element in `core_dim_sizes` that is
 * not -1 on input.  Doing so will result in incorrect output from the
 * ufunc, and could result in a crash of the Python interpreter.
 *
 * The function must return 0 on success, -1 on failure (with an exception
 * set).
 */
typedef int (PyUFunc_ProcessCoreDimsFunc)(
                        struct _tagPyUFuncObject *ufunc,
                        npy_intp *core_dim_sizes);

/*
 * The ufunc object proper.  NOTE(review): this layout is public ABI —
 * field order, types, and the version-gated tail sections below must not
 * be altered; new fields are only appended under a new
 * NPY_FEATURE_VERSION guard.
 */
typedef struct _tagPyUFuncObject {
        PyObject_HEAD
        /*
         * nin: Number of inputs
         * nout: Number of outputs
         * nargs: Always nin + nout (Why is it stored?)
         */
        int nin, nout, nargs;

        /*
         * Identity for reduction, any of PyUFunc_One, PyUFunc_Zero
         * PyUFunc_MinusOne, PyUFunc_None, PyUFunc_ReorderableNone,
         * PyUFunc_IdentityValue.
         */
        int identity;

        /* Array of one-dimensional core loops */
        PyUFuncGenericFunction *functions;
        /* Array of funcdata that gets passed into the functions */
        void *const *data;
        /* The number of elements in 'functions' and 'data' */
        int ntypes;

        /* Used to be unused field 'check_return'; kept for ABI layout */
        int reserved1;

        /* The name of the ufunc */
        const char *name;

        /* Array of type numbers, of size ('nargs' * 'ntypes') */
        const char *types;

        /* Documentation string */
        const char *doc;

        void *ptr;
        PyObject *obj;
        PyObject *userloops;

        /* generalized ufunc parameters */

        /* 0 for scalar ufunc; 1 for generalized ufunc */
        int core_enabled;
        /* number of distinct dimension names in signature */
        int core_num_dim_ix;

        /*
         * dimension indices of input/output argument k are stored in
         * core_dim_ixs[core_offsets[k]..core_offsets[k]+core_num_dims[k]-1]
         */

        /* numbers of core dimensions of each argument */
        int *core_num_dims;
        /*
         * dimension indices in a flatted form; indices
         * are in the range of [0,core_num_dim_ix)
         */
        int *core_dim_ixs;
        /*
         * positions of 1st core dimensions of each
         * argument in core_dim_ixs, equivalent to cumsum(core_num_dims)
         */
        int *core_offsets;
        /* signature string for printing purpose */
        char *core_signature;

        /*
         * A function which resolves the types and fills an array
         * with the dtypes for the inputs and outputs.
         */
        PyUFunc_TypeResolutionFunc *type_resolver;

        /* A dictionary to monkeypatch ufuncs */
        PyObject *dict;

        /*
         * This was blocked off to be the "new" inner loop selector in 1.7,
         * but this was never implemented. (This is also why the above
         * selector is called the "legacy" selector.)
         * Under the limited API vectorcallfunc is unavailable, so a plain
         * pointer keeps the struct size identical.
         */
        #ifndef Py_LIMITED_API
        vectorcallfunc vectorcall;
        #else
        void *vectorcall;
        #endif

        /* Was previously the `PyUFunc_MaskedInnerLoopSelectionFunc` */
        void *reserved3;

        /*
         * List of flags for each operand when ufunc is called by nditer object.
         * These flags will be used in addition to the default flags for each
         * operand set by nditer object.
         */
        npy_uint32 *op_flags;

        /*
         * List of global flags used when ufunc is called by nditer object.
         * These flags will be used in addition to the default global flags
         * set by nditer object.
         */
        npy_uint32 iter_flags;

        /* New in NPY_API_VERSION 0x0000000D and above */
        #if NPY_FEATURE_VERSION >= NPY_1_16_API_VERSION
        /*
         * for each core_num_dim_ix distinct dimension names,
         * the possible "frozen" size (-1 if not frozen).
         */
        npy_intp *core_dim_sizes;

        /*
         * for each distinct core dimension, a set of UFUNC_CORE_DIM* flags
         */
        npy_uint32 *core_dim_flags;

        /* Identity for reduction, when identity == PyUFunc_IdentityValue */
        PyObject *identity_value;
        #endif /* NPY_FEATURE_VERSION >= NPY_1_16_API_VERSION */

        /* New in NPY_API_VERSION 0x0000000F and above */
        #if NPY_FEATURE_VERSION >= NPY_1_22_API_VERSION
        /* New private fields related to dispatching */
        void *_dispatch_cache;
        /* A PyListObject of `(tuple of DTypes, ArrayMethod/Promoter)` */
        PyObject *_loops;
        #endif
        #if NPY_FEATURE_VERSION >= NPY_2_1_API_VERSION
        /*
         * Optional function to process core dimensions of a gufunc.
         */
        PyUFunc_ProcessCoreDimsFunc *process_core_dims_func;
        #endif
} PyUFuncObject;

#include "arrayobject.h"
/* Generalized ufunc; 0x0001 reserved for possible use as CORE_ENABLED */
/* the core dimension's size will be determined by the operands. */
#define UFUNC_CORE_DIM_SIZE_INFERRED 0x0002
/* the core dimension may be absent */
#define UFUNC_CORE_DIM_CAN_IGNORE 0x0004
/*
 * flags inferred during execution
 * NOTE(review): deliberately outside the low flag range above — it marks
 * a runtime condition, not a signature property.
 */
#define UFUNC_CORE_DIM_MISSING 0x00040000


#define UFUNC_OBJ_ISOBJECT 1
#define UFUNC_OBJ_NEEDS_API 2


/*
 * Release/reacquire the GIL around a loop, but only when the loop does not
 * need the Python C API.  `loop` and `_save` are expected in the caller's
 * scope.  (The trailing ';' after while (0) is historical; callers invoke
 * these macros without their own semicolon.)
 */
#if NPY_ALLOW_THREADS
#define NPY_LOOP_BEGIN_THREADS do {if (!(loop->obj & UFUNC_OBJ_NEEDS_API)) _save = PyEval_SaveThread();} while (0);
#define NPY_LOOP_END_THREADS   do {if (!(loop->obj & UFUNC_OBJ_NEEDS_API)) PyEval_RestoreThread(_save);} while (0);
#else
#define NPY_LOOP_BEGIN_THREADS
#define NPY_LOOP_END_THREADS
#endif

/*
 * UFunc has unit of 0, and the order of operations can be reordered
 * This case allows reduction with multiple axes at once.
 */
#define PyUFunc_Zero 0
/*
 * UFunc has unit of 1, and the order of operations can be reordered
 * This case allows reduction with multiple axes at once.
 */
#define PyUFunc_One 1
/*
 * UFunc has unit of -1, and the order of operations can be reordered
 * This case allows reduction with multiple axes at once.  Intended for
 * bitwise_and reduction.
 */
#define PyUFunc_MinusOne 2
/*
 * UFunc has no unit, and the order of operations cannot be reordered.
 * This case does not allow reduction with multiple axes at once.
 */
#define PyUFunc_None -1
/*
 * UFunc has no unit, and the order of operations can be reordered
 * This case allows reduction with multiple axes at once.
 */
#define PyUFunc_ReorderableNone -2
/*
 * UFunc unit is an identity_value, and the order of operations can be reordered
 * This case allows reduction with multiple axes at once.
 */
#define PyUFunc_IdentityValue -3


#define UFUNC_REDUCE 0
#define UFUNC_ACCUMULATE 1
#define UFUNC_REDUCEAT 2
#define UFUNC_OUTER 3


/* Payload for ufuncs created from a Python callable (PyUFunc_FromFuncAndData
 * with a Python function): arity plus the callable itself. */
typedef struct {
        int nin;
        int nout;
        PyObject *callable;
} PyUFunc_PyFuncData;

/* A linked-list of function information for
   user-defined 1-d loops.
 */
typedef struct _loop1d_info {
        PyUFuncGenericFunction func;
        void *data;
        int *arg_types;
        struct _loop1d_info *next;
        int nargs;
        PyArray_Descr **arg_dtypes;
} PyUFunc_Loop1d;


#define UFUNC_PYVALS_NAME "UFUNC_PYVALS"

/* THESE MACROS ARE DEPRECATED.
 * Use npy_set_floatstatus_* in the npymath library.
 */
#define UFUNC_FPE_DIVIDEBYZERO NPY_FPE_DIVIDEBYZERO
#define UFUNC_FPE_OVERFLOW NPY_FPE_OVERFLOW
#define UFUNC_FPE_UNDERFLOW NPY_FPE_UNDERFLOW
#define UFUNC_FPE_INVALID NPY_FPE_INVALID

/* Make sure it gets defined if it isn't already */
#ifndef UFUNC_NOFPE
/* Clear the floating point exception default of Borland C++ */
#if defined(__BORLANDC__)
#define UFUNC_NOFPE _control87(MCW_EM, MCW_EM);
#else
#define UFUNC_NOFPE
#endif
#endif

#include "__ufunc_api.h"

#ifdef __cplusplus
}
#endif

#endif  /* NUMPY_CORE_INCLUDE_NUMPY_UFUNCOBJECT_H_ */
.venv\Lib\site-packages\numpy\_core\include\numpy\ufuncobject.h
ufuncobject.h
C
12,123
0.95
0.145773
0.763514
react-lib
164
2025-02-26T22:07:20.189253
GPL-3.0
false
331697346edfe6b5e4a17e8322a034ab