dheeena commited on
Commit
a481509
·
verified ·
1 Parent(s): e5dbee7

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. venv/lib/python3.13/site-packages/numpy/__pycache__/__config__.cpython-313.pyc +0 -0
  2. venv/lib/python3.13/site-packages/numpy/__pycache__/__init__.cpython-313.pyc +0 -0
  3. venv/lib/python3.13/site-packages/numpy/__pycache__/_array_api_info.cpython-313.pyc +0 -0
  4. venv/lib/python3.13/site-packages/numpy/__pycache__/_configtool.cpython-313.pyc +0 -0
  5. venv/lib/python3.13/site-packages/numpy/__pycache__/_distributor_init.cpython-313.pyc +0 -0
  6. venv/lib/python3.13/site-packages/numpy/__pycache__/_expired_attrs_2_0.cpython-313.pyc +0 -0
  7. venv/lib/python3.13/site-packages/numpy/__pycache__/_globals.cpython-313.pyc +0 -0
  8. venv/lib/python3.13/site-packages/numpy/__pycache__/_pytesttester.cpython-313.pyc +0 -0
  9. venv/lib/python3.13/site-packages/numpy/__pycache__/conftest.cpython-313.pyc +0 -0
  10. venv/lib/python3.13/site-packages/numpy/__pycache__/dtypes.cpython-313.pyc +0 -0
  11. venv/lib/python3.13/site-packages/numpy/__pycache__/exceptions.cpython-313.pyc +0 -0
  12. venv/lib/python3.13/site-packages/numpy/__pycache__/matlib.cpython-313.pyc +0 -0
  13. venv/lib/python3.13/site-packages/numpy/__pycache__/version.cpython-313.pyc +0 -0
  14. venv/lib/python3.13/site-packages/numpy/_core/__init__.py +186 -0
  15. venv/lib/python3.13/site-packages/numpy/_core/__init__.pyi +2 -0
  16. venv/lib/python3.13/site-packages/numpy/_core/_add_newdocs.py +0 -0
  17. venv/lib/python3.13/site-packages/numpy/_core/_add_newdocs.pyi +3 -0
  18. venv/lib/python3.13/site-packages/numpy/_core/_add_newdocs_scalars.py +390 -0
  19. venv/lib/python3.13/site-packages/numpy/_core/_add_newdocs_scalars.pyi +16 -0
  20. venv/lib/python3.13/site-packages/numpy/_core/_asarray.py +134 -0
  21. venv/lib/python3.13/site-packages/numpy/_core/_asarray.pyi +41 -0
  22. venv/lib/python3.13/site-packages/numpy/_core/_dtype.py +366 -0
  23. venv/lib/python3.13/site-packages/numpy/_core/_dtype.pyi +58 -0
  24. venv/lib/python3.13/site-packages/numpy/_core/_dtype_ctypes.py +120 -0
  25. venv/lib/python3.13/site-packages/numpy/_core/_dtype_ctypes.pyi +83 -0
  26. venv/lib/python3.13/site-packages/numpy/_core/_exceptions.py +162 -0
  27. venv/lib/python3.13/site-packages/numpy/_core/_exceptions.pyi +55 -0
  28. venv/lib/python3.13/site-packages/numpy/_core/_internal.py +958 -0
  29. venv/lib/python3.13/site-packages/numpy/_core/_internal.pyi +72 -0
  30. venv/lib/python3.13/site-packages/numpy/_core/_machar.py +355 -0
  31. venv/lib/python3.13/site-packages/numpy/_core/_machar.pyi +55 -0
  32. venv/lib/python3.13/site-packages/numpy/_core/_methods.py +255 -0
  33. venv/lib/python3.13/site-packages/numpy/_core/_methods.pyi +22 -0
  34. venv/lib/python3.13/site-packages/numpy/_core/_operand_flag_tests.cpython-313-x86_64-linux-gnu.so +0 -0
  35. venv/lib/python3.13/site-packages/numpy/_core/_rational_tests.cpython-313-x86_64-linux-gnu.so +0 -0
  36. venv/lib/python3.13/site-packages/numpy/_core/_simd.pyi +25 -0
  37. venv/lib/python3.13/site-packages/numpy/_core/_string_helpers.py +100 -0
  38. venv/lib/python3.13/site-packages/numpy/_core/_string_helpers.pyi +12 -0
  39. venv/lib/python3.13/site-packages/numpy/_core/_struct_ufunc_tests.cpython-313-x86_64-linux-gnu.so +0 -0
  40. venv/lib/python3.13/site-packages/numpy/_core/_type_aliases.py +119 -0
  41. venv/lib/python3.13/site-packages/numpy/_core/_type_aliases.pyi +97 -0
  42. venv/lib/python3.13/site-packages/numpy/_core/_ufunc_config.py +491 -0
  43. venv/lib/python3.13/site-packages/numpy/_core/_ufunc_config.pyi +78 -0
  44. venv/lib/python3.13/site-packages/numpy/_core/_umath_tests.cpython-313-x86_64-linux-gnu.so +0 -0
  45. venv/lib/python3.13/site-packages/numpy/_core/arrayprint.py +1775 -0
  46. venv/lib/python3.13/site-packages/numpy/_core/arrayprint.pyi +238 -0
  47. venv/lib/python3.13/site-packages/numpy/_core/cversions.py +13 -0
  48. venv/lib/python3.13/site-packages/numpy/_core/defchararray.py +1427 -0
  49. venv/lib/python3.13/site-packages/numpy/_core/defchararray.pyi +1135 -0
  50. venv/lib/python3.13/site-packages/numpy/_core/einsumfunc.py +1498 -0
venv/lib/python3.13/site-packages/numpy/__pycache__/__config__.cpython-313.pyc ADDED
Binary file (5.15 kB). View file
 
venv/lib/python3.13/site-packages/numpy/__pycache__/__init__.cpython-313.pyc ADDED
Binary file (28.2 kB). View file
 
venv/lib/python3.13/site-packages/numpy/__pycache__/_array_api_info.cpython-313.pyc ADDED
Binary file (9.96 kB). View file
 
venv/lib/python3.13/site-packages/numpy/__pycache__/_configtool.cpython-313.pyc ADDED
Binary file (1.68 kB). View file
 
venv/lib/python3.13/site-packages/numpy/__pycache__/_distributor_init.cpython-313.pyc ADDED
Binary file (647 Bytes). View file
 
venv/lib/python3.13/site-packages/numpy/__pycache__/_expired_attrs_2_0.cpython-313.pyc ADDED
Binary file (4.28 kB). View file
 
venv/lib/python3.13/site-packages/numpy/__pycache__/_globals.cpython-313.pyc ADDED
Binary file (3.89 kB). View file
 
venv/lib/python3.13/site-packages/numpy/__pycache__/_pytesttester.cpython-313.pyc ADDED
Binary file (6.45 kB). View file
 
venv/lib/python3.13/site-packages/numpy/__pycache__/conftest.cpython-313.pyc ADDED
Binary file (10.5 kB). View file
 
venv/lib/python3.13/site-packages/numpy/__pycache__/dtypes.cpython-313.pyc ADDED
Binary file (1.53 kB). View file
 
venv/lib/python3.13/site-packages/numpy/__pycache__/exceptions.cpython-313.pyc ADDED
Binary file (8.45 kB). View file
 
venv/lib/python3.13/site-packages/numpy/__pycache__/matlib.cpython-313.pyc ADDED
Binary file (10.9 kB). View file
 
venv/lib/python3.13/site-packages/numpy/__pycache__/version.cpython-313.pyc ADDED
Binary file (549 Bytes). View file
 
venv/lib/python3.13/site-packages/numpy/_core/__init__.py ADDED
@@ -0,0 +1,186 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Contains the core of NumPy: ndarray, ufuncs, dtypes, etc.
3
+
4
+ Please note that this module is private. All functions and objects
5
+ are available in the main ``numpy`` namespace - use that instead.
6
+
7
+ """
8
+
9
+ import os
10
+
11
+ from numpy.version import version as __version__
12
+
13
+ # disables OpenBLAS affinity setting of the main thread that limits
14
+ # python threads or processes to one core
15
+ env_added = []
16
+ for envkey in ['OPENBLAS_MAIN_FREE', 'GOTOBLAS_MAIN_FREE']:
17
+ if envkey not in os.environ:
18
+ os.environ[envkey] = '1'
19
+ env_added.append(envkey)
20
+
21
+ try:
22
+ from . import multiarray
23
+ except ImportError as exc:
24
+ import sys
25
+ msg = """
26
+
27
+ IMPORTANT: PLEASE READ THIS FOR ADVICE ON HOW TO SOLVE THIS ISSUE!
28
+
29
+ Importing the numpy C-extensions failed. This error can happen for
30
+ many reasons, often due to issues with your setup or how NumPy was
31
+ installed.
32
+
33
+ We have compiled some common reasons and troubleshooting tips at:
34
+
35
+ https://numpy.org/devdocs/user/troubleshooting-importerror.html
36
+
37
+ Please note and check the following:
38
+
39
+ * The Python version is: Python%d.%d from "%s"
40
+ * The NumPy version is: "%s"
41
+
42
+ and make sure that they are the versions you expect.
43
+ Please carefully study the documentation linked above for further help.
44
+
45
+ Original error was: %s
46
+ """ % (sys.version_info[0], sys.version_info[1], sys.executable,
47
+ __version__, exc)
48
+ raise ImportError(msg) from exc
49
+ finally:
50
+ for envkey in env_added:
51
+ del os.environ[envkey]
52
+ del envkey
53
+ del env_added
54
+ del os
55
+
56
+ from . import umath
57
+
58
+ # Check that multiarray,umath are pure python modules wrapping
59
+ # _multiarray_umath and not either of the old c-extension modules
60
+ if not (hasattr(multiarray, '_multiarray_umath') and
61
+ hasattr(umath, '_multiarray_umath')):
62
+ import sys
63
+ path = sys.modules['numpy'].__path__
64
+ msg = ("Something is wrong with the numpy installation. "
65
+ "While importing we detected an older version of "
66
+ "numpy in {}. One method of fixing this is to repeatedly uninstall "
67
+ "numpy until none is found, then reinstall this version.")
68
+ raise ImportError(msg.format(path))
69
+
70
+ from . import numerictypes as nt
71
+ from .numerictypes import sctypeDict, sctypes
72
+
73
+ multiarray.set_typeDict(nt.sctypeDict)
74
+ from . import (
75
+ _machar,
76
+ einsumfunc,
77
+ fromnumeric,
78
+ function_base,
79
+ getlimits,
80
+ numeric,
81
+ shape_base,
82
+ )
83
+ from .einsumfunc import *
84
+ from .fromnumeric import *
85
+ from .function_base import *
86
+ from .getlimits import *
87
+
88
+ # Note: module name memmap is overwritten by a class with same name
89
+ from .memmap import *
90
+ from .numeric import *
91
+ from .records import recarray, record
92
+ from .shape_base import *
93
+
94
+ del nt
95
+
96
+ # do this after everything else, to minimize the chance of this misleadingly
97
+ # appearing in an import-time traceback
98
+ # add these for module-freeze analysis (like PyInstaller)
99
+ from . import (
100
+ _add_newdocs,
101
+ _add_newdocs_scalars,
102
+ _dtype,
103
+ _dtype_ctypes,
104
+ _internal,
105
+ _methods,
106
+ )
107
+ from .numeric import absolute as abs
108
+
109
+ acos = numeric.arccos
110
+ acosh = numeric.arccosh
111
+ asin = numeric.arcsin
112
+ asinh = numeric.arcsinh
113
+ atan = numeric.arctan
114
+ atanh = numeric.arctanh
115
+ atan2 = numeric.arctan2
116
+ concat = numeric.concatenate
117
+ bitwise_left_shift = numeric.left_shift
118
+ bitwise_invert = numeric.invert
119
+ bitwise_right_shift = numeric.right_shift
120
+ permute_dims = numeric.transpose
121
+ pow = numeric.power
122
+
123
+ __all__ = [
124
+ "abs", "acos", "acosh", "asin", "asinh", "atan", "atanh", "atan2",
125
+ "bitwise_invert", "bitwise_left_shift", "bitwise_right_shift", "concat",
126
+ "pow", "permute_dims", "memmap", "sctypeDict", "record", "recarray"
127
+ ]
128
+ __all__ += numeric.__all__
129
+ __all__ += function_base.__all__
130
+ __all__ += getlimits.__all__
131
+ __all__ += shape_base.__all__
132
+ __all__ += einsumfunc.__all__
133
+
134
+
135
+ def _ufunc_reduce(func):
136
+ # Report the `__name__`. pickle will try to find the module. Note that
137
+ # pickle supports for this `__name__` to be a `__qualname__`. It may
138
+ # make sense to add a `__qualname__` to ufuncs, to allow this more
139
+ # explicitly (Numba has ufuncs as attributes).
140
+ # See also: https://github.com/dask/distributed/issues/3450
141
+ return func.__name__
142
+
143
+
144
+ def _DType_reconstruct(scalar_type):
145
+ # This is a work-around to pickle type(np.dtype(np.float64)), etc.
146
+ # and it should eventually be replaced with a better solution, e.g. when
147
+ # DTypes become HeapTypes.
148
+ return type(dtype(scalar_type))
149
+
150
+
151
+ def _DType_reduce(DType):
152
+ # As types/classes, most DTypes can simply be pickled by their name:
153
+ if not DType._legacy or DType.__module__ == "numpy.dtypes":
154
+ return DType.__name__
155
+
156
+ # However, user defined legacy dtypes (like rational) do not end up in
157
+ # `numpy.dtypes` as module and do not have a public class at all.
158
+ # For these, we pickle them by reconstructing them from the scalar type:
159
+ scalar_type = DType.type
160
+ return _DType_reconstruct, (scalar_type,)
161
+
162
+
163
+ def __getattr__(name):
164
+ # Deprecated 2022-11-22, NumPy 1.25.
165
+ if name == "MachAr":
166
+ import warnings
167
+ warnings.warn(
168
+ "The `np._core.MachAr` is considered private API (NumPy 1.24)",
169
+ DeprecationWarning, stacklevel=2,
170
+ )
171
+ return _machar.MachAr
172
+ raise AttributeError(f"Module {__name__!r} has no attribute {name!r}")
173
+
174
+
175
+ import copyreg
176
+
177
+ copyreg.pickle(ufunc, _ufunc_reduce)
178
+ copyreg.pickle(type(dtype), _DType_reduce, _DType_reconstruct)
179
+
180
+ # Unclutter namespace (must keep _*_reconstruct for unpickling)
181
+ del copyreg, _ufunc_reduce, _DType_reduce
182
+
183
+ from numpy._pytesttester import PytestTester
184
+
185
+ test = PytestTester(__name__)
186
+ del PytestTester
venv/lib/python3.13/site-packages/numpy/_core/__init__.pyi ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ # NOTE: The `np._core` namespace is deliberately kept empty due to it
2
+ # being private
venv/lib/python3.13/site-packages/numpy/_core/_add_newdocs.py ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.13/site-packages/numpy/_core/_add_newdocs.pyi ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ from .overrides import get_array_function_like_doc as get_array_function_like_doc
2
+
3
+ def refer_to_array_attribute(attr: str, method: bool = True) -> tuple[str, str]: ...
venv/lib/python3.13/site-packages/numpy/_core/_add_newdocs_scalars.py ADDED
@@ -0,0 +1,390 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ This file is separate from ``_add_newdocs.py`` so that it can be mocked out by
3
+ our sphinx ``conf.py`` during doc builds, where we want to avoid showing
4
+ platform-dependent information.
5
+ """
6
+ import os
7
+ import sys
8
+
9
+ from numpy._core import dtype
10
+ from numpy._core import numerictypes as _numerictypes
11
+ from numpy._core.function_base import add_newdoc
12
+
13
+ ##############################################################################
14
+ #
15
+ # Documentation for concrete scalar classes
16
+ #
17
+ ##############################################################################
18
+
19
+ def numeric_type_aliases(aliases):
20
+ def type_aliases_gen():
21
+ for alias, doc in aliases:
22
+ try:
23
+ alias_type = getattr(_numerictypes, alias)
24
+ except AttributeError:
25
+ # The set of aliases that actually exist varies between platforms
26
+ pass
27
+ else:
28
+ yield (alias_type, alias, doc)
29
+ return list(type_aliases_gen())
30
+
31
+
32
+ possible_aliases = numeric_type_aliases([
33
+ ('int8', '8-bit signed integer (``-128`` to ``127``)'),
34
+ ('int16', '16-bit signed integer (``-32_768`` to ``32_767``)'),
35
+ ('int32', '32-bit signed integer (``-2_147_483_648`` to ``2_147_483_647``)'),
36
+ ('int64', '64-bit signed integer (``-9_223_372_036_854_775_808`` to ``9_223_372_036_854_775_807``)'),
37
+ ('intp', 'Signed integer large enough to fit pointer, compatible with C ``intptr_t``'),
38
+ ('uint8', '8-bit unsigned integer (``0`` to ``255``)'),
39
+ ('uint16', '16-bit unsigned integer (``0`` to ``65_535``)'),
40
+ ('uint32', '32-bit unsigned integer (``0`` to ``4_294_967_295``)'),
41
+ ('uint64', '64-bit unsigned integer (``0`` to ``18_446_744_073_709_551_615``)'),
42
+ ('uintp', 'Unsigned integer large enough to fit pointer, compatible with C ``uintptr_t``'),
43
+ ('float16', '16-bit-precision floating-point number type: sign bit, 5 bits exponent, 10 bits mantissa'),
44
+ ('float32', '32-bit-precision floating-point number type: sign bit, 8 bits exponent, 23 bits mantissa'),
45
+ ('float64', '64-bit precision floating-point number type: sign bit, 11 bits exponent, 52 bits mantissa'),
46
+ ('float96', '96-bit extended-precision floating-point number type'),
47
+ ('float128', '128-bit extended-precision floating-point number type'),
48
+ ('complex64', 'Complex number type composed of 2 32-bit-precision floating-point numbers'),
49
+ ('complex128', 'Complex number type composed of 2 64-bit-precision floating-point numbers'),
50
+ ('complex192', 'Complex number type composed of 2 96-bit extended-precision floating-point numbers'),
51
+ ('complex256', 'Complex number type composed of 2 128-bit extended-precision floating-point numbers'),
52
+ ])
53
+
54
+
55
+ def _get_platform_and_machine():
56
+ try:
57
+ system, _, _, _, machine = os.uname()
58
+ except AttributeError:
59
+ system = sys.platform
60
+ if system == 'win32':
61
+ machine = os.environ.get('PROCESSOR_ARCHITEW6432', '') \
62
+ or os.environ.get('PROCESSOR_ARCHITECTURE', '')
63
+ else:
64
+ machine = 'unknown'
65
+ return system, machine
66
+
67
+
68
+ _system, _machine = _get_platform_and_machine()
69
+ _doc_alias_string = f":Alias on this platform ({_system} {_machine}):"
70
+
71
+
72
+ def add_newdoc_for_scalar_type(obj, fixed_aliases, doc):
73
+ # note: `:field: value` is rST syntax which renders as field lists.
74
+ o = getattr(_numerictypes, obj)
75
+
76
+ character_code = dtype(o).char
77
+ canonical_name_doc = "" if obj == o.__name__ else \
78
+ f":Canonical name: `numpy.{obj}`\n "
79
+ if fixed_aliases:
80
+ alias_doc = ''.join(f":Alias: `numpy.{alias}`\n "
81
+ for alias in fixed_aliases)
82
+ else:
83
+ alias_doc = ''
84
+ alias_doc += ''.join(f"{_doc_alias_string} `numpy.{alias}`: {doc}.\n "
85
+ for (alias_type, alias, doc) in possible_aliases if alias_type is o)
86
+
87
+ docstring = f"""
88
+ {doc.strip()}
89
+
90
+ :Character code: ``'{character_code}'``
91
+ {canonical_name_doc}{alias_doc}
92
+ """
93
+
94
+ add_newdoc('numpy._core.numerictypes', obj, docstring)
95
+
96
+
97
+ _bool_docstring = (
98
+ """
99
+ Boolean type (True or False), stored as a byte.
100
+
101
+ .. warning::
102
+
103
+ The :class:`bool` type is not a subclass of the :class:`int_` type
104
+ (the :class:`bool` is not even a number type). This is different
105
+ than Python's default implementation of :class:`bool` as a
106
+ sub-class of :class:`int`.
107
+ """
108
+ )
109
+
110
+ add_newdoc_for_scalar_type('bool', [], _bool_docstring)
111
+
112
+ add_newdoc_for_scalar_type('bool_', [], _bool_docstring)
113
+
114
+ add_newdoc_for_scalar_type('byte', [],
115
+ """
116
+ Signed integer type, compatible with C ``char``.
117
+ """)
118
+
119
+ add_newdoc_for_scalar_type('short', [],
120
+ """
121
+ Signed integer type, compatible with C ``short``.
122
+ """)
123
+
124
+ add_newdoc_for_scalar_type('intc', [],
125
+ """
126
+ Signed integer type, compatible with C ``int``.
127
+ """)
128
+
129
+ # TODO: These docs probably need an if to highlight the default rather than
130
+ # the C-types (and be correct).
131
+ add_newdoc_for_scalar_type('int_', [],
132
+ """
133
+ Default signed integer type, 64bit on 64bit systems and 32bit on 32bit
134
+ systems.
135
+ """)
136
+
137
+ add_newdoc_for_scalar_type('longlong', [],
138
+ """
139
+ Signed integer type, compatible with C ``long long``.
140
+ """)
141
+
142
+ add_newdoc_for_scalar_type('ubyte', [],
143
+ """
144
+ Unsigned integer type, compatible with C ``unsigned char``.
145
+ """)
146
+
147
+ add_newdoc_for_scalar_type('ushort', [],
148
+ """
149
+ Unsigned integer type, compatible with C ``unsigned short``.
150
+ """)
151
+
152
+ add_newdoc_for_scalar_type('uintc', [],
153
+ """
154
+ Unsigned integer type, compatible with C ``unsigned int``.
155
+ """)
156
+
157
+ add_newdoc_for_scalar_type('uint', [],
158
+ """
159
+ Unsigned signed integer type, 64bit on 64bit systems and 32bit on 32bit
160
+ systems.
161
+ """)
162
+
163
+ add_newdoc_for_scalar_type('ulonglong', [],
164
+ """
165
+ Signed integer type, compatible with C ``unsigned long long``.
166
+ """)
167
+
168
+ add_newdoc_for_scalar_type('half', [],
169
+ """
170
+ Half-precision floating-point number type.
171
+ """)
172
+
173
+ add_newdoc_for_scalar_type('single', [],
174
+ """
175
+ Single-precision floating-point number type, compatible with C ``float``.
176
+ """)
177
+
178
+ add_newdoc_for_scalar_type('double', [],
179
+ """
180
+ Double-precision floating-point number type, compatible with Python
181
+ :class:`float` and C ``double``.
182
+ """)
183
+
184
+ add_newdoc_for_scalar_type('longdouble', [],
185
+ """
186
+ Extended-precision floating-point number type, compatible with C
187
+ ``long double`` but not necessarily with IEEE 754 quadruple-precision.
188
+ """)
189
+
190
+ add_newdoc_for_scalar_type('csingle', [],
191
+ """
192
+ Complex number type composed of two single-precision floating-point
193
+ numbers.
194
+ """)
195
+
196
+ add_newdoc_for_scalar_type('cdouble', [],
197
+ """
198
+ Complex number type composed of two double-precision floating-point
199
+ numbers, compatible with Python :class:`complex`.
200
+ """)
201
+
202
+ add_newdoc_for_scalar_type('clongdouble', [],
203
+ """
204
+ Complex number type composed of two extended-precision floating-point
205
+ numbers.
206
+ """)
207
+
208
+ add_newdoc_for_scalar_type('object_', [],
209
+ """
210
+ Any Python object.
211
+ """)
212
+
213
+ add_newdoc_for_scalar_type('str_', [],
214
+ r"""
215
+ A unicode string.
216
+
217
+ This type strips trailing null codepoints.
218
+
219
+ >>> s = np.str_("abc\x00")
220
+ >>> s
221
+ 'abc'
222
+
223
+ Unlike the builtin :class:`str`, this supports the
224
+ :ref:`python:bufferobjects`, exposing its contents as UCS4:
225
+
226
+ >>> m = memoryview(np.str_("abc"))
227
+ >>> m.format
228
+ '3w'
229
+ >>> m.tobytes()
230
+ b'a\x00\x00\x00b\x00\x00\x00c\x00\x00\x00'
231
+ """)
232
+
233
+ add_newdoc_for_scalar_type('bytes_', [],
234
+ r"""
235
+ A byte string.
236
+
237
+ When used in arrays, this type strips trailing null bytes.
238
+ """)
239
+
240
+ add_newdoc_for_scalar_type('void', [],
241
+ r"""
242
+ np.void(length_or_data, /, dtype=None)
243
+
244
+ Create a new structured or unstructured void scalar.
245
+
246
+ Parameters
247
+ ----------
248
+ length_or_data : int, array-like, bytes-like, object
249
+ One of multiple meanings (see notes). The length or
250
+ bytes data of an unstructured void. Or alternatively,
251
+ the data to be stored in the new scalar when `dtype`
252
+ is provided.
253
+ This can be an array-like, in which case an array may
254
+ be returned.
255
+ dtype : dtype, optional
256
+ If provided the dtype of the new scalar. This dtype must
257
+ be "void" dtype (i.e. a structured or unstructured void,
258
+ see also :ref:`defining-structured-types`).
259
+
260
+ .. versionadded:: 1.24
261
+
262
+ Notes
263
+ -----
264
+ For historical reasons and because void scalars can represent both
265
+ arbitrary byte data and structured dtypes, the void constructor
266
+ has three calling conventions:
267
+
268
+ 1. ``np.void(5)`` creates a ``dtype="V5"`` scalar filled with five
269
+ ``\0`` bytes. The 5 can be a Python or NumPy integer.
270
+ 2. ``np.void(b"bytes-like")`` creates a void scalar from the byte string.
271
+ The dtype itemsize will match the byte string length, here ``"V10"``.
272
+ 3. When a ``dtype=`` is passed the call is roughly the same as an
273
+ array creation. However, a void scalar rather than array is returned.
274
+
275
+ Please see the examples which show all three different conventions.
276
+
277
+ Examples
278
+ --------
279
+ >>> np.void(5)
280
+ np.void(b'\x00\x00\x00\x00\x00')
281
+ >>> np.void(b'abcd')
282
+ np.void(b'\x61\x62\x63\x64')
283
+ >>> np.void((3.2, b'eggs'), dtype="d,S5")
284
+ np.void((3.2, b'eggs'), dtype=[('f0', '<f8'), ('f1', 'S5')])
285
+ >>> np.void(3, dtype=[('x', np.int8), ('y', np.int8)])
286
+ np.void((3, 3), dtype=[('x', 'i1'), ('y', 'i1')])
287
+
288
+ """)
289
+
290
+ add_newdoc_for_scalar_type('datetime64', [],
291
+ """
292
+ If created from a 64-bit integer, it represents an offset from
293
+ ``1970-01-01T00:00:00``.
294
+ If created from string, the string can be in ISO 8601 date
295
+ or datetime format.
296
+
297
+ When parsing a string to create a datetime object, if the string contains
298
+ a trailing timezone (A 'Z' or a timezone offset), the timezone will be
299
+ dropped and a User Warning is given.
300
+
301
+ Datetime64 objects should be considered to be UTC and therefore have an
302
+ offset of +0000.
303
+
304
+ >>> np.datetime64(10, 'Y')
305
+ np.datetime64('1980')
306
+ >>> np.datetime64('1980', 'Y')
307
+ np.datetime64('1980')
308
+ >>> np.datetime64(10, 'D')
309
+ np.datetime64('1970-01-11')
310
+
311
+ See :ref:`arrays.datetime` for more information.
312
+ """)
313
+
314
+ add_newdoc_for_scalar_type('timedelta64', [],
315
+ """
316
+ A timedelta stored as a 64-bit integer.
317
+
318
+ See :ref:`arrays.datetime` for more information.
319
+ """)
320
+
321
+ add_newdoc('numpy._core.numerictypes', "integer", ('is_integer',
322
+ """
323
+ integer.is_integer() -> bool
324
+
325
+ Return ``True`` if the number is finite with integral value.
326
+
327
+ .. versionadded:: 1.22
328
+
329
+ Examples
330
+ --------
331
+ >>> import numpy as np
332
+ >>> np.int64(-2).is_integer()
333
+ True
334
+ >>> np.uint32(5).is_integer()
335
+ True
336
+ """))
337
+
338
+ # TODO: work out how to put this on the base class, np.floating
339
+ for float_name in ('half', 'single', 'double', 'longdouble'):
340
+ add_newdoc('numpy._core.numerictypes', float_name, ('as_integer_ratio',
341
+ f"""
342
+ {float_name}.as_integer_ratio() -> (int, int)
343
+
344
+ Return a pair of integers, whose ratio is exactly equal to the original
345
+ floating point number, and with a positive denominator.
346
+ Raise `OverflowError` on infinities and a `ValueError` on NaNs.
347
+
348
+ >>> np.{float_name}(10.0).as_integer_ratio()
349
+ (10, 1)
350
+ >>> np.{float_name}(0.0).as_integer_ratio()
351
+ (0, 1)
352
+ >>> np.{float_name}(-.25).as_integer_ratio()
353
+ (-1, 4)
354
+ """))
355
+
356
+ add_newdoc('numpy._core.numerictypes', float_name, ('is_integer',
357
+ f"""
358
+ {float_name}.is_integer() -> bool
359
+
360
+ Return ``True`` if the floating point number is finite with integral
361
+ value, and ``False`` otherwise.
362
+
363
+ .. versionadded:: 1.22
364
+
365
+ Examples
366
+ --------
367
+ >>> np.{float_name}(-2.0).is_integer()
368
+ True
369
+ >>> np.{float_name}(3.2).is_integer()
370
+ False
371
+ """))
372
+
373
+ for int_name in ('int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32',
374
+ 'int64', 'uint64', 'int64', 'uint64', 'int64', 'uint64'):
375
+ # Add negative examples for signed cases by checking typecode
376
+ add_newdoc('numpy._core.numerictypes', int_name, ('bit_count',
377
+ f"""
378
+ {int_name}.bit_count() -> int
379
+
380
+ Computes the number of 1-bits in the absolute value of the input.
381
+ Analogous to the builtin `int.bit_count` or ``popcount`` in C++.
382
+
383
+ Examples
384
+ --------
385
+ >>> np.{int_name}(127).bit_count()
386
+ 7""" +
387
+ (f"""
388
+ >>> np.{int_name}(-127).bit_count()
389
+ 7
390
+ """ if dtype(int_name).char.islower() else "")))
venv/lib/python3.13/site-packages/numpy/_core/_add_newdocs_scalars.pyi ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from collections.abc import Iterable
2
+ from typing import Final
3
+
4
+ import numpy as np
5
+
6
+ possible_aliases: Final[list[tuple[type[np.number], str, str]]] = ...
7
+ _system: Final[str] = ...
8
+ _machine: Final[str] = ...
9
+ _doc_alias_string: Final[str] = ...
10
+ _bool_docstring: Final[str] = ...
11
+ int_name: str = ...
12
+ float_name: str = ...
13
+
14
+ def numeric_type_aliases(aliases: list[tuple[str, str]]) -> list[tuple[type[np.number], str, str]]: ...
15
+ def add_newdoc_for_scalar_type(obj: str, fixed_aliases: Iterable[str], doc: str) -> None: ...
16
+ def _get_platform_and_machine() -> tuple[str, str]: ...
venv/lib/python3.13/site-packages/numpy/_core/_asarray.py ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Functions in the ``as*array`` family that promote array-likes into arrays.
3
+
4
+ `require` fits this category despite its name not matching this pattern.
5
+ """
6
+ from .multiarray import array, asanyarray
7
+ from .overrides import (
8
+ array_function_dispatch,
9
+ finalize_array_function_like,
10
+ set_module,
11
+ )
12
+
13
+ __all__ = ["require"]
14
+
15
+
16
+ POSSIBLE_FLAGS = {
17
+ 'C': 'C', 'C_CONTIGUOUS': 'C', 'CONTIGUOUS': 'C',
18
+ 'F': 'F', 'F_CONTIGUOUS': 'F', 'FORTRAN': 'F',
19
+ 'A': 'A', 'ALIGNED': 'A',
20
+ 'W': 'W', 'WRITEABLE': 'W',
21
+ 'O': 'O', 'OWNDATA': 'O',
22
+ 'E': 'E', 'ENSUREARRAY': 'E'
23
+ }
24
+
25
+
26
+ @finalize_array_function_like
27
+ @set_module('numpy')
28
+ def require(a, dtype=None, requirements=None, *, like=None):
29
+ """
30
+ Return an ndarray of the provided type that satisfies requirements.
31
+
32
+ This function is useful to be sure that an array with the correct flags
33
+ is returned for passing to compiled code (perhaps through ctypes).
34
+
35
+ Parameters
36
+ ----------
37
+ a : array_like
38
+ The object to be converted to a type-and-requirement-satisfying array.
39
+ dtype : data-type
40
+ The required data-type. If None preserve the current dtype. If your
41
+ application requires the data to be in native byteorder, include
42
+ a byteorder specification as a part of the dtype specification.
43
+ requirements : str or sequence of str
44
+ The requirements list can be any of the following
45
+
46
+ * 'F_CONTIGUOUS' ('F') - ensure a Fortran-contiguous array
47
+ * 'C_CONTIGUOUS' ('C') - ensure a C-contiguous array
48
+ * 'ALIGNED' ('A') - ensure a data-type aligned array
49
+ * 'WRITEABLE' ('W') - ensure a writable array
50
+ * 'OWNDATA' ('O') - ensure an array that owns its own data
51
+ * 'ENSUREARRAY', ('E') - ensure a base array, instead of a subclass
52
+ ${ARRAY_FUNCTION_LIKE}
53
+
54
+ .. versionadded:: 1.20.0
55
+
56
+ Returns
57
+ -------
58
+ out : ndarray
59
+ Array with specified requirements and type if given.
60
+
61
+ See Also
62
+ --------
63
+ asarray : Convert input to an ndarray.
64
+ asanyarray : Convert to an ndarray, but pass through ndarray subclasses.
65
+ ascontiguousarray : Convert input to a contiguous array.
66
+ asfortranarray : Convert input to an ndarray with column-major
67
+ memory order.
68
+ ndarray.flags : Information about the memory layout of the array.
69
+
70
+ Notes
71
+ -----
72
+ The returned array will be guaranteed to have the listed requirements
73
+ by making a copy if needed.
74
+
75
+ Examples
76
+ --------
77
+ >>> import numpy as np
78
+ >>> x = np.arange(6).reshape(2,3)
79
+ >>> x.flags
80
+ C_CONTIGUOUS : True
81
+ F_CONTIGUOUS : False
82
+ OWNDATA : False
83
+ WRITEABLE : True
84
+ ALIGNED : True
85
+ WRITEBACKIFCOPY : False
86
+
87
+ >>> y = np.require(x, dtype=np.float32, requirements=['A', 'O', 'W', 'F'])
88
+ >>> y.flags
89
+ C_CONTIGUOUS : False
90
+ F_CONTIGUOUS : True
91
+ OWNDATA : True
92
+ WRITEABLE : True
93
+ ALIGNED : True
94
+ WRITEBACKIFCOPY : False
95
+
96
+ """
97
+ if like is not None:
98
+ return _require_with_like(
99
+ like,
100
+ a,
101
+ dtype=dtype,
102
+ requirements=requirements,
103
+ )
104
+
105
+ if not requirements:
106
+ return asanyarray(a, dtype=dtype)
107
+
108
+ requirements = {POSSIBLE_FLAGS[x.upper()] for x in requirements}
109
+
110
+ if 'E' in requirements:
111
+ requirements.remove('E')
112
+ subok = False
113
+ else:
114
+ subok = True
115
+
116
+ order = 'A'
117
+ if requirements >= {'C', 'F'}:
118
+ raise ValueError('Cannot specify both "C" and "F" order')
119
+ elif 'F' in requirements:
120
+ order = 'F'
121
+ requirements.remove('F')
122
+ elif 'C' in requirements:
123
+ order = 'C'
124
+ requirements.remove('C')
125
+
126
+ arr = array(a, dtype=dtype, order=order, copy=None, subok=subok)
127
+
128
+ for prop in requirements:
129
+ if not arr.flags[prop]:
130
+ return arr.copy(order)
131
+ return arr
132
+
133
+
134
+ _require_with_like = array_function_dispatch()(require)
venv/lib/python3.13/site-packages/numpy/_core/_asarray.pyi ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from collections.abc import Iterable
from typing import Any, Literal, TypeAlias, TypeVar, overload

from numpy._typing import DTypeLike, NDArray, _SupportsArrayFunc

_ArrayT = TypeVar("_ArrayT", bound=NDArray[Any])

# requirement flags accepted by `require`, short and long spellings
_Requirements: TypeAlias = Literal[
    "C", "C_CONTIGUOUS", "CONTIGUOUS",
    "F", "F_CONTIGUOUS", "FORTRAN",
    "A", "ALIGNED",
    "W", "WRITEABLE",
    "O", "OWNDATA"
]
_E: TypeAlias = Literal["E", "ENSUREARRAY"]
_RequirementsWithE: TypeAlias = _Requirements | _E

# no 'E' requirement and no dtype: ndarray subclasses pass through unchanged
@overload
def require(
    a: _ArrayT,
    dtype: None = ...,
    requirements: _Requirements | Iterable[_Requirements] | None = ...,
    *,
    like: _SupportsArrayFunc = ...
) -> _ArrayT: ...
# 'E' requirement present: result is always a base ndarray
@overload
def require(
    a: object,
    dtype: DTypeLike = ...,
    requirements: _E | Iterable[_RequirementsWithE] = ...,
    *,
    like: _SupportsArrayFunc = ...
) -> NDArray[Any]: ...
# general fallback
@overload
def require(
    a: object,
    dtype: DTypeLike = ...,
    requirements: _Requirements | Iterable[_Requirements] | None = ...,
    *,
    like: _SupportsArrayFunc = ...
) -> NDArray[Any]: ...
venv/lib/python3.13/site-packages/numpy/_core/_dtype.py ADDED
@@ -0,0 +1,366 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ A place for code to be called from the implementation of np.dtype
3
+
4
+ String handling is much easier to do correctly in python.
5
+ """
6
+ import numpy as np
7
+
8
+ _kind_to_stem = {
9
+ 'u': 'uint',
10
+ 'i': 'int',
11
+ 'c': 'complex',
12
+ 'f': 'float',
13
+ 'b': 'bool',
14
+ 'V': 'void',
15
+ 'O': 'object',
16
+ 'M': 'datetime',
17
+ 'm': 'timedelta',
18
+ 'S': 'bytes',
19
+ 'U': 'str',
20
+ }
21
+
22
+
23
+ def _kind_name(dtype):
24
+ try:
25
+ return _kind_to_stem[dtype.kind]
26
+ except KeyError as e:
27
+ raise RuntimeError(
28
+ f"internal dtype error, unknown kind {dtype.kind!r}"
29
+ ) from None
30
+
31
+
32
+ def __str__(dtype):
33
+ if dtype.fields is not None:
34
+ return _struct_str(dtype, include_align=True)
35
+ elif dtype.subdtype:
36
+ return _subarray_str(dtype)
37
+ elif issubclass(dtype.type, np.flexible) or not dtype.isnative:
38
+ return dtype.str
39
+ else:
40
+ return dtype.name
41
+
42
+
43
def __repr__(dtype):
    """Implementation of ``repr(dtype)``: wrap the construction string."""
    body = _construction_repr(dtype, include_align=False)
    if dtype.isalignedstruct:
        # the align flag is not part of the first constructor argument,
        # so it is shown as a separate keyword
        body += ", align=True"
    return f"dtype({body})"
48
+
49
+
50
+ def _unpack_field(dtype, offset, title=None):
51
+ """
52
+ Helper function to normalize the items in dtype.fields.
53
+
54
+ Call as:
55
+
56
+ dtype, offset, title = _unpack_field(*dtype.fields[name])
57
+ """
58
+ return dtype, offset, title
59
+
60
+
61
+ def _isunsized(dtype):
62
+ # PyDataType_ISUNSIZED
63
+ return dtype.itemsize == 0
64
+
65
+
66
def _construction_repr(dtype, include_align=False, short=False):
    """
    Creates a string repr of the dtype, excluding the 'dtype()' part
    surrounding the object. This object may be a string, a list, or
    a dict depending on the nature of the dtype. This
    is the object passed as the first parameter to the dtype
    constructor, and if no additional constructor parameters are
    given, will reproduce the exact memory layout.

    Parameters
    ----------
    short : bool
        If true, this creates a shorter repr using 'kind' and 'itemsize',
        instead of the longer type name.

    include_align : bool
        If true, this includes the 'align=True' parameter
        inside the struct dtype construction dict when needed. Use this flag
        if you want a proper repr string without the 'dtype()' part around it.

        If false, this does not preserve the
        'align=True' parameter or sticky NPY_ALIGNED_STRUCT flag for
        struct arrays like the regular repr does, because the 'align'
        flag is not part of first dtype constructor parameter. This
        mode is intended for a full 'repr', where the 'align=True' is
        provided as the second parameter.

    Returns
    -------
    str
        The construction string, produced by the struct, subarray, or
        scalar formatter depending on the dtype's nature.
    """
    # dispatch on the three structural categories of a dtype
    if dtype.fields is not None:
        # structured (record) dtype
        return _struct_str(dtype, include_align=include_align)
    elif dtype.subdtype:
        # subarray dtype, e.g. (base, shape)
        return _subarray_str(dtype)
    else:
        # plain scalar dtype
        return _scalar_str(dtype, short=short)
99
+
100
+
101
def _scalar_str(dtype, short):
    """Return the construction string for a non-structured, non-subarray dtype.

    Parameters
    ----------
    short : bool
        If true, prefer the compact kind+itemsize form (e.g. ``'<f8'``)
        over the long name form (e.g. ``'float64'``).
    """
    byteorder = _byte_order_str(dtype)

    if dtype.type == np.bool:
        if short:
            return "'?'"
        else:
            return "'bool'"

    elif dtype.type == np.object_:
        # The object reference may be different sizes on different
        # platforms, so it should never include the itemsize here.
        return "'O'"

    elif dtype.type == np.bytes_:
        if _isunsized(dtype):
            return "'S'"
        else:
            return "'S%d'" % dtype.itemsize

    elif dtype.type == np.str_:
        if _isunsized(dtype):
            return f"'{byteorder}U'"
        else:
            # itemsize is 4 bytes per UCS-4 code point; use integer division
            # rather than relying on %d truncating a float
            return "'%sU%d'" % (byteorder, dtype.itemsize // 4)

    elif dtype.type == str:
        return "'T'"

    elif not type(dtype)._legacy:
        return f"'{byteorder}{type(dtype).__name__}{dtype.itemsize * 8}'"

    # unlike the other types, subclasses of void are preserved - but
    # historically the repr does not actually reveal the subclass
    elif issubclass(dtype.type, np.void):
        if _isunsized(dtype):
            return "'V'"
        else:
            return "'V%d'" % dtype.itemsize

    elif dtype.type == np.datetime64:
        return f"'{byteorder}M8{_datetime_metadata_str(dtype)}'"

    elif dtype.type == np.timedelta64:
        return f"'{byteorder}m8{_datetime_metadata_str(dtype)}'"

    elif dtype.isbuiltin == 2:
        return dtype.type.__name__

    elif np.issubdtype(dtype, np.number):
        # Short repr with endianness, like '<f8'
        if short or dtype.byteorder not in ('=', '|'):
            return "'%s%c%d'" % (byteorder, dtype.kind, dtype.itemsize)

        # Longer repr, like 'float64'
        else:
            return "'%s%d'" % (_kind_name(dtype), 8 * dtype.itemsize)

    else:
        raise RuntimeError(
            "Internal error: NumPy dtype unrecognized type number")
162
+
163
+
164
+ def _byte_order_str(dtype):
165
+ """ Normalize byteorder to '<' or '>' """
166
+ # hack to obtain the native and swapped byte order characters
167
+ swapped = np.dtype(int).newbyteorder('S')
168
+ native = swapped.newbyteorder('S')
169
+
170
+ byteorder = dtype.byteorder
171
+ if byteorder == '=':
172
+ return native.byteorder
173
+ if byteorder == 'S':
174
+ # TODO: this path can never be reached
175
+ return swapped.byteorder
176
+ elif byteorder == '|':
177
+ return ''
178
+ else:
179
+ return byteorder
180
+
181
+
182
+ def _datetime_metadata_str(dtype):
183
+ # TODO: this duplicates the C metastr_to_unicode functionality
184
+ unit, count = np.datetime_data(dtype)
185
+ if unit == 'generic':
186
+ return ''
187
+ elif count == 1:
188
+ return f'[{unit}]'
189
+ else:
190
+ return f'[{count}{unit}]'
191
+
192
+
193
def _struct_dict_str(dtype, includealignedflag):
    """Build the dict-form construction string for a structured dtype.

    Produces ``{'names': [...], 'formats': [...], 'offsets': [...],
    ('titles': [...],) 'itemsize': N(, 'aligned': True)}`` as a string.
    Used for structs whose layout cannot be expressed as a simple field list.
    """
    # unpack the fields dictionary into parallel lists
    names = dtype.names
    fld_dtypes = []
    offsets = []
    titles = []
    for name in names:
        fld_dtype, offset, title = _unpack_field(*dtype.fields[name])
        fld_dtypes.append(fld_dtype)
        offsets.append(offset)
        titles.append(title)

    # Build up a string to make the dictionary

    # legacy (<= 1.21) print mode omits the space after ':' and ','
    if np._core.arrayprint._get_legacy_print_mode() <= 121:
        colon = ":"
        fieldsep = ","
    else:
        colon = ": "
        fieldsep = ", "

    # First, the names
    ret = "{'names'%s[" % colon
    ret += fieldsep.join(repr(name) for name in names)

    # Second, the formats
    ret += f"], 'formats'{colon}["
    ret += fieldsep.join(
        _construction_repr(fld_dtype, short=True) for fld_dtype in fld_dtypes)

    # Third, the offsets
    ret += f"], 'offsets'{colon}["
    ret += fieldsep.join("%d" % offset for offset in offsets)

    # Fourth, the titles — only included if at least one field has one
    if any(title is not None for title in titles):
        ret += f"], 'titles'{colon}["
        ret += fieldsep.join(repr(title) for title in titles)

    # Fifth, the itemsize
    ret += "], 'itemsize'%s%d" % (colon, dtype.itemsize)

    if (includealignedflag and dtype.isalignedstruct):
        # Finally, the aligned flag
        ret += ", 'aligned'%sTrue}" % colon
    else:
        ret += "}"

    return ret
242
+
243
+
244
+ def _aligned_offset(offset, alignment):
245
+ # round up offset:
246
+ return - (-offset // alignment) * alignment
247
+
248
+
249
def _is_packed(dtype):
    """
    Checks whether the structured data type in 'dtype'
    has a simple layout, where all the fields are in order,
    and follow each other with no alignment padding.

    When this returns true, the dtype can be reconstructed
    from a list of the field names and dtypes with no additional
    dtype parameters.

    Duplicates the C `is_dtype_struct_simple_unaligned_layout` function.
    """
    align = dtype.isalignedstruct
    max_alignment = 1
    # walk the fields in declaration order, tracking where the next field
    # would start in a perfectly packed (or aligned) layout
    total_offset = 0
    for name in dtype.names:
        fld_dtype, fld_offset, title = _unpack_field(*dtype.fields[name])

        if align:
            # aligned structs pad each field up to its own alignment
            total_offset = _aligned_offset(total_offset, fld_dtype.alignment)
            max_alignment = max(max_alignment, fld_dtype.alignment)

        if fld_offset != total_offset:
            # a gap or reordering — not the simple layout
            return False
        total_offset += fld_dtype.itemsize

    if align:
        # aligned structs also pad the total size to the largest alignment
        total_offset = _aligned_offset(total_offset, max_alignment)

    # any trailing padding beyond this means extra constructor parameters
    # would be needed to reproduce the layout
    return total_offset == dtype.itemsize
279
+
280
+
281
def _struct_list_str(dtype):
    """Build the list-of-tuples construction string for a packed struct."""
    entries = []
    for fname in dtype.names:
        fld_dtype, _offset, title = _unpack_field(*dtype.fields[fname])

        # field label: plain name, or a (title, name) pair when titled
        if title is not None:
            label = f"({title!r}, {fname!r})"
        else:
            label = f"{fname!r}"

        # Special case subarray handling here
        if fld_dtype.subdtype is not None:
            base, shape = fld_dtype.subdtype
            fmt = f"{_construction_repr(base, short=True)}, {shape}"
        else:
            fmt = _construction_repr(fld_dtype, short=True)

        entries.append(f"({label}, {fmt})")

    return "[" + ", ".join(entries) + "]"
302
+
303
+
304
def _struct_str(dtype, include_align):
    """Build the construction string for a structured dtype."""
    # The list str representation can't include the 'align=' flag,
    # so if it is requested and the struct has the aligned flag set,
    # we must use the dict str instead.
    use_list_form = (
        not (include_align and dtype.isalignedstruct) and _is_packed(dtype)
    )
    if use_list_form:
        sub = _struct_list_str(dtype)
    else:
        sub = _struct_dict_str(dtype, include_align)

    # If the data type isn't the default, void, show it
    if dtype.type == np.void:
        return sub
    return f"({dtype.type.__module__}.{dtype.type.__name__}, {sub})"
319
+
320
+
321
def _subarray_str(dtype):
    """Build the '(base, shape)' construction string for a subarray dtype."""
    base, shape = dtype.subdtype
    base_str = _construction_repr(base, short=True)
    return f"({base_str}, {shape})"
324
+
325
+
326
+ def _name_includes_bit_suffix(dtype):
327
+ if dtype.type == np.object_:
328
+ # pointer size varies by system, best to omit it
329
+ return False
330
+ elif dtype.type == np.bool:
331
+ # implied
332
+ return False
333
+ elif dtype.type is None:
334
+ return True
335
+ elif np.issubdtype(dtype, np.flexible) and _isunsized(dtype):
336
+ # unspecified
337
+ return False
338
+ else:
339
+ return True
340
+
341
+
342
def _name_get(dtype):
    """Compute ``dtype.name``: the type stem plus bit width and metadata."""
    # provides dtype.name.__get__, documented as returning a "bit name"

    if dtype.isbuiltin == 2:
        # user dtypes don't promise to do anything special
        return dtype.type.__name__

    if not type(dtype)._legacy:
        # new-style (non-legacy) dtypes are named after their class
        name = type(dtype).__name__

    elif issubclass(dtype.type, np.void):
        # historically, void subclasses preserve their name, eg `record64`
        name = dtype.type.__name__
    else:
        name = _kind_name(dtype)

    # append bit counts
    if _name_includes_bit_suffix(dtype):
        name += f"{dtype.itemsize * 8}"

    # append metadata to datetimes
    if dtype.type in (np.datetime64, np.timedelta64):
        name += _datetime_metadata_str(dtype)

    return name
venv/lib/python3.13/site-packages/numpy/_core/_dtype.pyi ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from typing import Final, TypeAlias, TypedDict, overload, type_check_only
from typing import Literal as L

from typing_extensions import ReadOnly, TypeVar

import numpy as np

###

_T = TypeVar("_T")

_Name: TypeAlias = L["uint", "int", "complex", "float", "bool", "void", "object", "datetime", "timedelta", "bytes", "str"]

# typed view of the runtime `_kind_to_stem` mapping (kind char -> name stem)
@type_check_only
class _KindToStemType(TypedDict):
    u: ReadOnly[L["uint"]]
    i: ReadOnly[L["int"]]
    c: ReadOnly[L["complex"]]
    f: ReadOnly[L["float"]]
    b: ReadOnly[L["bool"]]
    V: ReadOnly[L["void"]]
    O: ReadOnly[L["object"]]
    M: ReadOnly[L["datetime"]]
    m: ReadOnly[L["timedelta"]]
    S: ReadOnly[L["bytes"]]
    U: ReadOnly[L["str"]]

###

_kind_to_stem: Final[_KindToStemType] = ...

# naming and repr entry points
def _kind_name(dtype: np.dtype) -> _Name: ...
def __str__(dtype: np.dtype) -> str: ...
def __repr__(dtype: np.dtype) -> str: ...

# boolean layout predicates
def _isunsized(dtype: np.dtype) -> bool: ...
def _is_packed(dtype: np.dtype) -> bool: ...
def _name_includes_bit_suffix(dtype: np.dtype) -> bool: ...

# construction-string formatters
def _construction_repr(dtype: np.dtype, include_align: bool = False, short: bool = False) -> str: ...
def _scalar_str(dtype: np.dtype, short: bool) -> str: ...
def _byte_order_str(dtype: np.dtype) -> str: ...
def _datetime_metadata_str(dtype: np.dtype) -> str: ...
def _struct_dict_str(dtype: np.dtype, includealignedflag: bool) -> str: ...
def _struct_list_str(dtype: np.dtype) -> str: ...
def _struct_str(dtype: np.dtype, include_align: bool) -> str: ...
def _subarray_str(dtype: np.dtype) -> str: ...
def _name_get(dtype: np.dtype) -> str: ...

# field helpers
@overload
def _unpack_field(dtype: np.dtype, offset: int, title: _T) -> tuple[np.dtype, int, _T]: ...
@overload
def _unpack_field(dtype: np.dtype, offset: int, title: None = None) -> tuple[np.dtype, int, None]: ...
def _aligned_offset(offset: int, alignment: int) -> int: ...
venv/lib/python3.13/site-packages/numpy/_core/_dtype_ctypes.py ADDED
@@ -0,0 +1,120 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Conversion from ctypes to dtype.
3
+
4
+ In an ideal world, we could achieve this through the PEP3118 buffer protocol,
5
+ something like::
6
+
7
+ def dtype_from_ctypes_type(t):
8
+ # needed to ensure that the shape of `t` is within memoryview.format
9
+ class DummyStruct(ctypes.Structure):
10
+ _fields_ = [('a', t)]
11
+
12
+ # empty to avoid memory allocation
13
+ ctype_0 = (DummyStruct * 0)()
14
+ mv = memoryview(ctype_0)
15
+
16
+ # convert the struct, and slice back out the field
17
+ return _dtype_from_pep3118(mv.format)['a']
18
+
19
+ Unfortunately, this fails because:
20
+
21
+ * ctypes cannot handle length-0 arrays with PEP3118 (bpo-32782)
22
+ * PEP3118 cannot represent unions, but both numpy and ctypes can
23
+ * ctypes cannot handle big-endian structs with PEP3118 (bpo-32780)
24
+ """
25
+
26
+ # We delay-import ctypes for distributions that do not include it.
27
+ # While this module is not used unless the user passes in ctypes
28
+ # members, it is eagerly imported from numpy/_core/__init__.py.
29
+ import numpy as np
30
+
31
+
32
def _from_ctypes_array(t):
    """Convert a ctypes array type into a subarray dtype."""
    element = dtype_from_ctypes_type(t._type_)
    return np.dtype((element, (t._length_,)))
34
+
35
+
36
def _from_ctypes_structure(t):
    """Convert a ctypes Structure subclass into a structured dtype.

    Raises
    ------
    TypeError
        If any field is a bitfield (a 3-tuple entry in ``_fields_``),
        which has no dtype equivalent.
    """
    for item in t._fields_:
        if len(item) > 2:
            raise TypeError(
                "ctypes bitfields have no dtype equivalent")

    if hasattr(t, "_pack_"):
        # _pack_ overrides natural alignment: recompute every field offset
        # explicitly, mirroring ctypes' packing rules
        import ctypes
        formats = []
        offsets = []
        names = []
        current_offset = 0
        for fname, ftyp in t._fields_:
            names.append(fname)
            formats.append(dtype_from_ctypes_type(ftyp))
            # Each type has a default offset, this is platform dependent
            # for some types.
            effective_pack = min(t._pack_, ctypes.alignment(ftyp))
            # round the running offset up to the effective packing boundary
            current_offset = (
                (current_offset + effective_pack - 1) // effective_pack
            ) * effective_pack
            offsets.append(current_offset)
            current_offset += ctypes.sizeof(ftyp)

        return np.dtype({
            "formats": formats,
            "offsets": offsets,
            "names": names,
            "itemsize": ctypes.sizeof(t)})
    else:
        fields = []
        for fname, ftyp in t._fields_:
            fields.append((fname, dtype_from_ctypes_type(ftyp)))

        # by default, ctypes structs are aligned
        return np.dtype(fields, align=True)
72
+
73
+
74
+ def _from_ctypes_scalar(t):
75
+ """
76
+ Return the dtype type with endianness included if it's the case
77
+ """
78
+ if getattr(t, '__ctype_be__', None) is t:
79
+ return np.dtype('>' + t._type_)
80
+ elif getattr(t, '__ctype_le__', None) is t:
81
+ return np.dtype('<' + t._type_)
82
+ else:
83
+ return np.dtype(t._type_)
84
+
85
+
86
def _from_ctypes_union(t):
    """Convert a ctypes Union subclass into an overlapping-fields dtype."""
    import ctypes
    member_names = [fname for fname, _ in t._fields_]
    member_formats = [dtype_from_ctypes_type(ftyp) for _, ftyp in t._fields_]

    return np.dtype({
        "formats": member_formats,
        # every union member starts at offset 0
        "offsets": [0] * len(member_names),
        "names": member_names,
        "itemsize": ctypes.sizeof(t)})
101
+
102
+
103
def dtype_from_ctypes_type(t):
    """
    Construct a dtype object from a ctypes type
    """
    import _ctypes
    # NOTE: composite kinds are checked before the scalar `_type_` fallback
    if issubclass(t, _ctypes.Array):
        return _from_ctypes_array(t)
    if issubclass(t, _ctypes._Pointer):
        raise TypeError("ctypes pointers have no dtype equivalent")
    if issubclass(t, _ctypes.Structure):
        return _from_ctypes_structure(t)
    if issubclass(t, _ctypes.Union):
        return _from_ctypes_union(t)
    if isinstance(getattr(t, '_type_', None), str):
        return _from_ctypes_scalar(t)
    raise NotImplementedError(
        f"Unknown ctypes type {t.__name__}")
venv/lib/python3.13/site-packages/numpy/_core/_dtype_ctypes.pyi ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import _ctypes
import ctypes as ct
from typing import Any, overload

import numpy as np

# main entry point: composite kinds first, then each simple ctypes scalar
@overload
def dtype_from_ctypes_type(t: type[_ctypes.Array[Any] | _ctypes.Structure]) -> np.dtype[np.void]: ...
@overload
def dtype_from_ctypes_type(t: type[ct.c_bool]) -> np.dtype[np.bool]: ...
@overload
def dtype_from_ctypes_type(t: type[ct.c_int8 | ct.c_byte]) -> np.dtype[np.int8]: ...
@overload
def dtype_from_ctypes_type(t: type[ct.c_uint8 | ct.c_ubyte]) -> np.dtype[np.uint8]: ...
@overload
def dtype_from_ctypes_type(t: type[ct.c_int16 | ct.c_short]) -> np.dtype[np.int16]: ...
@overload
def dtype_from_ctypes_type(t: type[ct.c_uint16 | ct.c_ushort]) -> np.dtype[np.uint16]: ...
@overload
def dtype_from_ctypes_type(t: type[ct.c_int32 | ct.c_int]) -> np.dtype[np.int32]: ...
@overload
def dtype_from_ctypes_type(t: type[ct.c_uint32 | ct.c_uint]) -> np.dtype[np.uint32]: ...
@overload
def dtype_from_ctypes_type(t: type[ct.c_ssize_t | ct.c_long]) -> np.dtype[np.int32 | np.int64]: ...
@overload
def dtype_from_ctypes_type(t: type[ct.c_size_t | ct.c_ulong]) -> np.dtype[np.uint32 | np.uint64]: ...
@overload
def dtype_from_ctypes_type(t: type[ct.c_int64 | ct.c_longlong]) -> np.dtype[np.int64]: ...
@overload
def dtype_from_ctypes_type(t: type[ct.c_uint64 | ct.c_ulonglong]) -> np.dtype[np.uint64]: ...
@overload
def dtype_from_ctypes_type(t: type[ct.c_float]) -> np.dtype[np.float32]: ...
@overload
def dtype_from_ctypes_type(t: type[ct.c_double]) -> np.dtype[np.float64]: ...
@overload
def dtype_from_ctypes_type(t: type[ct.c_longdouble]) -> np.dtype[np.longdouble]: ...
@overload
def dtype_from_ctypes_type(t: type[ct.c_char]) -> np.dtype[np.bytes_]: ...
@overload
def dtype_from_ctypes_type(t: type[ct.py_object[Any]]) -> np.dtype[np.object_]: ...

# NOTE: the complex ctypes on python>=3.14 are not yet supported at runtime, see
# https://github.com/numpy/numpy/issues/28360

# composite-kind helpers
def _from_ctypes_array(t: type[_ctypes.Array[Any]]) -> np.dtype[np.void]: ...
def _from_ctypes_structure(t: type[_ctypes.Structure]) -> np.dtype[np.void]: ...
def _from_ctypes_union(t: type[_ctypes.Union]) -> np.dtype[np.void]: ...

# keep in sync with `dtype_from_ctypes_type` (minus the first overload)
@overload
def _from_ctypes_scalar(t: type[ct.c_bool]) -> np.dtype[np.bool]: ...
@overload
def _from_ctypes_scalar(t: type[ct.c_int8 | ct.c_byte]) -> np.dtype[np.int8]: ...
@overload
def _from_ctypes_scalar(t: type[ct.c_uint8 | ct.c_ubyte]) -> np.dtype[np.uint8]: ...
@overload
def _from_ctypes_scalar(t: type[ct.c_int16 | ct.c_short]) -> np.dtype[np.int16]: ...
@overload
def _from_ctypes_scalar(t: type[ct.c_uint16 | ct.c_ushort]) -> np.dtype[np.uint16]: ...
@overload
def _from_ctypes_scalar(t: type[ct.c_int32 | ct.c_int]) -> np.dtype[np.int32]: ...
@overload
def _from_ctypes_scalar(t: type[ct.c_uint32 | ct.c_uint]) -> np.dtype[np.uint32]: ...
@overload
def _from_ctypes_scalar(t: type[ct.c_ssize_t | ct.c_long]) -> np.dtype[np.int32 | np.int64]: ...
@overload
def _from_ctypes_scalar(t: type[ct.c_size_t | ct.c_ulong]) -> np.dtype[np.uint32 | np.uint64]: ...
@overload
def _from_ctypes_scalar(t: type[ct.c_int64 | ct.c_longlong]) -> np.dtype[np.int64]: ...
@overload
def _from_ctypes_scalar(t: type[ct.c_uint64 | ct.c_ulonglong]) -> np.dtype[np.uint64]: ...
@overload
def _from_ctypes_scalar(t: type[ct.c_float]) -> np.dtype[np.float32]: ...
@overload
def _from_ctypes_scalar(t: type[ct.c_double]) -> np.dtype[np.float64]: ...
@overload
def _from_ctypes_scalar(t: type[ct.c_longdouble]) -> np.dtype[np.longdouble]: ...
@overload
def _from_ctypes_scalar(t: type[ct.c_char]) -> np.dtype[np.bytes_]: ...
@overload
def _from_ctypes_scalar(t: type[ct.py_object[Any]]) -> np.dtype[np.object_]: ...
venv/lib/python3.13/site-packages/numpy/_core/_exceptions.py ADDED
@@ -0,0 +1,162 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Various richly-typed exceptions, that also help us deal with string formatting
3
+ in python where it's easier.
4
+
5
+ By putting the formatting in `__str__`, we also avoid paying the cost for
6
+ users who silence the exceptions.
7
+ """
8
+
9
+ def _unpack_tuple(tup):
10
+ if len(tup) == 1:
11
+ return tup[0]
12
+ else:
13
+ return tup
14
+
15
+
16
+ def _display_as_base(cls):
17
+ """
18
+ A decorator that makes an exception class look like its base.
19
+
20
+ We use this to hide subclasses that are implementation details - the user
21
+ should catch the base type, which is what the traceback will show them.
22
+
23
+ Classes decorated with this decorator are subject to removal without a
24
+ deprecation warning.
25
+ """
26
+ assert issubclass(cls, Exception)
27
+ cls.__name__ = cls.__base__.__name__
28
+ return cls
29
+
30
+
31
class UFuncTypeError(TypeError):
    """ Base class for all ufunc exceptions """
    def __init__(self, ufunc):
        # the ufunc whose call triggered the error; subclasses use it
        # when formatting their messages
        self.ufunc = ufunc
35
+
36
+
37
@_display_as_base
class _UFuncNoLoopError(UFuncTypeError):
    """ Thrown when a ufunc loop cannot be found """
    def __init__(self, ufunc, dtypes):
        super().__init__(ufunc)
        # materialize the iterable so slicing in __str__ is safe
        self.dtypes = tuple(dtypes)

    def __str__(self):
        nin = self.ufunc.nin
        in_types = _unpack_tuple(self.dtypes[:nin])
        out_types = _unpack_tuple(self.dtypes[nin:])
        return (
            f"ufunc {self.ufunc.__name__!r} did not contain a loop "
            f"with signature matching types {in_types!r} -> {out_types!r}"
        )
50
+
51
+
52
@_display_as_base
class _UFuncBinaryResolutionError(_UFuncNoLoopError):
    """ Thrown when a binary resolution fails """
    def __init__(self, ufunc, dtypes):
        super().__init__(ufunc, dtypes)
        # binary resolution involves exactly two operand dtypes
        assert len(self.dtypes) == 2

    def __str__(self):
        lhs, rhs = self.dtypes
        return (
            f"ufunc {self.ufunc.__name__!r} cannot use operands "
            f"with types {lhs!r} and {rhs!r}"
        )
65
+
66
+
67
@_display_as_base
class _UFuncCastingError(UFuncTypeError):
    """Base for casting failures during a ufunc call (see subclasses)."""
    def __init__(self, ufunc, casting, from_, to):
        super().__init__(ufunc)
        # the casting rule that was violated, and the dtypes involved
        self.casting = casting
        self.from_ = from_
        self.to = to
74
+
75
+
76
@_display_as_base
class _UFuncInputCastingError(_UFuncCastingError):
    """ Thrown when a ufunc input cannot be casted """
    def __init__(self, ufunc, casting, from_, to, i):
        super().__init__(ufunc, casting, from_, to)
        self.in_i = i

    def __str__(self):
        # only show the number if more than one input exists
        if self.ufunc.nin != 1:
            which = f"{self.in_i} "
        else:
            which = ""
        return (
            f"Cannot cast ufunc {self.ufunc.__name__!r} input {which}from "
            f"{self.from_!r} to {self.to!r} with casting rule {self.casting!r}"
        )
90
+
91
+
92
@_display_as_base
class _UFuncOutputCastingError(_UFuncCastingError):
    """ Thrown when a ufunc output cannot be casted """
    def __init__(self, ufunc, casting, from_, to, i):
        super().__init__(ufunc, casting, from_, to)
        self.out_i = i

    def __str__(self):
        # only show the number if more than one output exists
        if self.ufunc.nout != 1:
            which = f"{self.out_i} "
        else:
            which = ""
        return (
            f"Cannot cast ufunc {self.ufunc.__name__!r} output {which}from "
            f"{self.from_!r} to {self.to!r} with casting rule {self.casting!r}"
        )
106
+
107
+
108
@_display_as_base
class _ArrayMemoryError(MemoryError):
    """ Thrown when an array cannot be allocated"""
    def __init__(self, shape, dtype):
        self.shape = shape
        self.dtype = dtype

    @property
    def _total_size(self):
        """Total number of bytes the failed allocation would have needed."""
        total = self.dtype.itemsize
        for extent in self.shape:
            total *= extent
        return total

    @staticmethod
    def _size_to_string(num_bytes):
        """ Convert a number of bytes into a binary size string """
        # https://en.wikipedia.org/wiki/Binary_prefix
        LOG2_STEP = 10
        STEP = 1024
        units = ['bytes', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB']

        # pick the largest unit below num_bytes (bytes for 0/1)
        unit_i = max(num_bytes.bit_length() - 1, 1) // LOG2_STEP
        n_units = num_bytes / (1 << (unit_i * LOG2_STEP))

        # ensure we pick a unit that is correct after rounding
        if round(n_units) == STEP:
            unit_i += 1
            n_units /= STEP

        # deal with sizes so large that we don't have units for them
        largest = len(units) - 1
        if unit_i > largest:
            n_units *= 1 << ((unit_i - largest) * LOG2_STEP)
            unit_i = largest

        unit_name = units[unit_i]
        # format with a sensible number of digits
        if unit_i == 0:
            # no decimal point on bytes
            return f'{n_units:.0f} {unit_name}'
        if round(n_units) < 1000:
            # 3 significant figures, if none are dropped to the left of the .
            return f'{n_units:#.3g} {unit_name}'
        # just give all the digits otherwise
        return f'{n_units:#.0f} {unit_name}'

    def __str__(self):
        size_str = self._size_to_string(self._total_size)
        return (f"Unable to allocate {size_str} for an array with shape "
                f"{self.shape} and data type {self.dtype}")
venv/lib/python3.13/site-packages/numpy/_core/_exceptions.pyi ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from collections.abc import Iterable
2
+ from typing import Any, Final, TypeVar, overload
3
+
4
+ import numpy as np
5
+ from numpy import _CastingKind
6
+ from numpy._utils import set_module as set_module
7
+
8
+ ###
9
+
10
+ _T = TypeVar("_T")
11
+ _TupleT = TypeVar("_TupleT", bound=tuple[()] | tuple[Any, Any, *tuple[Any, ...]])
12
+ _ExceptionT = TypeVar("_ExceptionT", bound=Exception)
13
+
14
+ ###
15
+
16
+ class UFuncTypeError(TypeError):
17
+ ufunc: Final[np.ufunc]
18
+ def __init__(self, /, ufunc: np.ufunc) -> None: ...
19
+
20
+ class _UFuncNoLoopError(UFuncTypeError):
21
+ dtypes: tuple[np.dtype, ...]
22
+ def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype]) -> None: ...
23
+
24
+ class _UFuncBinaryResolutionError(_UFuncNoLoopError):
25
+ dtypes: tuple[np.dtype, np.dtype]
26
+ def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype]) -> None: ...
27
+
28
+ class _UFuncCastingError(UFuncTypeError):
29
+ casting: Final[_CastingKind]
30
+ from_: Final[np.dtype]
31
+ to: Final[np.dtype]
32
+ def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype) -> None: ...
33
+
34
+ class _UFuncInputCastingError(_UFuncCastingError):
35
+ in_i: Final[int]
36
+ def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype, i: int) -> None: ...
37
+
38
+ class _UFuncOutputCastingError(_UFuncCastingError):
39
+ out_i: Final[int]
40
+ def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype, i: int) -> None: ...
41
+
42
+ class _ArrayMemoryError(MemoryError):
43
+ shape: tuple[int, ...]
44
+ dtype: np.dtype
45
+ def __init__(self, /, shape: tuple[int, ...], dtype: np.dtype) -> None: ...
46
+ @property
47
+ def _total_size(self) -> int: ...
48
+ @staticmethod
49
+ def _size_to_string(num_bytes: int) -> str: ...
50
+
51
+ @overload
52
+ def _unpack_tuple(tup: tuple[_T]) -> _T: ...
53
+ @overload
54
+ def _unpack_tuple(tup: _TupleT) -> _TupleT: ...
55
+ def _display_as_base(cls: type[_ExceptionT]) -> type[_ExceptionT]: ...
venv/lib/python3.13/site-packages/numpy/_core/_internal.py ADDED
@@ -0,0 +1,958 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ A place for internal code
3
+
4
+ Some things are more easily handled Python.
5
+
6
+ """
7
+ import ast
8
+ import math
9
+ import re
10
+ import sys
11
+ import warnings
12
+
13
+ from numpy import _NoValue
14
+ from numpy.exceptions import DTypePromotionError
15
+
16
+ from .multiarray import StringDType, array, dtype, promote_types
17
+
18
+ try:
19
+ import ctypes
20
+ except ImportError:
21
+ ctypes = None
22
+
23
# True when running on PyPy; ctypes-related handling differs there
# (see npy_ctypes_check below).
IS_PYPY = sys.implementation.name == 'pypy'

# Native byte-order character for this platform: '<' (little) or '>' (big).
if sys.byteorder == 'little':
    _nbo = '<'
else:
    _nbo = '>'
29
+
30
+ def _makenames_list(adict, align):
31
+ allfields = []
32
+
33
+ for fname, obj in adict.items():
34
+ n = len(obj)
35
+ if not isinstance(obj, tuple) or n not in (2, 3):
36
+ raise ValueError("entry not a 2- or 3- tuple")
37
+ if n > 2 and obj[2] == fname:
38
+ continue
39
+ num = int(obj[1])
40
+ if num < 0:
41
+ raise ValueError("invalid offset.")
42
+ format = dtype(obj[0], align=align)
43
+ if n > 2:
44
+ title = obj[2]
45
+ else:
46
+ title = None
47
+ allfields.append((fname, format, num, title))
48
+ # sort by offsets
49
+ allfields.sort(key=lambda x: x[2])
50
+ names = [x[0] for x in allfields]
51
+ formats = [x[1] for x in allfields]
52
+ offsets = [x[2] for x in allfields]
53
+ titles = [x[3] for x in allfields]
54
+
55
+ return names, formats, offsets, titles
56
+
57
+ # Called in PyArray_DescrConverter function when
58
+ # a dictionary without "names" and "formats"
59
+ # fields is used as a data-type descriptor.
60
+ def _usefields(adict, align):
61
+ try:
62
+ names = adict[-1]
63
+ except KeyError:
64
+ names = None
65
+ if names is None:
66
+ names, formats, offsets, titles = _makenames_list(adict, align)
67
+ else:
68
+ formats = []
69
+ offsets = []
70
+ titles = []
71
+ for name in names:
72
+ res = adict[name]
73
+ formats.append(res[0])
74
+ offsets.append(res[1])
75
+ if len(res) > 2:
76
+ titles.append(res[2])
77
+ else:
78
+ titles.append(None)
79
+
80
+ return dtype({"names": names,
81
+ "formats": formats,
82
+ "offsets": offsets,
83
+ "titles": titles}, align)
84
+
85
+
86
+ # construct an array_protocol descriptor list
87
+ # from the fields attribute of a descriptor
88
+ # This calls itself recursively but should eventually hit
89
+ # a descriptor that has no fields and then return
90
+ # a simple typestring
91
+
92
+ def _array_descr(descriptor):
93
+ fields = descriptor.fields
94
+ if fields is None:
95
+ subdtype = descriptor.subdtype
96
+ if subdtype is None:
97
+ if descriptor.metadata is None:
98
+ return descriptor.str
99
+ else:
100
+ new = descriptor.metadata.copy()
101
+ if new:
102
+ return (descriptor.str, new)
103
+ else:
104
+ return descriptor.str
105
+ else:
106
+ return (_array_descr(subdtype[0]), subdtype[1])
107
+
108
+ names = descriptor.names
109
+ ordered_fields = [fields[x] + (x,) for x in names]
110
+ result = []
111
+ offset = 0
112
+ for field in ordered_fields:
113
+ if field[1] > offset:
114
+ num = field[1] - offset
115
+ result.append(('', f'|V{num}'))
116
+ offset += num
117
+ elif field[1] < offset:
118
+ raise ValueError(
119
+ "dtype.descr is not defined for types with overlapping or "
120
+ "out-of-order fields")
121
+ if len(field) > 3:
122
+ name = (field[2], field[3])
123
+ else:
124
+ name = field[2]
125
+ if field[0].subdtype:
126
+ tup = (name, _array_descr(field[0].subdtype[0]),
127
+ field[0].subdtype[1])
128
+ else:
129
+ tup = (name, _array_descr(field[0]))
130
+ offset += field[0].itemsize
131
+ result.append(tup)
132
+
133
+ if descriptor.itemsize > offset:
134
+ num = descriptor.itemsize - offset
135
+ result.append(('', f'|V{num}'))
136
+
137
+ return result
138
+
139
+
140
# format_re was originally from numarray by J. Todd Miller
# Matches one item of a comma-separated typestring: optional byte order,
# optional (possibly parenthesized) repeat count, an optional second
# byte-order position, then the dtype name.
format_re = re.compile(r'(?P<order1>[<>|=]?)'
                       r'(?P<repeats> *[(]?[ ,0-9]*[)]? *)'
                       r'(?P<order2>[<>|=]?)'
                       r'(?P<dtype>[A-Za-z0-9.?]*(?:\[[a-zA-Z0-9,.]+\])?)')
# Item separator: a comma with optional surrounding whitespace.
sep_re = re.compile(r'\s*,\s*')
# Trailing whitespace at end-of-string (allowed after the last item).
space_re = re.compile(r'\s+$')

# astr is a string (perhaps comma separated)

# '=' means native order; resolve it to the concrete '<' or '>' character.
_convorder = {'=': _nbo}
152
+
153
def _commastring(astr):
    # Parse a (possibly comma-separated) typestring such as "i4, (2,3)f8"
    # into dtype spec items.  Returns a single item when exactly one field
    # was given without a separator, otherwise a list of items.
    startindex = 0
    result = []
    islist = False
    while startindex < len(astr):
        mo = format_re.match(astr, pos=startindex)
        try:
            (order1, repeats, order2, dtype) = mo.groups()
        except (TypeError, AttributeError):
            # mo is None when nothing matched at this position.
            raise ValueError(
                f'format number {len(result) + 1} of "{astr}" is not recognized'
            ) from None
        startindex = mo.end()
        # Separator or ending padding
        if startindex < len(astr):
            if space_re.match(astr, pos=startindex):
                startindex = len(astr)
            else:
                mo = sep_re.match(astr, pos=startindex)
                if not mo:
                    raise ValueError(
                        'format number %d of "%s" is not recognized' %
                        (len(result) + 1, astr))
                startindex = mo.end()
                islist = True

        # Reconcile the two possible byte-order positions (before or after
        # the repeat count); when both are given they must agree.
        if order2 == '':
            order = order1
        elif order1 == '':
            order = order2
        else:
            order1 = _convorder.get(order1, order1)
            order2 = _convorder.get(order2, order2)
            if (order1 != order2):
                raise ValueError(
                    f'inconsistent byte-order specification {order1} and {order2}')
            order = order1

        if order in ('|', '=', _nbo):
            # Native/neutral orders are dropped from the typestring.
            order = ''
        dtype = order + dtype
        if repeats == '':
            newitem = dtype
        else:
            if (repeats[0] == "(" and repeats[-1] == ")"
                    and repeats[1:-1].strip() != ""
                    and "," not in repeats):
                warnings.warn(
                    'Passing in a parenthesized single number for repeats '
                    'is deprecated; pass either a single number or indicate '
                    'a tuple with a comma, like "(2,)".', DeprecationWarning,
                    stacklevel=2)
            # literal_eval turns "(2,3)" into a shape tuple, "3" into an int.
            newitem = (dtype, ast.literal_eval(repeats))

        result.append(newitem)

    return result if islist else result[0]
210
+
211
class dummy_ctype:
    """Stand-in for a ctypes scalar type when the ctypes module is
    unavailable (see `_getintp_ctype`)."""

    def __init__(self, cls):
        # cls: the wrapped scalar type (e.g. np.intp).
        self._cls = cls

    def __mul__(self, other):
        # "Array of N" of the dummy type collapses to the dummy itself.
        return self

    def __call__(self, *other):
        # Mimic a ctypes constructor; note all args are passed to the
        # wrapped type as a single tuple.
        return self._cls(other)

    def __eq__(self, other):
        # NOTE(review): assumes `other` is also a dummy_ctype — confirm.
        return self._cls == other._cls

    def __ne__(self, other):
        return self._cls != other._cls
227
+
228
def _getintp_ctype():
    # Return the ctypes integer type matching numpy's intp ('n' type
    # code), caching the result on the function object itself.
    val = _getintp_ctype.cache
    if val is not None:
        return val
    if ctypes is None:
        # ctypes unavailable: fall back to a dummy wrapper around np.intp.
        import numpy as np
        val = dummy_ctype(np.intp)
    else:
        char = dtype('n').char
        if char == 'i':
            val = ctypes.c_int
        elif char == 'l':
            val = ctypes.c_long
        elif char == 'q':
            val = ctypes.c_longlong
        else:
            # Unrecognized code: fall back to c_long.
            val = ctypes.c_long
    _getintp_ctype.cache = val
    return val


# Cache slot used by _getintp_ctype above.
_getintp_ctype.cache = None
250
+
251
# Used for .ctypes attribute of ndarray

class _missing_ctypes:
    # Minimal substitute for the ctypes module, used by `_ctypes` below
    # when the real ctypes is unavailable.
    def cast(self, num, obj):
        # A "cast" just unwraps the stored integer address.
        return num.value

    class c_void_p:
        # Pointer-like wrapper storing a raw address in .value.
        def __init__(self, ptr):
            self.value = ptr
260
+
261
+
262
class _ctypes:
    """Backing object for ``ndarray.ctypes``.

    Exposes the array's data pointer, shape and strides in forms usable
    with the ctypes module, keeping a reference to the array alive where
    needed.  Falls back to `_missing_ctypes` when ctypes is unavailable.
    """

    def __init__(self, array, ptr=None):
        self._arr = array

        if ctypes:
            self._ctypes = ctypes
            self._data = self._ctypes.c_void_p(ptr)
        else:
            # fake a pointer-like object that holds onto the reference
            self._ctypes = _missing_ctypes()
            self._data = self._ctypes.c_void_p(ptr)
            self._data._objects = array

        if self._arr.ndim == 0:
            self._zerod = True
        else:
            self._zerod = False

    def data_as(self, obj):
        """
        Return the data pointer cast to a particular c-types object.
        For example, calling ``self._as_parameter_`` is equivalent to
        ``self.data_as(ctypes.c_void_p)``. Perhaps you want to use
        the data as a pointer to a ctypes array of floating-point data:
        ``self.data_as(ctypes.POINTER(ctypes.c_double))``.

        The returned pointer will keep a reference to the array.
        """
        # _ctypes.cast function causes a circular reference of self._data in
        # self._data._objects. Attributes of self._data cannot be released
        # until gc.collect is called. Make a copy of the pointer first then
        # let it hold the array reference. This is a workaround to circumvent
        # the CPython bug https://bugs.python.org/issue12836.
        ptr = self._ctypes.cast(self._data, obj)
        ptr._arr = self._arr
        return ptr

    def shape_as(self, obj):
        """
        Return the shape tuple as an array of some other c-types
        type. For example: ``self.shape_as(ctypes.c_short)``.
        """
        if self._zerod:
            return None
        return (obj * self._arr.ndim)(*self._arr.shape)

    def strides_as(self, obj):
        """
        Return the strides tuple as an array of some other
        c-types type. For example: ``self.strides_as(ctypes.c_longlong)``.
        """
        if self._zerod:
            return None
        return (obj * self._arr.ndim)(*self._arr.strides)

    @property
    def data(self):
        """
        A pointer to the memory area of the array as a Python integer.
        This memory area may contain data that is not aligned, or not in
        correct byte-order. The memory area may not even be writeable.
        The array flags and data-type of this array should be respected
        when passing this attribute to arbitrary C-code to avoid trouble
        that can include Python crashing. User Beware! The value of this
        attribute is exactly the same as:
        ``self._array_interface_['data'][0]``.

        Note that unlike ``data_as``, a reference won't be kept to the array:
        code like ``ctypes.c_void_p((a + b).ctypes.data)`` will result in a
        pointer to a deallocated array, and should be spelt
        ``(a + b).ctypes.data_as(ctypes.c_void_p)``
        """
        return self._data.value

    @property
    def shape(self):
        """
        (c_intp*self.ndim): A ctypes array of length self.ndim where
        the basetype is the C-integer corresponding to ``dtype('p')`` on this
        platform (see `~numpy.ctypeslib.c_intp`). This base-type could be
        `ctypes.c_int`, `ctypes.c_long`, or `ctypes.c_longlong` depending on
        the platform. The ctypes array contains the shape of
        the underlying array.
        """
        return self.shape_as(_getintp_ctype())

    @property
    def strides(self):
        """
        (c_intp*self.ndim): A ctypes array of length self.ndim where
        the basetype is the same as for the shape attribute. This ctypes
        array contains the strides information from the underlying array.
        This strides information is important for showing how many bytes
        must be jumped to get to the next element in the array.
        """
        return self.strides_as(_getintp_ctype())

    @property
    def _as_parameter_(self):
        """
        Overrides the ctypes semi-magic method

        Enables `c_func(some_array.ctypes)`
        """
        return self.data_as(ctypes.c_void_p)

    # Numpy 1.21.0, 2021-05-18

    def get_data(self):
        """Deprecated getter for the `_ctypes.data` property.

        .. deprecated:: 1.21
        """
        warnings.warn('"get_data" is deprecated. Use "data" instead',
                      DeprecationWarning, stacklevel=2)
        return self.data

    def get_shape(self):
        """Deprecated getter for the `_ctypes.shape` property.

        .. deprecated:: 1.21
        """
        warnings.warn('"get_shape" is deprecated. Use "shape" instead',
                      DeprecationWarning, stacklevel=2)
        return self.shape

    def get_strides(self):
        """Deprecated getter for the `_ctypes.strides` property.

        .. deprecated:: 1.21
        """
        warnings.warn('"get_strides" is deprecated. Use "strides" instead',
                      DeprecationWarning, stacklevel=2)
        return self.strides

    def get_as_parameter(self):
        """Deprecated getter for the `_ctypes._as_parameter_` property.

        .. deprecated:: 1.21
        """
        warnings.warn(
            '"get_as_parameter" is deprecated. Use "_as_parameter_" instead',
            DeprecationWarning, stacklevel=2,
        )
        return self._as_parameter_
407
+
408
+
409
+ def _newnames(datatype, order):
410
+ """
411
+ Given a datatype and an order object, return a new names tuple, with the
412
+ order indicated
413
+ """
414
+ oldnames = datatype.names
415
+ nameslist = list(oldnames)
416
+ if isinstance(order, str):
417
+ order = [order]
418
+ seen = set()
419
+ if isinstance(order, (list, tuple)):
420
+ for name in order:
421
+ try:
422
+ nameslist.remove(name)
423
+ except ValueError:
424
+ if name in seen:
425
+ raise ValueError(f"duplicate field name: {name}") from None
426
+ else:
427
+ raise ValueError(f"unknown field name: {name}") from None
428
+ seen.add(name)
429
+ return tuple(list(order) + nameslist)
430
+ raise ValueError(f"unsupported order value: {order}")
431
+
432
+ def _copy_fields(ary):
433
+ """Return copy of structured array with padding between fields removed.
434
+
435
+ Parameters
436
+ ----------
437
+ ary : ndarray
438
+ Structured array from which to remove padding bytes
439
+
440
+ Returns
441
+ -------
442
+ ary_copy : ndarray
443
+ Copy of ary with padding bytes removed
444
+ """
445
+ dt = ary.dtype
446
+ copy_dtype = {'names': dt.names,
447
+ 'formats': [dt.fields[name][0] for name in dt.names]}
448
+ return array(ary, dtype=copy_dtype, copy=True)
449
+
450
def _promote_fields(dt1, dt2):
    """ Perform type promotion for two structured dtypes.

    Parameters
    ----------
    dt1 : structured dtype
        First dtype.
    dt2 : structured dtype
        Second dtype.

    Returns
    -------
    out : dtype
        The promoted dtype

    Notes
    -----
    If one of the inputs is aligned, the result will be.  The titles of
    both descriptors must match (point to the same field).
    """
    # Both must be structured and have the same names in the same order
    if (dt1.names is None or dt2.names is None) or dt1.names != dt2.names:
        raise DTypePromotionError(
            f"field names `{dt1.names}` and `{dt2.names}` mismatch.")

    # if both are identical, we can (maybe!) just return the same dtype.
    identical = dt1 is dt2
    new_fields = []
    for name in dt1.names:
        field1 = dt1.fields[name]
        field2 = dt2.fields[name]
        new_descr = promote_types(field1[0], field2[0])
        identical = identical and new_descr is field1[0]

        # Check that the titles match (if given):
        if field1[2:] != field2[2:]:
            raise DTypePromotionError(
                f"field titles of field '{name}' mismatch")
        if len(field1) == 2:
            new_fields.append((name, new_descr))
        else:
            # Preserve the title: titled entries are ((title, name), descr).
            new_fields.append(((field1[2], name), new_descr))

    res = dtype(new_fields, align=dt1.isalignedstruct or dt2.isalignedstruct)

    # Might as well preserve identity (and metadata) if the dtype is identical
    # and the itemsize, offsets are also unmodified. This could probably be
    # sped up, but also probably just be removed entirely.
    if identical and res.itemsize == dt1.itemsize:
        for name in dt1.names:
            if dt1.fields[name][1] != res.fields[name][1]:
                return res  # the dtype changed.
        return dt1

    return res
505
+
506
+
507
+ def _getfield_is_safe(oldtype, newtype, offset):
508
+ """ Checks safety of getfield for object arrays.
509
+
510
+ As in _view_is_safe, we need to check that memory containing objects is not
511
+ reinterpreted as a non-object datatype and vice versa.
512
+
513
+ Parameters
514
+ ----------
515
+ oldtype : data-type
516
+ Data type of the original ndarray.
517
+ newtype : data-type
518
+ Data type of the field being accessed by ndarray.getfield
519
+ offset : int
520
+ Offset of the field being accessed by ndarray.getfield
521
+
522
+ Raises
523
+ ------
524
+ TypeError
525
+ If the field access is invalid
526
+
527
+ """
528
+ if newtype.hasobject or oldtype.hasobject:
529
+ if offset == 0 and newtype == oldtype:
530
+ return
531
+ if oldtype.names is not None:
532
+ for name in oldtype.names:
533
+ if (oldtype.fields[name][1] == offset and
534
+ oldtype.fields[name][0] == newtype):
535
+ return
536
+ raise TypeError("Cannot get/set field of an object array")
537
+ return
538
+
539
+ def _view_is_safe(oldtype, newtype):
540
+ """ Checks safety of a view involving object arrays, for example when
541
+ doing::
542
+
543
+ np.zeros(10, dtype=oldtype).view(newtype)
544
+
545
+ Parameters
546
+ ----------
547
+ oldtype : data-type
548
+ Data type of original ndarray
549
+ newtype : data-type
550
+ Data type of the view
551
+
552
+ Raises
553
+ ------
554
+ TypeError
555
+ If the new type is incompatible with the old type.
556
+
557
+ """
558
+
559
+ # if the types are equivalent, there is no problem.
560
+ # for example: dtype((np.record, 'i4,i4')) == dtype((np.void, 'i4,i4'))
561
+ if oldtype == newtype:
562
+ return
563
+
564
+ if newtype.hasobject or oldtype.hasobject:
565
+ raise TypeError("Cannot change data-type for array of references.")
566
+ return
567
+
568
+
569
# Given a string containing a PEP 3118 format specifier,
# construct a NumPy dtype

# PEP 3118 type codes -> NumPy typestrings for native sizes
# (in effect under the '@' and '^' byte-order characters).
_pep3118_native_map = {
    '?': '?',
    'c': 'S1',
    'b': 'b',
    'B': 'B',
    'h': 'h',
    'H': 'H',
    'i': 'i',
    'I': 'I',
    'l': 'l',
    'L': 'L',
    'q': 'q',
    'Q': 'Q',
    'e': 'e',
    'f': 'f',
    'd': 'd',
    'g': 'g',
    'Zf': 'F',
    'Zd': 'D',
    'Zg': 'G',
    's': 'S',
    'w': 'U',
    'O': 'O',
    'x': 'V',  # padding
    }
_pep3118_native_typechars = ''.join(_pep3118_native_map.keys())

# Same mapping for standard sizes ('=', '<', '>', '!'), which pin each
# code to a fixed-width NumPy type.
_pep3118_standard_map = {
    '?': '?',
    'c': 'S1',
    'b': 'b',
    'B': 'B',
    'h': 'i2',
    'H': 'u2',
    'i': 'i4',
    'I': 'u4',
    'l': 'i4',
    'L': 'u4',
    'q': 'i8',
    'Q': 'u8',
    'e': 'f2',
    'f': 'f',
    'd': 'd',
    'Zf': 'F',
    'Zd': 'D',
    's': 'S',
    'w': 'U',
    'O': 'O',
    'x': 'V',  # padding
    }
_pep3118_standard_typechars = ''.join(_pep3118_standard_map.keys())

# Codes with no NumPy equivalent; used to produce helpful error messages.
_pep3118_unsupported_map = {
    'u': 'UCS-2 strings',
    '&': 'pointers',
    't': 'bitfields',
    'X': 'function pointers',
    }
630
+
631
+ class _Stream:
632
+ def __init__(self, s):
633
+ self.s = s
634
+ self.byteorder = '@'
635
+
636
+ def advance(self, n):
637
+ res = self.s[:n]
638
+ self.s = self.s[n:]
639
+ return res
640
+
641
+ def consume(self, c):
642
+ if self.s[:len(c)] == c:
643
+ self.advance(len(c))
644
+ return True
645
+ return False
646
+
647
+ def consume_until(self, c):
648
+ if callable(c):
649
+ i = 0
650
+ while i < len(self.s) and not c(self.s[i]):
651
+ i = i + 1
652
+ return self.advance(i)
653
+ else:
654
+ i = self.s.index(c)
655
+ res = self.advance(i)
656
+ self.advance(len(c))
657
+ return res
658
+
659
+ @property
660
+ def next(self):
661
+ return self.s[0]
662
+
663
+ def __bool__(self):
664
+ return bool(self.s)
665
+
666
+
667
def _dtype_from_pep3118(spec):
    """Construct a NumPy dtype from a PEP 3118 buffer-format string."""
    result, _ = __dtype_from_pep3118(_Stream(spec), is_subdtype=False)
    return result
671
+
672
def __dtype_from_pep3118(stream, is_subdtype):
    # Worker for _dtype_from_pep3118: consume `stream` (a _Stream over a
    # PEP 3118 format string) and return ``(dtype, common_alignment)``.
    # Recurses with is_subdtype=True for 'T{...}' sub-structures.
    field_spec = {
        'names': [],
        'formats': [],
        'offsets': [],
        'itemsize': 0
    }
    offset = 0
    common_alignment = 1
    is_padding = False

    # Parse spec
    while stream:
        value = None

        # End of structure, bail out to upper level
        if stream.consume('}'):
            break

        # Sub-arrays (1)
        shape = None
        if stream.consume('('):
            shape = stream.consume_until(')')
            shape = tuple(map(int, shape.split(',')))

        # Byte order
        if stream.next in ('@', '=', '<', '>', '^', '!'):
            byteorder = stream.advance(1)
            if byteorder == '!':
                # '!' (network order) is big-endian.
                byteorder = '>'
            stream.byteorder = byteorder

        # Byte order characters also control native vs. standard type sizes
        if stream.byteorder in ('@', '^'):
            type_map = _pep3118_native_map
            type_map_chars = _pep3118_native_typechars
        else:
            type_map = _pep3118_standard_map
            type_map_chars = _pep3118_standard_typechars

        # Item sizes
        itemsize_str = stream.consume_until(lambda c: not c.isdigit())
        if itemsize_str:
            itemsize = int(itemsize_str)
        else:
            itemsize = 1

        # Data types
        is_padding = False

        if stream.consume('T{'):
            value, align = __dtype_from_pep3118(
                stream, is_subdtype=True)
        elif stream.next in type_map_chars:
            if stream.next == 'Z':
                # Complex types use two-character codes ('Zf', 'Zd', ...).
                typechar = stream.advance(2)
            else:
                typechar = stream.advance(1)

            is_padding = (typechar == 'x')
            dtypechar = type_map[typechar]
            if dtypechar in 'USV':
                # For strings/void the repeat count is the item length,
                # not a sub-array shape.
                dtypechar += '%d' % itemsize
                itemsize = 1
            numpy_byteorder = {'@': '=', '^': '='}.get(
                stream.byteorder, stream.byteorder)
            value = dtype(numpy_byteorder + dtypechar)
            align = value.alignment
        elif stream.next in _pep3118_unsupported_map:
            desc = _pep3118_unsupported_map[stream.next]
            raise NotImplementedError(
                f"Unrepresentable PEP 3118 data type {stream.next!r} ({desc})")
        else:
            raise ValueError(
                f"Unknown PEP 3118 data type specifier {stream.s!r}"
            )

        #
        # Native alignment may require padding
        #
        # Here we assume that the presence of a '@' character implicitly
        # implies that the start of the array is *already* aligned.
        #
        extra_offset = 0
        if stream.byteorder == '@':
            start_padding = (-offset) % align
            intra_padding = (-value.itemsize) % align

            offset += start_padding

            if intra_padding != 0:
                if itemsize > 1 or (shape is not None and _prod(shape) > 1):
                    # Inject internal padding to the end of the sub-item
                    value = _add_trailing_padding(value, intra_padding)
                else:
                    # We can postpone the injection of internal padding,
                    # as the item appears at most once
                    extra_offset += intra_padding

            # Update common alignment
            common_alignment = _lcm(align, common_alignment)

        # Convert itemsize to sub-array
        if itemsize != 1:
            value = dtype((value, (itemsize,)))

        # Sub-arrays (2)
        if shape is not None:
            value = dtype((value, shape))

        # Field name
        if stream.consume(':'):
            name = stream.consume_until(':')
        else:
            name = None

        if not (is_padding and name is None):
            if name is not None and name in field_spec['names']:
                raise RuntimeError(
                    f"Duplicate field name '{name}' in PEP3118 format"
                )
            field_spec['names'].append(name)
            field_spec['formats'].append(value)
            field_spec['offsets'].append(offset)

        offset += value.itemsize
        offset += extra_offset

    field_spec['itemsize'] = offset

    # extra final padding for aligned types
    if stream.byteorder == '@':
        field_spec['itemsize'] += (-offset) % common_alignment

    # Check if this was a simple 1-item type, and unwrap it
    if (field_spec['names'] == [None]
            and field_spec['offsets'][0] == 0
            and field_spec['itemsize'] == field_spec['formats'][0].itemsize
            and not is_subdtype):
        ret = field_spec['formats'][0]
    else:
        _fix_names(field_spec)
        ret = dtype(field_spec)

    # Finished
    return ret, common_alignment
818
+
819
+ def _fix_names(field_spec):
820
+ """ Replace names which are None with the next unused f%d name """
821
+ names = field_spec['names']
822
+ for i, name in enumerate(names):
823
+ if name is not None:
824
+ continue
825
+
826
+ j = 0
827
+ while True:
828
+ name = f'f{j}'
829
+ if name not in names:
830
+ break
831
+ j = j + 1
832
+ names[i] = name
833
+
834
+ def _add_trailing_padding(value, padding):
835
+ """Inject the specified number of padding bytes at the end of a dtype"""
836
+ if value.fields is None:
837
+ field_spec = {
838
+ 'names': ['f0'],
839
+ 'formats': [value],
840
+ 'offsets': [0],
841
+ 'itemsize': value.itemsize
842
+ }
843
+ else:
844
+ fields = value.fields
845
+ names = value.names
846
+ field_spec = {
847
+ 'names': names,
848
+ 'formats': [fields[name][0] for name in names],
849
+ 'offsets': [fields[name][1] for name in names],
850
+ 'itemsize': value.itemsize
851
+ }
852
+
853
+ field_spec['itemsize'] += padding
854
+ return dtype(field_spec)
855
+
856
+ def _prod(a):
857
+ p = 1
858
+ for x in a:
859
+ p *= x
860
+ return p
861
+
862
+ def _gcd(a, b):
863
+ """Calculate the greatest common divisor of a and b"""
864
+ if not (math.isfinite(a) and math.isfinite(b)):
865
+ raise ValueError('Can only find greatest common divisor of '
866
+ f'finite arguments, found "{a}" and "{b}"')
867
+ while b:
868
+ a, b = b, a % b
869
+ return a
870
+
871
def _lcm(a, b):
    """Return the least common multiple of a and b."""
    divisor = _gcd(a, b)
    return a // divisor * b
873
+
874
def array_ufunc_errmsg_formatter(dummy, ufunc, method, *inputs, **kwargs):
    """ Format the error message for when __array_ufunc__ gives up. """
    # Render positional inputs, then keyword arguments, as they were passed.
    pieces = [repr(arg) for arg in inputs]
    pieces += [f'{name}={value!r}' for name, value in kwargs.items()]
    args_string = ', '.join(pieces)
    # The relevant operand types are the inputs plus any `out` arrays.
    relevant = inputs + kwargs.get('out', ())
    types_string = ', '.join(repr(type(obj).__name__) for obj in relevant)
    return ('operand type(s) all returned NotImplemented from '
            f'__array_ufunc__({ufunc!r}, {method!r}, {args_string}): {types_string}'
            )
884
+
885
+
886
def array_function_errmsg_formatter(public_api, types):
    """Format the error message for when ``__array_function__`` gives up.

    Parameters
    ----------
    public_api : callable
        The public NumPy function for which no implementation was found;
        its ``__module__`` and ``__name__`` identify it in the message.
    types : iterable
        The argument types whose ``__array_function__`` implementations
        all declined.
    """
    # (Docstring fixed: this formatter is for __array_function__, not
    # __array_ufunc__.)
    func_name = f'{public_api.__module__}.{public_api.__name__}'
    return (f"no implementation found for '{func_name}' on types that implement "
            f'__array_function__: {list(types)}')
891
+
892
+
893
+ def _ufunc_doc_signature_formatter(ufunc):
894
+ """
895
+ Builds a signature string which resembles PEP 457
896
+
897
+ This is used to construct the first line of the docstring
898
+ """
899
+
900
+ # input arguments are simple
901
+ if ufunc.nin == 1:
902
+ in_args = 'x'
903
+ else:
904
+ in_args = ', '.join(f'x{i + 1}' for i in range(ufunc.nin))
905
+
906
+ # output arguments are both keyword or positional
907
+ if ufunc.nout == 0:
908
+ out_args = ', /, out=()'
909
+ elif ufunc.nout == 1:
910
+ out_args = ', /, out=None'
911
+ else:
912
+ out_args = '[, {positional}], / [, out={default}]'.format(
913
+ positional=', '.join(
914
+ f'out{i + 1}' for i in range(ufunc.nout)),
915
+ default=repr((None,) * ufunc.nout)
916
+ )
917
+
918
+ # keyword only args depend on whether this is a gufunc
919
+ kwargs = (
920
+ ", casting='same_kind'"
921
+ ", order='K'"
922
+ ", dtype=None"
923
+ ", subok=True"
924
+ )
925
+
926
+ # NOTE: gufuncs may or may not support the `axis` parameter
927
+ if ufunc.signature is None:
928
+ kwargs = f", where=True{kwargs}[, signature]"
929
+ else:
930
+ kwargs += "[, signature, axes, axis]"
931
+
932
+ # join all the parts together
933
+ return f'{ufunc.__name__}({in_args}{out_args}, *{kwargs})'
934
+
935
+
936
def npy_ctypes_check(cls):
    """Return True when *cls* appears to be a ctypes type.

    Used to work around a buffer-protocol bug for ctypes objects
    (bpo-10746).  Any failure while inspecting the class is treated as
    "not a ctypes class".
    """
    try:
        # ctypes classes are new-style, so have an __mro__. This probably
        # fails for ctypes classes with multiple inheritance.
        if IS_PYPY:
            # (..., _ctypes.basics._CData, Bufferable, object)
            base = cls.__mro__[-3]
        else:
            # (..., _ctypes._CData, object)
            base = cls.__mro__[-2]
        # right now, they're part of the _ctypes module
        return '_ctypes' in base.__module__
    except Exception:
        return False
952
+
953
+ # used to handle the _NoValue default argument for na_object
954
+ # in the C implementation of the __reduce__ method for stringdtype
955
+ def _convert_to_stringdtype_kwargs(coerce, na_object=_NoValue):
956
+ if na_object is _NoValue:
957
+ return StringDType(coerce=coerce)
958
+ return StringDType(coerce=coerce, na_object=na_object)
venv/lib/python3.13/site-packages/numpy/_core/_internal.pyi ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Type stubs for numpy._core._internal.

import ctypes as ct
import re
from collections.abc import Callable, Iterable
from typing import Any, Final, Generic, Self, overload

from typing_extensions import TypeVar, deprecated

import numpy as np
import numpy.typing as npt
from numpy.ctypeslib import c_intp

_CastT = TypeVar("_CastT", bound=ct._CanCastTo)
_T_co = TypeVar("_T_co", covariant=True)
_CT = TypeVar("_CT", bound=ct._CData)
# Pointer value type held by _ctypes: an int address or None.
_PT_co = TypeVar("_PT_co", bound=int | None, default=None, covariant=True)

###

IS_PYPY: Final[bool] = ...

format_re: Final[re.Pattern[str]] = ...
sep_re: Final[re.Pattern[str]] = ...
space_re: Final[re.Pattern[str]] = ...

###

# TODO: Let the likes of `shape_as` and `strides_as` return `None`
# for 0D arrays once we've got shape-support

class _ctypes(Generic[_PT_co]):
    @overload
    def __init__(self: _ctypes[None], /, array: npt.NDArray[Any], ptr: None = None) -> None: ...
    @overload
    def __init__(self, /, array: npt.NDArray[Any], ptr: _PT_co) -> None: ...

    #
    @property
    def data(self) -> _PT_co: ...
    @property
    def shape(self) -> ct.Array[c_intp]: ...
    @property
    def strides(self) -> ct.Array[c_intp]: ...
    @property
    def _as_parameter_(self) -> ct.c_void_p: ...

    #
    def data_as(self, /, obj: type[_CastT]) -> _CastT: ...
    def shape_as(self, /, obj: type[_CT]) -> ct.Array[_CT]: ...
    def strides_as(self, /, obj: type[_CT]) -> ct.Array[_CT]: ...

    #
    @deprecated('"get_data" is deprecated. Use "data" instead')
    def get_data(self, /) -> _PT_co: ...
    @deprecated('"get_shape" is deprecated. Use "shape" instead')
    def get_shape(self, /) -> ct.Array[c_intp]: ...
    @deprecated('"get_strides" is deprecated. Use "strides" instead')
    def get_strides(self, /) -> ct.Array[c_intp]: ...
    @deprecated('"get_as_parameter" is deprecated. Use "_as_parameter_" instead')
    def get_as_parameter(self, /) -> ct.c_void_p: ...

class dummy_ctype(Generic[_T_co]):
    _cls: type[_T_co]

    def __init__(self, /, cls: type[_T_co]) -> None: ...
    def __eq__(self, other: Self, /) -> bool: ...  # type: ignore[override]  # pyright: ignore[reportIncompatibleMethodOverride]
    def __ne__(self, other: Self, /) -> bool: ...  # type: ignore[override]  # pyright: ignore[reportIncompatibleMethodOverride]
    def __mul__(self, other: object, /) -> Self: ...
    def __call__(self, /, *other: object) -> _T_co: ...

def array_ufunc_errmsg_formatter(dummy: object, ufunc: np.ufunc, method: str, *inputs: object, **kwargs: object) -> str: ...
def array_function_errmsg_formatter(public_api: Callable[..., object], types: Iterable[str]) -> str: ...
def npy_ctypes_check(cls: type) -> bool: ...
venv/lib/python3.13/site-packages/numpy/_core/_machar.py ADDED
@@ -0,0 +1,355 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Machine arithmetic - determine the parameters of the
3
+ floating-point arithmetic system
4
+
5
+ Author: Pearu Peterson, September 2003
6
+
7
+ """
8
+ __all__ = ['MachAr']
9
+
10
+ from ._ufunc_config import errstate
11
+ from .fromnumeric import any
12
+
13
+ # Need to speed this up...especially for longdouble
14
+
15
+ # Deprecated 2021-10-20, NumPy 1.22
16
class MachAr:
    """
    Diagnosing machine parameters.

    Attributes
    ----------
    ibeta : int
        Radix in which numbers are represented.
    it : int
        Number of base-`ibeta` digits in the floating point mantissa M.
    machep : int
        Exponent of the smallest (most negative) power of `ibeta` that,
        added to 1.0, gives something different from 1.0
    eps : float
        Floating-point number ``beta**machep`` (floating point precision)
    negep : int
        Exponent of the smallest power of `ibeta` that, subtracted
        from 1.0, gives something different from 1.0.
    epsneg : float
        Floating-point number ``beta**negep``.
    iexp : int
        Number of bits in the exponent (including its sign and bias).
    minexp : int
        Smallest (most negative) power of `ibeta` consistent with there
        being no leading zeros in the mantissa.
    xmin : float
        Floating-point number ``beta**minexp`` (the smallest [in
        magnitude] positive floating point number with full precision).
    maxexp : int
        Smallest (positive) power of `ibeta` that causes overflow.
    xmax : float
        ``(1-epsneg) * beta**maxexp`` (the largest [in magnitude]
        usable floating value).
    irnd : int
        In ``range(6)``, information on what kind of rounding is done
        in addition, and on how underflow is handled.
    ngrd : int
        Number of 'guard digits' used when truncating the product
        of two mantissas to fit the representation.
    epsilon : float
        Same as `eps`.
    tiny : float
        An alias for `smallest_normal`, kept for backwards compatibility.
    huge : float
        Same as `xmax`.
    precision : float
        ``int(-log10(eps))``
    resolution : float
        ``10**(-precision)``
    smallest_normal : float
        The smallest positive floating point number with 1 as leading bit in
        the mantissa following IEEE-754. Same as `xmin`.
    smallest_subnormal : float
        The smallest positive floating point number with 0 as leading bit in
        the mantissa following IEEE-754.

    Parameters
    ----------
    float_conv : function, optional
        Function that converts an integer or integer array to a float
        or float array. Default is `float`.
    int_conv : function, optional
        Function that converts a float or float array to an integer or
        integer array. Default is `int`.
    float_to_float : function, optional
        Function that converts a float array to float. Default is `float`.
        Note that this does not seem to do anything useful in the current
        implementation.
    float_to_str : function, optional
        Function that converts a single float to a string. Default is
        ``lambda v:'%24.16e' %v``.
    title : str, optional
        Title that is printed in the string representation of `MachAr`.

    See Also
    --------
    finfo : Machine limits for floating point types.
    iinfo : Machine limits for integer types.

    References
    ----------
    .. [1] Press, Teukolsky, Vetterling and Flannery,
           "Numerical Recipes in C++," 2nd ed,
           Cambridge University Press, 2002, p. 31.

    """

    def __init__(self, float_conv=float, int_conv=int,
                 float_to_float=float,
                 float_to_str=lambda v: f'{v:24.16e}',
                 title='Python floating point number'):
        """

        float_conv - convert integer to float (array)
        int_conv - convert float (array) to integer
        float_to_float - convert float array to float
        float_to_str - convert array float to str
        title - description of used floating point numbers

        """
        # We ignore all errors here because we are purposely triggering
        # underflow to detect the properties of the running arch.
        with errstate(under='ignore'):
            self._do_init(float_conv, int_conv, float_to_float, float_to_str, title)

    def _do_init(self, float_conv, int_conv, float_to_float, float_to_str, title):
        # Empirically probes the floating-point representation by arithmetic
        # experiments (Malcolm/Cody-style algorithm; see class References).
        # Each `for ... else` below raises only when the probing loop fails
        # to converge within max_iterN iterations.
        max_iterN = 10000
        msg = "Did not converge after %d tries with %s"
        one = float_conv(1)
        two = one + one
        zero = one - one

        # Do we really need to do this? Aren't they 2 and 2.0?
        # Determine ibeta and beta
        a = one
        for _ in range(max_iterN):
            a = a + a
            temp = a + one
            temp1 = temp - a
            if any(temp1 - one != zero):
                break
        else:
            raise RuntimeError(msg % (_, one.dtype))
        b = one
        for _ in range(max_iterN):
            b = b + b
            temp = a + b
            itemp = int_conv(temp - a)
            if any(itemp != 0):
                break
        else:
            raise RuntimeError(msg % (_, one.dtype))
        ibeta = itemp
        beta = float_conv(ibeta)

        # Determine it and irnd
        it = -1
        b = one
        for _ in range(max_iterN):
            it = it + 1
            b = b * beta
            temp = b + one
            temp1 = temp - b
            if any(temp1 - one != zero):
                break
        else:
            raise RuntimeError(msg % (_, one.dtype))

        betah = beta / two
        a = one
        for _ in range(max_iterN):
            a = a + a
            temp = a + one
            temp1 = temp - a
            if any(temp1 - one != zero):
                break
        else:
            raise RuntimeError(msg % (_, one.dtype))
        temp = a + betah
        irnd = 0
        if any(temp - a != zero):
            irnd = 1
        tempa = a + beta
        temp = tempa + betah
        if irnd == 0 and any(temp - tempa != zero):
            irnd = 2

        # Determine negep and epsneg
        negep = it + 3
        betain = one / beta
        a = one
        for i in range(negep):
            a = a * betain
        b = a
        for _ in range(max_iterN):
            temp = one - a
            if any(temp - one != zero):
                break
            a = a * beta
            negep = negep - 1
            # Prevent infinite loop on PPC with gcc 4.0:
            if negep < 0:
                raise RuntimeError("could not determine machine tolerance "
                                   "for 'negep', locals() -> %s" % (locals()))
        else:
            raise RuntimeError(msg % (_, one.dtype))
        negep = -negep
        epsneg = a

        # Determine machep and eps
        machep = - it - 3
        a = b

        for _ in range(max_iterN):
            temp = one + a
            if any(temp - one != zero):
                break
            a = a * beta
            machep = machep + 1
        else:
            raise RuntimeError(msg % (_, one.dtype))
        eps = a

        # Determine ngrd
        ngrd = 0
        temp = one + eps
        if irnd == 0 and any(temp * one - one != zero):
            ngrd = 1

        # Determine iexp
        i = 0
        k = 1
        z = betain
        t = one + eps
        nxres = 0
        for _ in range(max_iterN):
            y = z
            z = y * y
            a = z * one  # Check here for underflow
            temp = z * t
            if any(a + a == zero) or any(abs(z) >= y):
                break
            temp1 = temp * betain
            if any(temp1 * beta == z):
                break
            i = i + 1
            k = k + k
        else:
            raise RuntimeError(msg % (_, one.dtype))
        if ibeta != 10:
            iexp = i + 1
            mx = k + k
        else:
            # base-10 machines need the exponent width counted digit by digit
            iexp = 2
            iz = ibeta
            while k >= iz:
                iz = iz * ibeta
                iexp = iexp + 1
            mx = iz + iz - 1

        # Determine minexp and xmin
        for _ in range(max_iterN):
            xmin = y
            y = y * betain
            a = y * one
            temp = y * t
            if any((a + a) != zero) and any(abs(y) < xmin):
                k = k + 1
                temp1 = temp * betain
                if any(temp1 * beta == y) and any(temp != y):
                    # partial underflow detected (gradual-underflow machine)
                    nxres = 3
                    xmin = y
                    break
            else:
                break
        else:
            raise RuntimeError(msg % (_, one.dtype))
        minexp = -k

        # Determine maxexp, xmax
        if mx <= k + k - 3 and ibeta != 10:
            mx = mx + mx
            iexp = iexp + 1
        maxexp = mx + minexp
        irnd = irnd + nxres
        if irnd >= 2:
            maxexp = maxexp - 2
        i = maxexp + minexp
        if ibeta == 2 and not i:
            maxexp = maxexp - 1
        if i > 20:
            maxexp = maxexp - 1
        if any(a != y):
            maxexp = maxexp - 2
        xmax = one - epsneg
        if any(xmax * one != xmax):
            xmax = one - beta * epsneg
        xmax = xmax / (xmin * beta * beta * beta)
        i = maxexp + minexp + 3
        for j in range(i):
            if ibeta == 2:
                xmax = xmax + xmax
            else:
                xmax = xmax * beta

        smallest_subnormal = abs(xmin / beta ** (it))

        self.ibeta = ibeta
        self.it = it
        self.negep = negep
        self.epsneg = float_to_float(epsneg)
        self._str_epsneg = float_to_str(epsneg)
        self.machep = machep
        self.eps = float_to_float(eps)
        self._str_eps = float_to_str(eps)
        self.ngrd = ngrd
        self.iexp = iexp
        self.minexp = minexp
        self.xmin = float_to_float(xmin)
        self._str_xmin = float_to_str(xmin)
        self.maxexp = maxexp
        self.xmax = float_to_float(xmax)
        self._str_xmax = float_to_str(xmax)
        self.irnd = irnd

        self.title = title
        # Commonly used parameters
        self.epsilon = self.eps
        self.tiny = self.xmin
        self.huge = self.xmax
        self.smallest_normal = self.xmin
        self._str_smallest_normal = float_to_str(self.xmin)
        self.smallest_subnormal = float_to_float(smallest_subnormal)
        self._str_smallest_subnormal = float_to_str(smallest_subnormal)

        # local import keeps module import time down
        import math
        self.precision = int(-math.log10(float_to_float(self.eps)))
        ten = two + two + two + two + two
        resolution = ten ** (-self.precision)
        self.resolution = float_to_float(resolution)
        self._str_resolution = float_to_str(resolution)

    def __str__(self):
        # %-format against the instance __dict__: the _str_* attributes are
        # the pre-rendered string forms stored by _do_init.
        fmt = (
           'Machine parameters for %(title)s\n'
           '---------------------------------------------------------------------\n'
           'ibeta=%(ibeta)s it=%(it)s iexp=%(iexp)s ngrd=%(ngrd)s irnd=%(irnd)s\n'
           'machep=%(machep)s eps=%(_str_eps)s (beta**machep == epsilon)\n'
           'negep =%(negep)s epsneg=%(_str_epsneg)s (beta**epsneg)\n'
           'minexp=%(minexp)s xmin=%(_str_xmin)s (beta**minexp == tiny)\n'
           'maxexp=%(maxexp)s xmax=%(_str_xmax)s ((1-epsneg)*beta**maxexp == huge)\n'
           'smallest_normal=%(smallest_normal)s '
           'smallest_subnormal=%(smallest_subnormal)s\n'
           '---------------------------------------------------------------------\n'
           )
        return fmt % self.__dict__
352
+
353
+
354
# Ad-hoc self-test: diagnose and print the parameters of Python's floats.
if __name__ == '__main__':
    print(MachAr())
venv/lib/python3.13/site-packages/numpy/_core/_machar.pyi ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from collections.abc import Iterable
2
+ from typing import Any, Final, TypeVar, overload
3
+
4
+ import numpy as np
5
+ from numpy import _CastingKind
6
+ from numpy._utils import set_module as set_module
7
+
8
+ ###
9
+
10
+ _T = TypeVar("_T")
11
+ _TupleT = TypeVar("_TupleT", bound=tuple[()] | tuple[Any, Any, *tuple[Any, ...]])
12
+ _ExceptionT = TypeVar("_ExceptionT", bound=Exception)
13
+
14
+ ###
15
+
16
# NOTE(review): these stubs describe ufunc/array exception types (they match
# numpy._core._exceptions), although the diff places them in _machar.pyi —
# verify file placement.
# Base class for all ufunc type-resolution errors; records the ufunc involved.
class UFuncTypeError(TypeError):
    ufunc: Final[np.ufunc]
    def __init__(self, /, ufunc: np.ufunc) -> None: ...
19
+
20
# Raised when no inner loop matches the operands' dtypes.
class _UFuncNoLoopError(UFuncTypeError):
    dtypes: tuple[np.dtype, ...]
    def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype]) -> None: ...
23
+
24
# Binary-ufunc specialization: exactly two operand dtypes.
class _UFuncBinaryResolutionError(_UFuncNoLoopError):
    dtypes: tuple[np.dtype, np.dtype]
    def __init__(self, /, ufunc: np.ufunc, dtypes: Iterable[np.dtype]) -> None: ...
27
+
28
# Raised when an operand cannot be cast under the requested casting rule.
class _UFuncCastingError(UFuncTypeError):
    casting: Final[_CastingKind]
    from_: Final[np.dtype]
    to: Final[np.dtype]
    def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype) -> None: ...
33
+
34
# Casting failure on input operand `i` (stored as in_i).
class _UFuncInputCastingError(_UFuncCastingError):
    in_i: Final[int]
    def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype, i: int) -> None: ...
37
+
38
# Casting failure on output operand `i` (stored as out_i).
class _UFuncOutputCastingError(_UFuncCastingError):
    out_i: Final[int]
    def __init__(self, /, ufunc: np.ufunc, casting: _CastingKind, from_: np.dtype, to: np.dtype, i: int) -> None: ...
41
+
42
# MemoryError raised on failed array allocation; keeps the requested shape
# and dtype so the message can report the attempted size.
class _ArrayMemoryError(MemoryError):
    shape: tuple[int, ...]
    dtype: np.dtype
    def __init__(self, /, shape: tuple[int, ...], dtype: np.dtype) -> None: ...
    @property
    def _total_size(self) -> int: ...
    @staticmethod
    def _size_to_string(num_bytes: int) -> str: ...
50
+
51
# A 1-tuple unwraps to its sole element; any other tuple passes through.
@overload
def _unpack_tuple(tup: tuple[_T]) -> _T: ...
@overload
def _unpack_tuple(tup: _TupleT) -> _TupleT: ...
55
# Class decorator applied to exception subclasses; returns the class unchanged.
def _display_as_base(cls: type[_ExceptionT]) -> type[_ExceptionT]: ...
venv/lib/python3.13/site-packages/numpy/_core/_methods.py ADDED
@@ -0,0 +1,255 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Array methods which are called by both the C-code for the method
3
+ and the Python code for the NumPy-namespace function
4
+
5
+ """
6
+ import os
7
+ import pickle
8
+ import warnings
9
+ from contextlib import nullcontext
10
+
11
+ import numpy as np
12
+ from numpy._core import multiarray as mu
13
+ from numpy._core import numerictypes as nt
14
+ from numpy._core import umath as um
15
+ from numpy._core.multiarray import asanyarray
16
+ from numpy._globals import _NoValue
17
+
18
# save those O(100) nanoseconds!
# Module-level bindings of the bound reduce methods: LOAD_GLOBAL of a local
# alias is cheaper than an attribute chain on every reduction call.
bool_dt = mu.dtype("bool")
umr_maximum = um.maximum.reduce
umr_minimum = um.minimum.reduce
umr_sum = um.add.reduce
umr_prod = um.multiply.reduce
umr_bitwise_count = um.bitwise_count
umr_any = um.logical_or.reduce
umr_all = um.logical_and.reduce

# Complex types to -> (2,)float view for fast-path computation in _var()
_complex_to_float = {
    nt.dtype(nt.csingle): nt.dtype(nt.single),
    nt.dtype(nt.cdouble): nt.dtype(nt.double),
}
# Special case for windows: ensure double takes precedence
# (only add the clongdouble entry when longdouble is a distinct type)
if nt.dtype(nt.longdouble) != nt.dtype(nt.double):
    _complex_to_float.update({
        nt.dtype(nt.clongdouble): nt.dtype(nt.longdouble),
    })
37
+ })
38
+
39
# avoid keyword arguments to speed up parsing, saves about 15%-20% for very
# small reductions
def _amax(a, axis=None, out=None, keepdims=False,
          initial=_NoValue, where=True):
    # Positional-only thin wrapper over maximum.reduce (ndarray.max impl).
    return umr_maximum(a, axis, None, out, keepdims, initial, where)
44
+
45
def _amin(a, axis=None, out=None, keepdims=False,
          initial=_NoValue, where=True):
    # Positional-only thin wrapper over minimum.reduce (ndarray.min impl).
    return umr_minimum(a, axis, None, out, keepdims, initial, where)
48
+
49
def _sum(a, axis=None, dtype=None, out=None, keepdims=False,
         initial=_NoValue, where=True):
    # Positional-only thin wrapper over add.reduce (ndarray.sum impl).
    return umr_sum(a, axis, dtype, out, keepdims, initial, where)
52
+
53
def _prod(a, axis=None, dtype=None, out=None, keepdims=False,
          initial=_NoValue, where=True):
    # Positional-only thin wrapper over multiply.reduce (ndarray.prod impl).
    return umr_prod(a, axis, dtype, out, keepdims, initial, where)
56
+
57
def _any(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
    # logical_or reduction (ndarray.any impl).
    # By default, return a boolean for any and all
    if dtype is None:
        dtype = bool_dt
    # Parsing keyword arguments is currently fairly slow, so avoid it for now
    if where is True:
        return umr_any(a, axis, dtype, out, keepdims)
    return umr_any(a, axis, dtype, out, keepdims, where=where)
65
+
66
def _all(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
    # logical_and reduction (ndarray.all impl).
    # By default, return a boolean for any and all
    if dtype is None:
        dtype = bool_dt
    # Parsing keyword arguments is currently fairly slow, so avoid it for now
    if where is True:
        return umr_all(a, axis, dtype, out, keepdims)
    return umr_all(a, axis, dtype, out, keepdims, where=where)
74
+
75
+ def _count_reduce_items(arr, axis, keepdims=False, where=True):
76
+ # fast-path for the default case
77
+ if where is True:
78
+ # no boolean mask given, calculate items according to axis
79
+ if axis is None:
80
+ axis = tuple(range(arr.ndim))
81
+ elif not isinstance(axis, tuple):
82
+ axis = (axis,)
83
+ items = 1
84
+ for ax in axis:
85
+ items *= arr.shape[mu.normalize_axis_index(ax, arr.ndim)]
86
+ items = nt.intp(items)
87
+ else:
88
+ # TODO: Optimize case when `where` is broadcast along a non-reduction
89
+ # axis and full sum is more excessive than needed.
90
+
91
+ # guarded to protect circular imports
92
+ from numpy.lib._stride_tricks_impl import broadcast_to
93
+ # count True values in (potentially broadcasted) boolean mask
94
+ items = umr_sum(broadcast_to(where, arr.shape), axis, nt.intp, None,
95
+ keepdims)
96
+ return items
97
+
98
+ def _clip(a, min=None, max=None, out=None, **kwargs):
99
+ if a.dtype.kind in "iu":
100
+ # If min/max is a Python integer, deal with out-of-bound values here.
101
+ # (This enforces NEP 50 rules as no value based promotion is done.)
102
+ if type(min) is int and min <= np.iinfo(a.dtype).min:
103
+ min = None
104
+ if type(max) is int and max >= np.iinfo(a.dtype).max:
105
+ max = None
106
+
107
+ if min is None and max is None:
108
+ # return identity
109
+ return um.positive(a, out=out, **kwargs)
110
+ elif min is None:
111
+ return um.minimum(a, max, out=out, **kwargs)
112
+ elif max is None:
113
+ return um.maximum(a, min, out=out, **kwargs)
114
+ else:
115
+ return um.clip(a, min, max, out=out, **kwargs)
116
+
117
def _mean(a, axis=None, dtype=None, out=None, keepdims=False, *, where=True):
    # Arithmetic mean (ndarray.mean impl): sum over `axis`, then divide by
    # the participating item count; warns on empty slices.
    arr = asanyarray(a)

    is_float16_result = False

    rcount = _count_reduce_items(arr, axis, keepdims=keepdims, where=where)
    # with a mask, rcount is an array and any zero entry triggers the warning
    if rcount == 0 if where is True else umr_any(rcount == 0, axis=None):
        warnings.warn("Mean of empty slice.", RuntimeWarning, stacklevel=2)

    # Cast bool, unsigned int, and int to float64 by default
    if dtype is None:
        if issubclass(arr.dtype.type, (nt.integer, nt.bool)):
            dtype = mu.dtype('f8')
        elif issubclass(arr.dtype.type, nt.float16):
            # accumulate float16 input in float32 for accuracy, cast back below
            dtype = mu.dtype('f4')
            is_float16_result = True

    ret = umr_sum(arr, axis, dtype, out, keepdims, where=where)
    if isinstance(ret, mu.ndarray):
        # in-place divide; 'unsafe' casting permits float/int count division
        ret = um.true_divide(
                ret, rcount, out=ret, casting='unsafe', subok=False)
        if is_float16_result and out is None:
            ret = arr.dtype.type(ret)
    elif hasattr(ret, 'dtype'):
        # 0-d scalar result: preserve the scalar type
        if is_float16_result:
            ret = arr.dtype.type(ret / rcount)
        else:
            ret = ret.dtype.type(ret / rcount)
    else:
        # plain Python object (e.g. from object arrays)
        ret = ret / rcount

    return ret
149
+
150
def _var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *,
         where=True, mean=None):
    # Variance (ndarray.var impl): mean of squared deviations from the mean,
    # divided by max(rcount - ddof, 0). A precomputed `mean` may be supplied
    # to skip the first pass.
    arr = asanyarray(a)

    rcount = _count_reduce_items(arr, axis, keepdims=keepdims, where=where)
    # Make this warning show up on top.
    if ddof >= rcount if where is True else umr_any(ddof >= rcount, axis=None):
        warnings.warn("Degrees of freedom <= 0 for slice", RuntimeWarning,
                      stacklevel=2)

    # Cast bool, unsigned int, and int to float64 by default
    if dtype is None and issubclass(arr.dtype.type, (nt.integer, nt.bool)):
        dtype = mu.dtype('f8')

    if mean is not None:
        arrmean = mean
    else:
        # Compute the mean.
        # Note that if dtype is not of inexact type then arraymean will
        # not be either.
        arrmean = umr_sum(arr, axis, dtype, keepdims=True, where=where)
        # The shape of rcount has to match arrmean to not change the shape of
        # out in broadcasting. Otherwise, it cannot be stored back to arrmean.
        if rcount.ndim == 0:
            # fast-path for default case when where is True
            div = rcount
        else:
            # matching rcount to arrmean when where is specified as array
            div = rcount.reshape(arrmean.shape)
        if isinstance(arrmean, mu.ndarray):
            arrmean = um.true_divide(arrmean, div, out=arrmean,
                                     casting='unsafe', subok=False)
        elif hasattr(arrmean, "dtype"):
            arrmean = arrmean.dtype.type(arrmean / rcount)
        else:
            arrmean = arrmean / rcount

    # Compute sum of squared deviations from mean
    # Note that x may not be inexact and that we need it to be an array,
    # not a scalar.
    x = asanyarray(arr - arrmean)

    if issubclass(arr.dtype.type, (nt.floating, nt.integer)):
        # real-valued: square in place
        x = um.multiply(x, x, out=x)
    # Fast-paths for built-in complex types
    elif x.dtype in _complex_to_float:
        # view complex as (..., 2) floats, square both components in place,
        # then sum the real/imag parts into the real buffer
        xv = x.view(dtype=(_complex_to_float[x.dtype], (2,)))
        um.multiply(xv, xv, out=xv)
        x = um.add(xv[..., 0], xv[..., 1], out=x.real).real
    # Most general case; includes handling object arrays containing imaginary
    # numbers and complex types with non-native byteorder
    else:
        x = um.multiply(x, um.conjugate(x), out=x).real

    ret = umr_sum(x, axis, dtype, out, keepdims=keepdims, where=where)

    # Compute degrees of freedom and make sure it is not negative.
    rcount = um.maximum(rcount - ddof, 0)

    # divide by degrees of freedom
    if isinstance(ret, mu.ndarray):
        ret = um.true_divide(
                ret, rcount, out=ret, casting='unsafe', subok=False)
    elif hasattr(ret, 'dtype'):
        ret = ret.dtype.type(ret / rcount)
    else:
        ret = ret / rcount

    return ret
219
+
220
def _std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False, *,
         where=True, mean=None):
    """Standard deviation (ndarray.std impl): square root of ``_var``."""
    variance = _var(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
                    keepdims=keepdims, where=where, mean=mean)

    if isinstance(variance, mu.ndarray):
        # full ndarray result: take the root in place (also updates `out`)
        return um.sqrt(variance, out=variance)
    if hasattr(variance, 'dtype'):
        # 0-d scalar with a dtype: preserve the exact scalar type
        return variance.dtype.type(um.sqrt(variance))
    # plain Python object (e.g. from object arrays)
    return um.sqrt(variance)
233
+
234
def _ptp(a, axis=None, out=None, keepdims=False):
    # "peak to peak": max - min along `axis`; the subtraction is written
    # into `out` (third positional argument of um.subtract) when given.
    return um.subtract(
        umr_maximum(a, axis, None, out, keepdims),
        umr_minimum(a, axis, None, None, keepdims),
        out
    )
240
+
241
+ def _dump(self, file, protocol=2):
242
+ if hasattr(file, 'write'):
243
+ ctx = nullcontext(file)
244
+ else:
245
+ ctx = open(os.fspath(file), "wb")
246
+ with ctx as f:
247
+ pickle.dump(self, f, protocol=protocol)
248
+
249
def _dumps(self, protocol=2):
    # Return the pickled bytes of `self` (ndarray.dumps impl).
    return pickle.dumps(self, protocol=protocol)
251
+
252
def _bitwise_count(a, out=None, *, where=True, casting='same_kind',
                   order='K', dtype=None, subok=True):
    # Thin keyword-forwarding wrapper around the bitwise_count ufunc.
    return umr_bitwise_count(a, out, where=where, casting=casting,
                             order=order, dtype=dtype, subok=subok)
venv/lib/python3.13/site-packages/numpy/_core/_methods.pyi ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from collections.abc import Callable
2
+ from typing import Any, Concatenate, TypeAlias
3
+
4
+ import numpy as np
5
+
6
+ from . import _exceptions as _exceptions
7
+
8
+ ###
9
+
10
# Signature shared by the bound ``ufunc.reduce`` callables re-exported below.
_Reduce2: TypeAlias = Callable[Concatenate[object, ...], Any]

###

bool_dt: np.dtype[np.bool] = ...
umr_maximum: _Reduce2 = ...
umr_minimum: _Reduce2 = ...
umr_sum: _Reduce2 = ...
umr_prod: _Reduce2 = ...
umr_bitwise_count = np.bitwise_count
umr_any: _Reduce2 = ...
umr_all: _Reduce2 = ...
# Maps each native complex dtype to the float dtype of its components.
_complex_to_float: dict[np.dtype[np.complexfloating], np.dtype[np.floating]] = ...
venv/lib/python3.13/site-packages/numpy/_core/_operand_flag_tests.cpython-313-x86_64-linux-gnu.so ADDED
Binary file (16.8 kB). View file
 
venv/lib/python3.13/site-packages/numpy/_core/_rational_tests.cpython-313-x86_64-linux-gnu.so ADDED
Binary file (59.6 kB). View file
 
venv/lib/python3.13/site-packages/numpy/_core/_simd.pyi ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from types import ModuleType
2
+ from typing import TypedDict, type_check_only
3
+
4
# NOTE: these 5 are only defined on systems with an intel processor
SSE42: ModuleType | None = ...
FMA3: ModuleType | None = ...
AVX2: ModuleType | None = ...
AVX512F: ModuleType | None = ...
AVX512_SKX: ModuleType | None = ...

baseline: ModuleType | None = ...

# Mapping of SIMD target name -> module (None when unsupported on this CPU).
@type_check_only
class SimdTargets(TypedDict):
    SSE42: ModuleType | None
    AVX2: ModuleType | None
    FMA3: ModuleType | None
    AVX512F: ModuleType | None
    AVX512_SKX: ModuleType | None
    baseline: ModuleType | None

targets: SimdTargets = ...

# Floating-point status-flag helpers exposed by the extension module.
def clear_floatstatus() -> None: ...
def get_floatstatus() -> int: ...
venv/lib/python3.13/site-packages/numpy/_core/_string_helpers.py ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ String-handling utilities to avoid locale-dependence.
3
+
4
+ Used primarily to generate type name aliases.
5
+ """
6
+ # "import string" is costly to import!
7
+ # Construct the translation tables directly
8
+ # "A" = chr(65), "a" = chr(97)
9
+ _all_chars = tuple(map(chr, range(256)))
10
+ _ascii_upper = _all_chars[65:65 + 26]
11
+ _ascii_lower = _all_chars[97:97 + 26]
12
+ LOWER_TABLE = _all_chars[:65] + _ascii_lower + _all_chars[65 + 26:]
13
+ UPPER_TABLE = _all_chars[:97] + _ascii_upper + _all_chars[97 + 26:]
14
+
15
+
16
def english_lower(s):
    """ Apply English case rules to convert ASCII strings to all lower case.

    This is an internal utility function to replace calls to str.lower() such
    that we can avoid changing behavior with changing locales. In particular,
    Turkish has distinct dotted and dotless variants of the Latin letter "I" in
    both lowercase and uppercase. Thus, "I".lower() != "i" in a "tr" locale.

    Parameters
    ----------
    s : str

    Returns
    -------
    lowered : str

    Examples
    --------
    >>> from numpy._core.numerictypes import english_lower
    >>> english_lower('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_')
    'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz0123456789_'
    >>> english_lower('')
    ''
    """
    # single C-level pass over the string via the precomputed table
    return s.translate(LOWER_TABLE)
42
+
43
+
44
def english_upper(s):
    """ Apply English case rules to convert ASCII strings to all upper case.

    This is an internal utility function to replace calls to str.upper() such
    that we can avoid changing behavior with changing locales. In particular,
    Turkish has distinct dotted and dotless variants of the Latin letter "I" in
    both lowercase and uppercase. Thus, "i".upper() != "I" in a "tr" locale.

    Parameters
    ----------
    s : str

    Returns
    -------
    uppered : str

    Examples
    --------
    >>> from numpy._core.numerictypes import english_upper
    >>> english_upper('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_')
    'ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_'
    >>> english_upper('')
    ''
    """
    # single C-level pass over the string via the precomputed table
    return s.translate(UPPER_TABLE)
70
+
71
+
72
def english_capitalize(s):
    """ Apply English case rules to convert the first character of an ASCII
    string to upper case.

    This is an internal utility function to replace calls to str.capitalize()
    such that we can avoid changing behavior with changing locales.

    Parameters
    ----------
    s : str

    Returns
    -------
    capitalized : str

    Examples
    --------
    >>> from numpy._core.numerictypes import english_capitalize
    >>> english_capitalize('int8')
    'Int8'
    >>> english_capitalize('Int8')
    'Int8'
    >>> english_capitalize('')
    ''
    """
    # guard clause: the empty string has no first character to upper-case
    if not s:
        return s
    return english_upper(s[0]) + s[1:]
venv/lib/python3.13/site-packages/numpy/_core/_string_helpers.pyi ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Final
2
+
3
# The 256-entry character tuples backing the str.translate tables below.
_all_chars: Final[tuple[str, ...]] = ...
_ascii_upper: Final[tuple[str, ...]] = ...
_ascii_lower: Final[tuple[str, ...]] = ...

# Locale-independent case-mapping tables (identity outside ASCII letters).
LOWER_TABLE: Final[tuple[str, ...]] = ...
UPPER_TABLE: Final[tuple[str, ...]] = ...

def english_lower(s: str) -> str: ...
def english_upper(s: str) -> str: ...
def english_capitalize(s: str) -> str: ...
venv/lib/python3.13/site-packages/numpy/_core/_struct_ufunc_tests.cpython-313-x86_64-linux-gnu.so ADDED
Binary file (16.9 kB). View file
 
venv/lib/python3.13/site-packages/numpy/_core/_type_aliases.py ADDED
@@ -0,0 +1,119 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Due to compatibility, numpy has a very large number of different naming
3
+ conventions for the scalar types (those subclassing from `numpy.generic`).
4
+ This file produces a convoluted set of dictionaries mapping names to types,
5
+ and sometimes other mappings too.
6
+
7
+ .. data:: allTypes
8
+ A dictionary of names to types that will be exposed as attributes through
9
+ ``np._core.numerictypes.*``
10
+
11
+ .. data:: sctypeDict
12
+ Similar to `allTypes`, but maps a broader set of aliases to their types.
13
+
14
+ .. data:: sctypes
15
+ A dictionary keyed by a "type group" string, providing a list of types
16
+ under that group.
17
+
18
+ """
19
+
20
+ import numpy._core.multiarray as ma
21
+ from numpy._core.multiarray import dtype, typeinfo
22
+
23
######################################
# Building `sctypeDict` and `allTypes`
######################################

sctypeDict = {}
allTypes = {}
c_names_dict = {}

_abstract_type_names = {
    "generic", "integer", "inexact", "floating", "number",
    "flexible", "character", "complexfloating", "unsignedinteger",
    "signedinteger"
}

# the abstract scalar base classes are exposed under their own names
for _abstract_type_name in _abstract_type_names:
    allTypes[_abstract_type_name] = getattr(ma, _abstract_type_name)

# typeinfo maps both "NPY_*" C names and plain names; split them apart
for k, v in typeinfo.items():
    if k.startswith("NPY_") and v not in c_names_dict:
        c_names_dict[k[4:]] = v
    else:
        concrete_type = v.type
        allTypes[k] = concrete_type
        sctypeDict[k] = concrete_type

_aliases = {
    "double": "float64",
    "cdouble": "complex128",
    "single": "float32",
    "csingle": "complex64",
    "half": "float16",
    "bool_": "bool",
    # Default integer:
    "int_": "intp",
    "uint": "uintp",
}

for k, v in _aliases.items():
    sctypeDict[k] = allTypes[v]
    allTypes[k] = allTypes[v]

# extra aliases are added only to `sctypeDict`
# to support dtype name access, such as`np.dtype("float")`
_extra_aliases = {
    "float": "float64",
    "complex": "complex128",
    "object": "object_",
    "bytes": "bytes_",
    "a": "bytes_",
    "int": "int_",
    "str": "str_",
    "unicode": "str_",
}

for k, v in _extra_aliases.items():
    sctypeDict[k] = allTypes[v]

# include extended precision sized aliases
# (e.g. "float128"/"complex256" when longdouble is 128 bits on this platform)
for is_complex, full_name in [(False, "longdouble"), (True, "clongdouble")]:
    longdouble_type: type = allTypes[full_name]

    bits: int = dtype(longdouble_type).itemsize * 8
    base_name: str = "complex" if is_complex else "float"
    extended_prec_name: str = f"{base_name}{bits}"
    if extended_prec_name not in allTypes:
        sctypeDict[extended_prec_name] = longdouble_type
        allTypes[extended_prec_name] = longdouble_type


####################
# Building `sctypes`
####################

sctypes = {"int": set(), "uint": set(), "float": set(),
           "complex": set(), "others": set()}

for type_info in typeinfo.values():
    if type_info.kind in ["M", "m"]:  # exclude timedelta and datetime
        continue

    concrete_type = type_info.type

    # find proper group for each concrete type
    # (order matters: "others"/generic is the catch-all, so it goes last)
    for type_group, abstract_type in [
        ("int", ma.signedinteger), ("uint", ma.unsignedinteger),
        ("float", ma.floating), ("complex", ma.complexfloating),
        ("others", ma.generic)
    ]:
        if issubclass(concrete_type, abstract_type):
            sctypes[type_group].add(concrete_type)
            break

# sort sctype groups by bitsize
for sctype_key in sctypes.keys():
    sctype_list = list(sctypes[sctype_key])
    sctype_list.sort(key=lambda x: dtype(x).itemsize)
    sctypes[sctype_key] = sctype_list
venv/lib/python3.13/site-packages/numpy/_core/_type_aliases.pyi ADDED
@@ -0,0 +1,97 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from collections.abc import Collection
2
+ from typing import Final, TypeAlias, TypedDict, type_check_only
3
+ from typing import Literal as L
4
+
5
+ import numpy as np
6
+
7
+ __all__ = (
8
+ "_abstract_type_names",
9
+ "_aliases",
10
+ "_extra_aliases",
11
+ "allTypes",
12
+ "c_names_dict",
13
+ "sctypeDict",
14
+ "sctypes",
15
+ )
16
+
17
sctypeDict: Final[dict[str, type[np.generic]]]  # name/alias -> scalar type
allTypes: Final[dict[str, type[np.generic]]]    # canonical name -> scalar type

@type_check_only
class _CNamesDict(TypedDict):
    # Shape of `c_names_dict`: maps C-level type-macro names (BOOL, DOUBLE,
    # ...) to the dtype of the corresponding numpy scalar type.
    BOOL: np.dtype[np.bool]
    HALF: np.dtype[np.half]
    FLOAT: np.dtype[np.single]
    DOUBLE: np.dtype[np.double]
    LONGDOUBLE: np.dtype[np.longdouble]
    CFLOAT: np.dtype[np.csingle]
    CDOUBLE: np.dtype[np.cdouble]
    CLONGDOUBLE: np.dtype[np.clongdouble]
    STRING: np.dtype[np.bytes_]
    UNICODE: np.dtype[np.str_]
    VOID: np.dtype[np.void]
    OBJECT: np.dtype[np.object_]
    DATETIME: np.dtype[np.datetime64]
    TIMEDELTA: np.dtype[np.timedelta64]
    BYTE: np.dtype[np.byte]
    UBYTE: np.dtype[np.ubyte]
    SHORT: np.dtype[np.short]
    USHORT: np.dtype[np.ushort]
    INT: np.dtype[np.intc]
    UINT: np.dtype[np.uintc]
    LONG: np.dtype[np.long]
    ULONG: np.dtype[np.ulong]
    LONGLONG: np.dtype[np.longlong]
    ULONGLONG: np.dtype[np.ulonglong]

c_names_dict: Final[_CNamesDict]
48
+
49
# Names of the abstract scalar-type classes in the numpy type hierarchy.
_AbstractTypeName: TypeAlias = L[
    "generic",
    "flexible",
    "character",
    "number",
    "integer",
    "inexact",
    "unsignedinteger",
    "signedinteger",
    "floating",
    "complexfloating",
]
_abstract_type_names: Final[set[_AbstractTypeName]]
62
+
63
@type_check_only
class _AliasesType(TypedDict):
    # Shape of `_aliases`: maps an alias name to the canonical sized
    # scalar-type name it resolves to.
    double: L["float64"]
    cdouble: L["complex128"]
    single: L["float32"]
    csingle: L["complex64"]
    half: L["float16"]
    bool_: L["bool"]
    int_: L["intp"]
    # NOTE(review): "uint" -> "intp" looks suspicious next to int_ -> "intp";
    # confirm against the runtime `_aliases` mapping (expected "uintp"?).
    uint: L["intp"]

_aliases: Final[_AliasesType]
75
+
76
@type_check_only
class _ExtraAliasesType(TypedDict):
    # Shape of `_extra_aliases`: builtin-like names accepted by
    # ``np.dtype(...)`` mapped to the canonical scalar-type name.
    float: L["float64"]
    complex: L["complex128"]
    object: L["object_"]
    bytes: L["bytes_"]
    a: L["bytes_"]
    int: L["int_"]
    str: L["str_"]
    unicode: L["str_"]

_extra_aliases: Final[_ExtraAliasesType]
88
+
89
@type_check_only
class _SCTypes(TypedDict):
    # Shape of `sctypes`: concrete scalar types grouped by kind; at runtime
    # each group is a list sorted by item size.
    int: Collection[type[np.signedinteger]]
    uint: Collection[type[np.unsignedinteger]]
    float: Collection[type[np.floating]]
    complex: Collection[type[np.complexfloating]]
    others: Collection[type[np.flexible | np.bool | np.object_]]

sctypes: Final[_SCTypes]
venv/lib/python3.13/site-packages/numpy/_core/_ufunc_config.py ADDED
@@ -0,0 +1,491 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Functions for changing global ufunc configuration
3
+
4
+ This provides helpers which wrap `_get_extobj_dict` and `_make_extobj`, and
5
+ `_extobj_contextvar` from umath.
6
+ """
7
+ import functools
8
+
9
+ from numpy._utils import set_module
10
+
11
+ from .umath import _extobj_contextvar, _get_extobj_dict, _make_extobj
12
+
13
+ __all__ = [
14
+ "seterr", "geterr", "setbufsize", "getbufsize", "seterrcall", "geterrcall",
15
+ "errstate"
16
+ ]
17
+
18
+
19
+ @set_module('numpy')
20
+ def seterr(all=None, divide=None, over=None, under=None, invalid=None):
21
+ """
22
+ Set how floating-point errors are handled.
23
+
24
+ Note that operations on integer scalar types (such as `int16`) are
25
+ handled like floating point, and are affected by these settings.
26
+
27
+ Parameters
28
+ ----------
29
+ all : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
30
+ Set treatment for all types of floating-point errors at once:
31
+
32
+ - ignore: Take no action when the exception occurs.
33
+ - warn: Print a :exc:`RuntimeWarning` (via the Python `warnings`
34
+ module).
35
+ - raise: Raise a :exc:`FloatingPointError`.
36
+ - call: Call a function specified using the `seterrcall` function.
37
+ - print: Print a warning directly to ``stdout``.
38
+ - log: Record error in a Log object specified by `seterrcall`.
39
+
40
+ The default is not to change the current behavior.
41
+ divide : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
42
+ Treatment for division by zero.
43
+ over : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
44
+ Treatment for floating-point overflow.
45
+ under : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
46
+ Treatment for floating-point underflow.
47
+ invalid : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
48
+ Treatment for invalid floating-point operation.
49
+
50
+ Returns
51
+ -------
52
+ old_settings : dict
53
+ Dictionary containing the old settings.
54
+
55
+ See also
56
+ --------
57
+ seterrcall : Set a callback function for the 'call' mode.
58
+ geterr, geterrcall, errstate
59
+
60
+ Notes
61
+ -----
62
+ The floating-point exceptions are defined in the IEEE 754 standard [1]_:
63
+
64
+ - Division by zero: infinite result obtained from finite numbers.
65
+ - Overflow: result too large to be expressed.
66
+ - Underflow: result so close to zero that some precision
67
+ was lost.
68
+ - Invalid operation: result is not an expressible number, typically
69
+ indicates that a NaN was produced.
70
+
71
+ .. [1] https://en.wikipedia.org/wiki/IEEE_754
72
+
73
+ Examples
74
+ --------
75
+ >>> import numpy as np
76
+ >>> orig_settings = np.seterr(all='ignore') # seterr to known value
77
+ >>> np.int16(32000) * np.int16(3)
78
+ np.int16(30464)
79
+ >>> np.seterr(over='raise')
80
+ {'divide': 'ignore', 'over': 'ignore', 'under': 'ignore', 'invalid': 'ignore'}
81
+ >>> old_settings = np.seterr(all='warn', over='raise')
82
+ >>> np.int16(32000) * np.int16(3)
83
+ Traceback (most recent call last):
84
+ File "<stdin>", line 1, in <module>
85
+ FloatingPointError: overflow encountered in scalar multiply
86
+
87
+ >>> old_settings = np.seterr(all='print')
88
+ >>> np.geterr()
89
+ {'divide': 'print', 'over': 'print', 'under': 'print', 'invalid': 'print'}
90
+ >>> np.int16(32000) * np.int16(3)
91
+ np.int16(30464)
92
+ >>> np.seterr(**orig_settings) # restore original
93
+ {'divide': 'print', 'over': 'print', 'under': 'print', 'invalid': 'print'}
94
+
95
+ """
96
+
97
+ old = _get_extobj_dict()
98
+ # The errstate doesn't include call and bufsize, so pop them:
99
+ old.pop("call", None)
100
+ old.pop("bufsize", None)
101
+
102
+ extobj = _make_extobj(
103
+ all=all, divide=divide, over=over, under=under, invalid=invalid)
104
+ _extobj_contextvar.set(extobj)
105
+ return old
106
+
107
+
108
+ @set_module('numpy')
109
+ def geterr():
110
+ """
111
+ Get the current way of handling floating-point errors.
112
+
113
+ Returns
114
+ -------
115
+ res : dict
116
+ A dictionary with keys "divide", "over", "under", and "invalid",
117
+ whose values are from the strings "ignore", "print", "log", "warn",
118
+ "raise", and "call". The keys represent possible floating-point
119
+ exceptions, and the values define how these exceptions are handled.
120
+
121
+ See Also
122
+ --------
123
+ geterrcall, seterr, seterrcall
124
+
125
+ Notes
126
+ -----
127
+ For complete documentation of the types of floating-point exceptions and
128
+ treatment options, see `seterr`.
129
+
130
+ Examples
131
+ --------
132
+ >>> import numpy as np
133
+ >>> np.geterr()
134
+ {'divide': 'warn', 'over': 'warn', 'under': 'ignore', 'invalid': 'warn'}
135
+ >>> np.arange(3.) / np.arange(3.) # doctest: +SKIP
136
+ array([nan, 1., 1.])
137
+ RuntimeWarning: invalid value encountered in divide
138
+
139
+ >>> oldsettings = np.seterr(all='warn', invalid='raise')
140
+ >>> np.geterr()
141
+ {'divide': 'warn', 'over': 'warn', 'under': 'warn', 'invalid': 'raise'}
142
+ >>> np.arange(3.) / np.arange(3.)
143
+ Traceback (most recent call last):
144
+ ...
145
+ FloatingPointError: invalid value encountered in divide
146
+ >>> oldsettings = np.seterr(**oldsettings) # restore original
147
+
148
+ """
149
+ res = _get_extobj_dict()
150
+ # The "geterr" doesn't include call and bufsize,:
151
+ res.pop("call", None)
152
+ res.pop("bufsize", None)
153
+ return res
154
+
155
+
156
+ @set_module('numpy')
157
+ def setbufsize(size):
158
+ """
159
+ Set the size of the buffer used in ufuncs.
160
+
161
+ .. versionchanged:: 2.0
162
+ The scope of setting the buffer is tied to the `numpy.errstate`
163
+ context. Exiting a ``with errstate():`` will also restore the bufsize.
164
+
165
+ Parameters
166
+ ----------
167
+ size : int
168
+ Size of buffer.
169
+
170
+ Returns
171
+ -------
172
+ bufsize : int
173
+ Previous size of ufunc buffer in bytes.
174
+
175
+ Examples
176
+ --------
177
+ When exiting a `numpy.errstate` context manager the bufsize is restored:
178
+
179
+ >>> import numpy as np
180
+ >>> with np.errstate():
181
+ ... np.setbufsize(4096)
182
+ ... print(np.getbufsize())
183
+ ...
184
+ 8192
185
+ 4096
186
+ >>> np.getbufsize()
187
+ 8192
188
+
189
+ """
190
+ if size < 0:
191
+ raise ValueError("buffer size must be non-negative")
192
+ old = _get_extobj_dict()["bufsize"]
193
+ extobj = _make_extobj(bufsize=size)
194
+ _extobj_contextvar.set(extobj)
195
+ return old
196
+
197
+
198
+ @set_module('numpy')
199
+ def getbufsize():
200
+ """
201
+ Return the size of the buffer used in ufuncs.
202
+
203
+ Returns
204
+ -------
205
+ getbufsize : int
206
+ Size of ufunc buffer in bytes.
207
+
208
+ Examples
209
+ --------
210
+ >>> import numpy as np
211
+ >>> np.getbufsize()
212
+ 8192
213
+
214
+ """
215
+ return _get_extobj_dict()["bufsize"]
216
+
217
+
218
+ @set_module('numpy')
219
+ def seterrcall(func):
220
+ """
221
+ Set the floating-point error callback function or log object.
222
+
223
+ There are two ways to capture floating-point error messages. The first
224
+ is to set the error-handler to 'call', using `seterr`. Then, set
225
+ the function to call using this function.
226
+
227
+ The second is to set the error-handler to 'log', using `seterr`.
228
+ Floating-point errors then trigger a call to the 'write' method of
229
+ the provided object.
230
+
231
+ Parameters
232
+ ----------
233
+ func : callable f(err, flag) or object with write method
234
+ Function to call upon floating-point errors ('call'-mode) or
235
+ object whose 'write' method is used to log such message ('log'-mode).
236
+
237
+ The call function takes two arguments. The first is a string describing
238
+ the type of error (such as "divide by zero", "overflow", "underflow",
239
+ or "invalid value"), and the second is the status flag. The flag is a
240
+ byte, whose four least-significant bits indicate the type of error, one
241
+ of "divide", "over", "under", "invalid"::
242
+
243
+ [0 0 0 0 divide over under invalid]
244
+
245
+ In other words, ``flags = divide + 2*over + 4*under + 8*invalid``.
246
+
247
+ If an object is provided, its write method should take one argument,
248
+ a string.
249
+
250
+ Returns
251
+ -------
252
+ h : callable, log instance or None
253
+ The old error handler.
254
+
255
+ See Also
256
+ --------
257
+ seterr, geterr, geterrcall
258
+
259
+ Examples
260
+ --------
261
+ Callback upon error:
262
+
263
+ >>> def err_handler(type, flag):
264
+ ... print("Floating point error (%s), with flag %s" % (type, flag))
265
+ ...
266
+
267
+ >>> import numpy as np
268
+
269
+ >>> orig_handler = np.seterrcall(err_handler)
270
+ >>> orig_err = np.seterr(all='call')
271
+
272
+ >>> np.array([1, 2, 3]) / 0.0
273
+ Floating point error (divide by zero), with flag 1
274
+ array([inf, inf, inf])
275
+
276
+ >>> np.seterrcall(orig_handler)
277
+ <function err_handler at 0x...>
278
+ >>> np.seterr(**orig_err)
279
+ {'divide': 'call', 'over': 'call', 'under': 'call', 'invalid': 'call'}
280
+
281
+ Log error message:
282
+
283
+ >>> class Log:
284
+ ... def write(self, msg):
285
+ ... print("LOG: %s" % msg)
286
+ ...
287
+
288
+ >>> log = Log()
289
+ >>> saved_handler = np.seterrcall(log)
290
+ >>> save_err = np.seterr(all='log')
291
+
292
+ >>> np.array([1, 2, 3]) / 0.0
293
+ LOG: Warning: divide by zero encountered in divide
294
+ array([inf, inf, inf])
295
+
296
+ >>> np.seterrcall(orig_handler)
297
+ <numpy.Log object at 0x...>
298
+ >>> np.seterr(**orig_err)
299
+ {'divide': 'log', 'over': 'log', 'under': 'log', 'invalid': 'log'}
300
+
301
+ """
302
+ old = _get_extobj_dict()["call"]
303
+ extobj = _make_extobj(call=func)
304
+ _extobj_contextvar.set(extobj)
305
+ return old
306
+
307
+
308
+ @set_module('numpy')
309
+ def geterrcall():
310
+ """
311
+ Return the current callback function used on floating-point errors.
312
+
313
+ When the error handling for a floating-point error (one of "divide",
314
+ "over", "under", or "invalid") is set to 'call' or 'log', the function
315
+ that is called or the log instance that is written to is returned by
316
+ `geterrcall`. This function or log instance has been set with
317
+ `seterrcall`.
318
+
319
+ Returns
320
+ -------
321
+ errobj : callable, log instance or None
322
+ The current error handler. If no handler was set through `seterrcall`,
323
+ ``None`` is returned.
324
+
325
+ See Also
326
+ --------
327
+ seterrcall, seterr, geterr
328
+
329
+ Notes
330
+ -----
331
+ For complete documentation of the types of floating-point exceptions and
332
+ treatment options, see `seterr`.
333
+
334
+ Examples
335
+ --------
336
+ >>> import numpy as np
337
+ >>> np.geterrcall() # we did not yet set a handler, returns None
338
+
339
+ >>> orig_settings = np.seterr(all='call')
340
+ >>> def err_handler(type, flag):
341
+ ... print("Floating point error (%s), with flag %s" % (type, flag))
342
+ >>> old_handler = np.seterrcall(err_handler)
343
+ >>> np.array([1, 2, 3]) / 0.0
344
+ Floating point error (divide by zero), with flag 1
345
+ array([inf, inf, inf])
346
+
347
+ >>> cur_handler = np.geterrcall()
348
+ >>> cur_handler is err_handler
349
+ True
350
+ >>> old_settings = np.seterr(**orig_settings) # restore original
351
+ >>> old_handler = np.seterrcall(None) # restore original
352
+
353
+ """
354
+ return _get_extobj_dict()["call"]
355
+
356
+
357
class _unspecified:
    # Sentinel class: distinguishes "argument not supplied" from an
    # explicit ``None`` (``None`` is a meaningful value for ``call``).
    pass


_Unspecified = _unspecified()  # singleton sentinel instance
362
+
363
+
364
+ @set_module('numpy')
365
+ class errstate:
366
+ """
367
+ errstate(**kwargs)
368
+
369
+ Context manager for floating-point error handling.
370
+
371
+ Using an instance of `errstate` as a context manager allows statements in
372
+ that context to execute with a known error handling behavior. Upon entering
373
+ the context the error handling is set with `seterr` and `seterrcall`, and
374
+ upon exiting it is reset to what it was before.
375
+
376
+ .. versionchanged:: 1.17.0
377
+ `errstate` is also usable as a function decorator, saving
378
+ a level of indentation if an entire function is wrapped.
379
+
380
+ .. versionchanged:: 2.0
381
+ `errstate` is now fully thread and asyncio safe, but may not be
382
+ entered more than once.
383
+ It is not safe to decorate async functions using ``errstate``.
384
+
385
+ Parameters
386
+ ----------
387
+ kwargs : {divide, over, under, invalid}
388
+ Keyword arguments. The valid keywords are the possible floating-point
389
+ exceptions. Each keyword should have a string value that defines the
390
+ treatment for the particular error. Possible values are
391
+ {'ignore', 'warn', 'raise', 'call', 'print', 'log'}.
392
+
393
+ See Also
394
+ --------
395
+ seterr, geterr, seterrcall, geterrcall
396
+
397
+ Notes
398
+ -----
399
+ For complete documentation of the types of floating-point exceptions and
400
+ treatment options, see `seterr`.
401
+
402
+ Examples
403
+ --------
404
+ >>> import numpy as np
405
+ >>> olderr = np.seterr(all='ignore') # Set error handling to known state.
406
+
407
+ >>> np.arange(3) / 0.
408
+ array([nan, inf, inf])
409
+ >>> with np.errstate(divide='ignore'):
410
+ ... np.arange(3) / 0.
411
+ array([nan, inf, inf])
412
+
413
+ >>> np.sqrt(-1)
414
+ np.float64(nan)
415
+ >>> with np.errstate(invalid='raise'):
416
+ ... np.sqrt(-1)
417
+ Traceback (most recent call last):
418
+ File "<stdin>", line 2, in <module>
419
+ FloatingPointError: invalid value encountered in sqrt
420
+
421
+ Outside the context the error handling behavior has not changed:
422
+
423
+ >>> np.geterr()
424
+ {'divide': 'ignore', 'over': 'ignore', 'under': 'ignore', 'invalid': 'ignore'}
425
+ >>> olderr = np.seterr(**olderr) # restore original state
426
+
427
+ """
428
+ __slots__ = (
429
+ "_all",
430
+ "_call",
431
+ "_divide",
432
+ "_invalid",
433
+ "_over",
434
+ "_token",
435
+ "_under",
436
+ )
437
+
438
+ def __init__(self, *, call=_Unspecified,
439
+ all=None, divide=None, over=None, under=None, invalid=None):
440
+ self._token = None
441
+ self._call = call
442
+ self._all = all
443
+ self._divide = divide
444
+ self._over = over
445
+ self._under = under
446
+ self._invalid = invalid
447
+
448
+ def __enter__(self):
449
+ # Note that __call__ duplicates much of this logic
450
+ if self._token is not None:
451
+ raise TypeError("Cannot enter `np.errstate` twice.")
452
+ if self._call is _Unspecified:
453
+ extobj = _make_extobj(
454
+ all=self._all, divide=self._divide, over=self._over,
455
+ under=self._under, invalid=self._invalid)
456
+ else:
457
+ extobj = _make_extobj(
458
+ call=self._call,
459
+ all=self._all, divide=self._divide, over=self._over,
460
+ under=self._under, invalid=self._invalid)
461
+
462
+ self._token = _extobj_contextvar.set(extobj)
463
+
464
+ def __exit__(self, *exc_info):
465
+ _extobj_contextvar.reset(self._token)
466
+
467
+ def __call__(self, func):
468
+ # We need to customize `__call__` compared to `ContextDecorator`
469
+ # because we must store the token per-thread so cannot store it on
470
+ # the instance (we could create a new instance for this).
471
+ # This duplicates the code from `__enter__`.
472
+ @functools.wraps(func)
473
+ def inner(*args, **kwargs):
474
+ if self._call is _Unspecified:
475
+ extobj = _make_extobj(
476
+ all=self._all, divide=self._divide, over=self._over,
477
+ under=self._under, invalid=self._invalid)
478
+ else:
479
+ extobj = _make_extobj(
480
+ call=self._call,
481
+ all=self._all, divide=self._divide, over=self._over,
482
+ under=self._under, invalid=self._invalid)
483
+
484
+ _token = _extobj_contextvar.set(extobj)
485
+ try:
486
+ # Call the original, decorated, function:
487
+ return func(*args, **kwargs)
488
+ finally:
489
+ _extobj_contextvar.reset(_token)
490
+
491
+ return inner
venv/lib/python3.13/site-packages/numpy/_core/_ufunc_config.pyi ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from collections.abc import Callable
2
+ from types import TracebackType
3
+ from typing import (
4
+ Any,
5
+ Final,
6
+ Literal,
7
+ TypeAlias,
8
+ TypedDict,
9
+ TypeVar,
10
+ type_check_only,
11
+ )
12
+
13
+ from _typeshed import SupportsWrite
14
+
15
+ __all__ = [
16
+ "seterr",
17
+ "geterr",
18
+ "setbufsize",
19
+ "getbufsize",
20
+ "seterrcall",
21
+ "geterrcall",
22
+ "errstate",
23
+ ]
24
+
25
# Accepted error-handling modes and callback types for the `seterr` family.
_ErrKind: TypeAlias = Literal["ignore", "warn", "raise", "call", "print", "log"]
_ErrCall: TypeAlias = Callable[[str, int], Any] | SupportsWrite[str]

_CallableT = TypeVar("_CallableT", bound=Callable[..., object])

@type_check_only
class _ErrDict(TypedDict):
    # Shape of the dict returned by `geterr` and `seterr`.
    divide: _ErrKind
    over: _ErrKind
    under: _ErrKind
    invalid: _ErrKind
36
+
37
+ ###
38
+
39
class _unspecified: ...

# Sentinel distinguishing "argument not given" from an explicit ``None``.
_Unspecified: Final[_unspecified]

class errstate:
    # Context manager / decorator for scoped floating-point error handling.
    __slots__ = "_all", "_call", "_divide", "_invalid", "_over", "_token", "_under"

    def __init__(
        self,
        /,
        *,
        call: _ErrCall | _unspecified = ...,  # = _Unspecified
        all: _ErrKind | None = None,
        divide: _ErrKind | None = None,
        over: _ErrKind | None = None,
        under: _ErrKind | None = None,
        invalid: _ErrKind | None = None,
    ) -> None: ...
    def __call__(self, /, func: _CallableT) -> _CallableT: ...
    def __enter__(self) -> None: ...
    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_value: BaseException | None,
        traceback: TracebackType | None,
        /,
    ) -> None: ...
+ ) -> None: ...
66
+
67
+ def seterr(
68
+ all: _ErrKind | None = ...,
69
+ divide: _ErrKind | None = ...,
70
+ over: _ErrKind | None = ...,
71
+ under: _ErrKind | None = ...,
72
+ invalid: _ErrKind | None = ...,
73
+ ) -> _ErrDict: ...
74
+ def geterr() -> _ErrDict: ...
75
+ def setbufsize(size: int) -> int: ...
76
+ def getbufsize() -> int: ...
77
+ def seterrcall(func: _ErrCall | None) -> _ErrCall | None: ...
78
+ def geterrcall() -> _ErrCall | None: ...
venv/lib/python3.13/site-packages/numpy/_core/_umath_tests.cpython-313-x86_64-linux-gnu.so ADDED
Binary file (50.3 kB). View file
 
venv/lib/python3.13/site-packages/numpy/_core/arrayprint.py ADDED
@@ -0,0 +1,1775 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Array printing function
2
+
3
+ $Id: arrayprint.py,v 1.9 2005/09/13 13:58:44 teoliphant Exp $
4
+
5
+ """
6
+ __all__ = ["array2string", "array_str", "array_repr",
7
+ "set_printoptions", "get_printoptions", "printoptions",
8
+ "format_float_positional", "format_float_scientific"]
9
+ __docformat__ = 'restructuredtext'
10
+
11
+ #
12
+ # Written by Konrad Hinsen <hinsenk@ere.umontreal.ca>
13
+ # last revision: 1996-3-13
14
+ # modified by Jim Hugunin 1997-3-3 for repr's and str's (and other details)
15
+ # and by Perry Greenfield 2000-4-1 for numarray
16
+ # and by Travis Oliphant 2005-8-22 for numpy
17
+
18
+
19
+ # Note: Both scalartypes.c.src and arrayprint.py implement strs for numpy
20
+ # scalars but for different purposes. scalartypes.c.src has str/reprs for when
21
+ # the scalar is printed on its own, while arrayprint.py has strs for when
22
+ # scalars are printed inside an ndarray. Only the latter strs are currently
23
+ # user-customizable.
24
+
25
+ import functools
26
+ import numbers
27
+ import sys
28
+
29
+ try:
30
+ from _thread import get_ident
31
+ except ImportError:
32
+ from _dummy_thread import get_ident
33
+
34
+ import contextlib
35
+ import operator
36
+ import warnings
37
+
38
+ import numpy as np
39
+
40
+ from . import numerictypes as _nt
41
+ from .fromnumeric import any
42
+ from .multiarray import (
43
+ array,
44
+ datetime_as_string,
45
+ datetime_data,
46
+ dragon4_positional,
47
+ dragon4_scientific,
48
+ ndarray,
49
+ )
50
+ from .numeric import asarray, concatenate, errstate
51
+ from .numerictypes import complex128, flexible, float64, int_
52
+ from .overrides import array_function_dispatch, set_module
53
+ from .printoptions import format_options
54
+ from .umath import absolute, isfinite, isinf, isnat
55
+
56
+
57
def _make_options_dict(precision=None, threshold=None, edgeitems=None,
                       linewidth=None, suppress=None, nanstr=None, infstr=None,
                       sign=None, formatter=None, floatmode=None, legacy=None,
                       override_repr=None):
    """
    Make a dictionary out of the non-None arguments, plus conversion of
    *legacy* and sanity checks.
    """

    # Snapshot of all non-None parameters.  This relies on being evaluated
    # before any other local is created, so only the arguments themselves
    # end up in the dict — do not introduce locals above this line.
    options = {k: v for k, v in list(locals().items()) if v is not None}

    if suppress is not None:
        # Normalize any truthy/falsy value to a real bool.
        options['suppress'] = bool(suppress)

    modes = ['fixed', 'unique', 'maxprec', 'maxprec_equal']
    if floatmode not in modes + [None]:
        raise ValueError("floatmode option must be one of " +
                         ", ".join(f'"{m}"' for m in modes))

    if sign not in [None, '-', '+', ' ']:
        raise ValueError("sign option must be one of ' ', '+', or '-'")

    # `legacy` is stored internally as an int (e.g. '1.13' -> 113);
    # non-legacy mode is encoded as sys.maxsize.
    if legacy is False:
        options['legacy'] = sys.maxsize
    elif legacy == False:  # noqa: E712
        # Values merely *equal* to False (e.g. 0) are deprecated; only the
        # literal `False` is accepted silently by the branch above.
        warnings.warn(
            f"Passing `legacy={legacy!r}` is deprecated.",
            FutureWarning, stacklevel=3
        )
        options['legacy'] = sys.maxsize
    elif legacy == '1.13':
        options['legacy'] = 113
    elif legacy == '1.21':
        options['legacy'] = 121
    elif legacy == '1.25':
        options['legacy'] = 125
    elif legacy == '2.1':
        options['legacy'] = 201
    elif legacy == '2.2':
        options['legacy'] = 202
    elif legacy is None:
        pass  # OK, do nothing.
    else:
        # Unknown value: warn rather than raise, leaving 'legacy' unset in
        # `options` so the current setting is kept.
        warnings.warn(
            "legacy printing option can currently only be '1.13', '1.21', "
            "'1.25', '2.1', '2.2' or `False`", stacklevel=3)

    if threshold is not None:
        # forbid the bad threshold arg suggested by stack overflow, gh-12351
        if not isinstance(threshold, numbers.Number):
            raise TypeError("threshold must be numeric")
        if np.isnan(threshold):
            raise ValueError("threshold must be non-NAN, try "
                             "sys.maxsize for untruncated representation")

    if precision is not None:
        # forbid the bad precision arg as suggested by issue #18254
        try:
            options['precision'] = operator.index(precision)
        except TypeError as e:
            raise TypeError('precision must be an integer') from e

    return options
120
+
121
+
122
+ @set_module('numpy')
123
+ def set_printoptions(precision=None, threshold=None, edgeitems=None,
124
+ linewidth=None, suppress=None, nanstr=None,
125
+ infstr=None, formatter=None, sign=None, floatmode=None,
126
+ *, legacy=None, override_repr=None):
127
+ """
128
+ Set printing options.
129
+
130
+ These options determine the way floating point numbers, arrays and
131
+ other NumPy objects are displayed.
132
+
133
+ Parameters
134
+ ----------
135
+ precision : int or None, optional
136
+ Number of digits of precision for floating point output (default 8).
137
+ May be None if `floatmode` is not `fixed`, to print as many digits as
138
+ necessary to uniquely specify the value.
139
+ threshold : int, optional
140
+ Total number of array elements which trigger summarization
141
+ rather than full repr (default 1000).
142
+ To always use the full repr without summarization, pass `sys.maxsize`.
143
+ edgeitems : int, optional
144
+ Number of array items in summary at beginning and end of
145
+ each dimension (default 3).
146
+ linewidth : int, optional
147
+ The number of characters per line for the purpose of inserting
148
+ line breaks (default 75).
149
+ suppress : bool, optional
150
+ If True, always print floating point numbers using fixed point
151
+ notation, in which case numbers equal to zero in the current precision
152
+ will print as zero. If False, then scientific notation is used when
153
+ absolute value of the smallest number is < 1e-4 or the ratio of the
154
+ maximum absolute value to the minimum is > 1e3. The default is False.
155
+ nanstr : str, optional
156
+ String representation of floating point not-a-number (default nan).
157
+ infstr : str, optional
158
+ String representation of floating point infinity (default inf).
159
+ sign : string, either '-', '+', or ' ', optional
160
+ Controls printing of the sign of floating-point types. If '+', always
161
+ print the sign of positive values. If ' ', always prints a space
162
+ (whitespace character) in the sign position of positive values. If
163
+ '-', omit the sign character of positive values. (default '-')
164
+
165
+ .. versionchanged:: 2.0
166
+ The sign parameter can now be an integer type, previously
167
+ types were floating-point types.
168
+
169
+ formatter : dict of callables, optional
170
+ If not None, the keys should indicate the type(s) that the respective
171
+ formatting function applies to. Callables should return a string.
172
+ Types that are not specified (by their corresponding keys) are handled
173
+ by the default formatters. Individual types for which a formatter
174
+ can be set are:
175
+
176
+ - 'bool'
177
+ - 'int'
178
+ - 'timedelta' : a `numpy.timedelta64`
179
+ - 'datetime' : a `numpy.datetime64`
180
+ - 'float'
181
+ - 'longfloat' : 128-bit floats
182
+ - 'complexfloat'
183
+ - 'longcomplexfloat' : composed of two 128-bit floats
184
+ - 'numpystr' : types `numpy.bytes_` and `numpy.str_`
185
+ - 'object' : `np.object_` arrays
186
+
187
+ Other keys that can be used to set a group of types at once are:
188
+
189
+ - 'all' : sets all types
190
+ - 'int_kind' : sets 'int'
191
+ - 'float_kind' : sets 'float' and 'longfloat'
192
+ - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat'
193
+ - 'str_kind' : sets 'numpystr'
194
+ floatmode : str, optional
195
+ Controls the interpretation of the `precision` option for
196
+ floating-point types. Can take the following values
197
+ (default maxprec_equal):
198
+
199
+ * 'fixed': Always print exactly `precision` fractional digits,
200
+ even if this would print more or fewer digits than
201
+ necessary to specify the value uniquely.
202
+ * 'unique': Print the minimum number of fractional digits necessary
203
+ to represent each value uniquely. Different elements may
204
+ have a different number of digits. The value of the
205
+ `precision` option is ignored.
206
+ * 'maxprec': Print at most `precision` fractional digits, but if
207
+ an element can be uniquely represented with fewer digits
208
+ only print it with that many.
209
+ * 'maxprec_equal': Print at most `precision` fractional digits,
210
+ but if every element in the array can be uniquely
211
+ represented with an equal number of fewer digits, use that
212
+ many digits for all elements.
213
+ legacy : string or `False`, optional
214
+ If set to the string ``'1.13'`` enables 1.13 legacy printing mode. This
215
+ approximates numpy 1.13 print output by including a space in the sign
216
+ position of floats and different behavior for 0d arrays. This also
217
+ enables 1.21 legacy printing mode (described below).
218
+
219
+ If set to the string ``'1.21'`` enables 1.21 legacy printing mode. This
220
+ approximates numpy 1.21 print output of complex structured dtypes
221
+ by not inserting spaces after commas that separate fields and after
222
+ colons.
223
+
224
+ If set to ``'1.25'`` approximates printing of 1.25 which mainly means
225
+ that numeric scalars are printed without their type information, e.g.
226
+ as ``3.0`` rather than ``np.float64(3.0)``.
227
+
228
+ If set to ``'2.1'``, shape information is not given when arrays are
229
+ summarized (i.e., multiple elements replaced with ``...``).
230
+
231
+ If set to ``'2.2'``, the transition to use scientific notation for
232
+ printing ``np.float16`` and ``np.float32`` types may happen later or
233
+ not at all for larger values.
234
+
235
+ If set to `False`, disables legacy mode.
236
+
237
+ Unrecognized strings will be ignored with a warning for forward
238
+ compatibility.
239
+
240
+ .. versionchanged:: 1.22.0
241
+ .. versionchanged:: 2.2
242
+
243
+ override_repr: callable, optional
244
+ If set a passed function will be used for generating arrays' repr.
245
+ Other options will be ignored.
246
+
247
+ See Also
248
+ --------
249
+ get_printoptions, printoptions, array2string
250
+
251
+ Notes
252
+ -----
253
+ `formatter` is always reset with a call to `set_printoptions`.
254
+
255
+ Use `printoptions` as a context manager to set the values temporarily.
256
+
257
+ Examples
258
+ --------
259
+ Floating point precision can be set:
260
+
261
+ >>> import numpy as np
262
+ >>> np.set_printoptions(precision=4)
263
+ >>> np.array([1.123456789])
264
+ [1.1235]
265
+
266
+ Long arrays can be summarised:
267
+
268
+ >>> np.set_printoptions(threshold=5)
269
+ >>> np.arange(10)
270
+ array([0, 1, 2, ..., 7, 8, 9], shape=(10,))
271
+
272
+ Small results can be suppressed:
273
+
274
+ >>> eps = np.finfo(float).eps
275
+ >>> x = np.arange(4.)
276
+ >>> x**2 - (x + eps)**2
277
+ array([-4.9304e-32, -4.4409e-16, 0.0000e+00, 0.0000e+00])
278
+ >>> np.set_printoptions(suppress=True)
279
+ >>> x**2 - (x + eps)**2
280
+ array([-0., -0., 0., 0.])
281
+
282
+ A custom formatter can be used to display array elements as desired:
283
+
284
+ >>> np.set_printoptions(formatter={'all':lambda x: 'int: '+str(-x)})
285
+ >>> x = np.arange(3)
286
+ >>> x
287
+ array([int: 0, int: -1, int: -2])
288
+ >>> np.set_printoptions() # formatter gets reset
289
+ >>> x
290
+ array([0, 1, 2])
291
+
292
+ To put back the default options, you can use:
293
+
294
+ >>> np.set_printoptions(edgeitems=3, infstr='inf',
295
+ ... linewidth=75, nanstr='nan', precision=8,
296
+ ... suppress=False, threshold=1000, formatter=None)
297
+
298
+ Also to temporarily override options, use `printoptions`
299
+ as a context manager:
300
+
301
+ >>> with np.printoptions(precision=2, suppress=True, threshold=5):
302
+ ... np.linspace(0, 10, 10)
303
+ array([ 0. , 1.11, 2.22, ..., 7.78, 8.89, 10. ], shape=(10,))
304
+
305
+ """
306
+ _set_printoptions(precision, threshold, edgeitems, linewidth, suppress,
307
+ nanstr, infstr, formatter, sign, floatmode,
308
+ legacy=legacy, override_repr=override_repr)
309
+
310
+
311
def _set_printoptions(precision=None, threshold=None, edgeitems=None,
                      linewidth=None, suppress=None, nanstr=None,
                      infstr=None, formatter=None, sign=None, floatmode=None,
                      *, legacy=None, override_repr=None):
    """Validate the requested print options, merge them into the current
    state, and install the result in the ``format_options`` context var.

    Returns the context-variable token so callers (e.g. ``printoptions``)
    can restore the previous state with ``format_options.reset(token)``.
    """
    requested = _make_options_dict(precision, threshold, edgeitems, linewidth,
                                   suppress, nanstr, infstr, sign, formatter,
                                   floatmode, legacy)
    # formatter and override_repr are always reset, even when passed as None
    requested['formatter'] = formatter
    requested['override_repr'] = override_repr

    merged = dict(format_options.get())
    merged.update(requested)

    # 1.13 legacy mode forces the default '-' sign handling
    if merged['legacy'] == 113:
        merged['sign'] = '-'

    return format_options.set(merged)
329
+
330
+
331
@set_module('numpy')
def get_printoptions():
    """
    Return the current print options.

    Returns
    -------
    print_opts : dict
        Dictionary of current print options with keys

        - precision : int
        - threshold : int
        - edgeitems : int
        - linewidth : int
        - suppress : bool
        - nanstr : str
        - infstr : str
        - sign : str
        - formatter : dict of callables
        - floatmode : str
        - legacy : str or False

        For a full description of these options, see `set_printoptions`.

    See Also
    --------
    set_printoptions, printoptions

    Examples
    --------
    >>> import numpy as np

    >>> np.get_printoptions()
    {'edgeitems': 3, 'threshold': 1000, ..., 'override_repr': None}

    >>> np.get_printoptions()['linewidth']
    75
    >>> np.set_printoptions(linewidth=100)
    >>> np.get_printoptions()['linewidth']
    100

    """
    current = dict(format_options.get())
    # translate the internal integer encoding of the legacy mode back into
    # the public string / False form accepted by set_printoptions
    legacy_names = {113: '1.13', 121: '1.21', 125: '1.25',
                    201: '2.1', 202: '2.2', sys.maxsize: False}
    current['legacy'] = legacy_names[current['legacy']]
    return current
379
+
380
+
381
def _get_legacy_print_mode():
    """Return the legacy print mode as an int (e.g. 113 for '1.13';
    ``sys.maxsize`` when legacy mode is disabled)."""
    opts = format_options.get()
    return opts['legacy']
384
+
385
+
386
@set_module('numpy')
@contextlib.contextmanager
def printoptions(*args, **kwargs):
    """Context manager for setting print options.

    Set print options for the scope of the `with` block, and restore the old
    options at the end. See `set_printoptions` for the full description of
    available options.

    Examples
    --------
    >>> import numpy as np

    >>> from numpy.testing import assert_equal
    >>> with np.printoptions(precision=2):
    ...     np.array([2.0]) / 3
    array([0.67])

    The `as`-clause of the `with`-statement gives the current print options:

    >>> with np.printoptions(precision=2) as opts:
    ...      assert_equal(opts, np.get_printoptions())

    See Also
    --------
    set_printoptions, get_printoptions

    """
    # install the requested options and remember the restore token
    state_token = _set_printoptions(*args, **kwargs)
    try:
        # expose the merged options (public form) as the `as`-target
        yield get_printoptions()
    finally:
        # restore whatever was active before the `with` block
        format_options.reset(state_token)
420
+
421
+
422
+ def _leading_trailing(a, edgeitems, index=()):
423
+ """
424
+ Keep only the N-D corners (leading and trailing edges) of an array.
425
+
426
+ Should be passed a base-class ndarray, since it makes no guarantees about
427
+ preserving subclasses.
428
+ """
429
+ axis = len(index)
430
+ if axis == a.ndim:
431
+ return a[index]
432
+
433
+ if a.shape[axis] > 2 * edgeitems:
434
+ return concatenate((
435
+ _leading_trailing(a, edgeitems, index + np.index_exp[:edgeitems]),
436
+ _leading_trailing(a, edgeitems, index + np.index_exp[-edgeitems:])
437
+ ), axis=axis)
438
+ else:
439
+ return _leading_trailing(a, edgeitems, index + np.index_exp[:])
440
+
441
+
442
+ def _object_format(o):
443
+ """ Object arrays containing lists should be printed unambiguously """
444
+ if type(o) is list:
445
+ fmt = 'list({!r})'
446
+ else:
447
+ fmt = '{!r}'
448
+ return fmt.format(o)
449
+
450
def repr_format(x):
    """repr() of *x*, first unwrapping numpy str/bytes scalars to the
    equivalent Python objects so no ``np.str_(...)`` wrapper leaks in."""
    if isinstance(x, (np.str_, np.bytes_)):
        x = x.item()
    return repr(x)
454
+
455
def str_format(x):
    """str() of *x*, first unwrapping numpy str/bytes scalars to the
    equivalent Python objects."""
    if isinstance(x, (np.str_, np.bytes_)):
        x = x.item()
    return str(x)
459
+
460
+ def _get_formatdict(data, *, precision, floatmode, suppress, sign, legacy,
461
+ formatter, **kwargs):
462
+ # note: extra arguments in kwargs are ignored
463
+
464
+ # wrapped in lambdas to avoid taking a code path
465
+ # with the wrong type of data
466
+ formatdict = {
467
+ 'bool': lambda: BoolFormat(data),
468
+ 'int': lambda: IntegerFormat(data, sign),
469
+ 'float': lambda: FloatingFormat(
470
+ data, precision, floatmode, suppress, sign, legacy=legacy),
471
+ 'longfloat': lambda: FloatingFormat(
472
+ data, precision, floatmode, suppress, sign, legacy=legacy),
473
+ 'complexfloat': lambda: ComplexFloatingFormat(
474
+ data, precision, floatmode, suppress, sign, legacy=legacy),
475
+ 'longcomplexfloat': lambda: ComplexFloatingFormat(
476
+ data, precision, floatmode, suppress, sign, legacy=legacy),
477
+ 'datetime': lambda: DatetimeFormat(data, legacy=legacy),
478
+ 'timedelta': lambda: TimedeltaFormat(data),
479
+ 'object': lambda: _object_format,
480
+ 'void': lambda: str_format,
481
+ 'numpystr': lambda: repr_format}
482
+
483
+ # we need to wrap values in `formatter` in a lambda, so that the interface
484
+ # is the same as the above values.
485
+ def indirect(x):
486
+ return lambda: x
487
+
488
+ if formatter is not None:
489
+ fkeys = [k for k in formatter.keys() if formatter[k] is not None]
490
+ if 'all' in fkeys:
491
+ for key in formatdict.keys():
492
+ formatdict[key] = indirect(formatter['all'])
493
+ if 'int_kind' in fkeys:
494
+ for key in ['int']:
495
+ formatdict[key] = indirect(formatter['int_kind'])
496
+ if 'float_kind' in fkeys:
497
+ for key in ['float', 'longfloat']:
498
+ formatdict[key] = indirect(formatter['float_kind'])
499
+ if 'complex_kind' in fkeys:
500
+ for key in ['complexfloat', 'longcomplexfloat']:
501
+ formatdict[key] = indirect(formatter['complex_kind'])
502
+ if 'str_kind' in fkeys:
503
+ formatdict['numpystr'] = indirect(formatter['str_kind'])
504
+ for key in formatdict.keys():
505
+ if key in fkeys:
506
+ formatdict[key] = indirect(formatter[key])
507
+
508
+ return formatdict
509
+
510
def _get_format_function(data, **options):
    """
    Find the right element-formatting function for ``data.dtype``.

    The checks run in a fixed order: timedelta is matched inside the
    integer branch, and longdouble/clongdouble are matched before the
    generic float/complex cases, mirroring the dtype hierarchy.
    """
    dtype_ = data.dtype
    dtypeobj = dtype_.type
    formatdict = _get_formatdict(data, **options)

    if dtypeobj is None:
        return formatdict["numpystr"]()
    if issubclass(dtypeobj, _nt.bool):
        return formatdict['bool']()
    if issubclass(dtypeobj, _nt.integer):
        key = 'timedelta' if issubclass(dtypeobj, _nt.timedelta64) else 'int'
        return formatdict[key]()
    if issubclass(dtypeobj, _nt.floating):
        key = 'longfloat' if issubclass(dtypeobj, _nt.longdouble) else 'float'
        return formatdict[key]()
    if issubclass(dtypeobj, _nt.complexfloating):
        key = ('longcomplexfloat' if issubclass(dtypeobj, _nt.clongdouble)
               else 'complexfloat')
        return formatdict[key]()
    if issubclass(dtypeobj, (_nt.str_, _nt.bytes_)):
        return formatdict['numpystr']()
    if issubclass(dtypeobj, _nt.datetime64):
        return formatdict['datetime']()
    if issubclass(dtypeobj, _nt.object_):
        return formatdict['object']()
    if issubclass(dtypeobj, _nt.void):
        if dtype_.names is not None:
            # structured dtype: build a per-field formatter
            return StructuredVoidFormat.from_data(data, **options)
        return formatdict['void']()
    # anything unrecognized falls back to plain repr formatting
    return formatdict['numpystr']()
549
+
550
+
551
+ def _recursive_guard(fillvalue='...'):
552
+ """
553
+ Like the python 3.2 reprlib.recursive_repr, but forwards *args and **kwargs
554
+
555
+ Decorates a function such that if it calls itself with the same first
556
+ argument, it returns `fillvalue` instead of recursing.
557
+
558
+ Largely copied from reprlib.recursive_repr
559
+ """
560
+
561
+ def decorating_function(f):
562
+ repr_running = set()
563
+
564
+ @functools.wraps(f)
565
+ def wrapper(self, *args, **kwargs):
566
+ key = id(self), get_ident()
567
+ if key in repr_running:
568
+ return fillvalue
569
+ repr_running.add(key)
570
+ try:
571
+ return f(self, *args, **kwargs)
572
+ finally:
573
+ repr_running.discard(key)
574
+
575
+ return wrapper
576
+
577
+ return decorating_function
578
+
579
+
580
# gracefully handle recursive calls, when object arrays contain themselves
@_recursive_guard()
def _array2string(a, options, separator=' ', prefix=""):
    """Stringify array *a* according to *options* (core of array2string)."""
    # The formatter __init__s in _get_format_function cannot deal with
    # subclasses yet, and we also need to avoid recursion issues in
    # _formatArray with subclasses which return 0d arrays in place of scalars
    data = asarray(a)
    if a.shape == ():
        a = data

    if a.size > options['threshold']:
        # too many elements: format only the corners, with '...' in between
        summary_insert = "..."
        data = _leading_trailing(data, options['edgeitems'])
    else:
        summary_insert = ""

    # find the right formatting function for the (possibly reduced) array
    format_function = _get_format_function(data, **options)

    # hanging indent: one space for the opening "[" plus room for the
    # caller-supplied prefix (e.g. "array(")
    next_line_prefix = " " + " " * len(prefix)

    return _formatArray(a, format_function, options['linewidth'],
                        next_line_prefix, separator, options['edgeitems'],
                        summary_insert, options['legacy'])
608
+
609
+
610
+ def _array2string_dispatcher(
611
+ a, max_line_width=None, precision=None,
612
+ suppress_small=None, separator=None, prefix=None,
613
+ style=None, formatter=None, threshold=None,
614
+ edgeitems=None, sign=None, floatmode=None, suffix=None,
615
+ *, legacy=None):
616
+ return (a,)
617
+
618
+
619
@array_function_dispatch(_array2string_dispatcher, module='numpy')
def array2string(a, max_line_width=None, precision=None,
                 suppress_small=None, separator=' ', prefix="",
                 style=np._NoValue, formatter=None, threshold=None,
                 edgeitems=None, sign=None, floatmode=None, suffix="",
                 *, legacy=None):
    """
    Return a string representation of an array.

    Parameters
    ----------
    a : ndarray
        Input array.
    max_line_width : int, optional
        Inserts newlines if text is longer than `max_line_width`.
        Defaults to ``numpy.get_printoptions()['linewidth']``.
    precision : int or None, optional
        Floating point precision.
        Defaults to ``numpy.get_printoptions()['precision']``.
    suppress_small : bool, optional
        Represent numbers "very close" to zero as zero; default is False.
        Very close is defined by precision: if the precision is 8, e.g.,
        numbers smaller (in absolute value) than 5e-9 are represented as
        zero.
        Defaults to ``numpy.get_printoptions()['suppress']``.
    separator : str, optional
        Inserted between elements.
    prefix : str, optional
    suffix : str, optional
        The length of the prefix and suffix strings are used to respectively
        align and wrap the output. An array is typically printed as::

          prefix + array2string(a) + suffix

        The output is left-padded by the length of the prefix string, and
        wrapping is forced at the column ``max_line_width - len(suffix)``.
        It should be noted that the content of prefix and suffix strings are
        not included in the output.
    style : _NoValue, optional
        Has no effect, do not use.

        .. deprecated:: 1.14.0
    formatter : dict of callables, optional
        If not None, the keys should indicate the type(s) that the respective
        formatting function applies to. Callables should return a string.
        Types that are not specified (by their corresponding keys) are handled
        by the default formatters. Individual types for which a formatter
        can be set are:

        - 'bool'
        - 'int'
        - 'timedelta' : a `numpy.timedelta64`
        - 'datetime' : a `numpy.datetime64`
        - 'float'
        - 'longfloat' : 128-bit floats
        - 'complexfloat'
        - 'longcomplexfloat' : composed of two 128-bit floats
        - 'void' : type `numpy.void`
        - 'numpystr' : types `numpy.bytes_` and `numpy.str_`

        Other keys that can be used to set a group of types at once are:

        - 'all' : sets all types
        - 'int_kind' : sets 'int'
        - 'float_kind' : sets 'float' and 'longfloat'
        - 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat'
        - 'str_kind' : sets 'numpystr'
    threshold : int, optional
        Total number of array elements which trigger summarization
        rather than full repr.
        Defaults to ``numpy.get_printoptions()['threshold']``.
    edgeitems : int, optional
        Number of array items in summary at beginning and end of
        each dimension.
        Defaults to ``numpy.get_printoptions()['edgeitems']``.
    sign : string, either '-', '+', or ' ', optional
        Controls printing of the sign of floating-point types. If '+', always
        print the sign of positive values. If ' ', always prints a space
        (whitespace character) in the sign position of positive values. If
        '-', omit the sign character of positive values.
        Defaults to ``numpy.get_printoptions()['sign']``.

        .. versionchanged:: 2.0
            The sign parameter can now be an integer type, previously
            types were floating-point types.

    floatmode : str, optional
        Controls the interpretation of the `precision` option for
        floating-point types.
        Defaults to ``numpy.get_printoptions()['floatmode']``.
        Can take the following values:

        - 'fixed': Always print exactly `precision` fractional digits,
          even if this would print more or fewer digits than
          necessary to specify the value uniquely.
        - 'unique': Print the minimum number of fractional digits necessary
          to represent each value uniquely. Different elements may
          have a different number of digits. The value of the
          `precision` option is ignored.
        - 'maxprec': Print at most `precision` fractional digits, but if
          an element can be uniquely represented with fewer digits
          only print it with that many.
        - 'maxprec_equal': Print at most `precision` fractional digits,
          but if every element in the array can be uniquely
          represented with an equal number of fewer digits, use that
          many digits for all elements.
    legacy : string or `False`, optional
        If set to the string ``'1.13'`` enables 1.13 legacy printing mode. This
        approximates numpy 1.13 print output by including a space in the sign
        position of floats and different behavior for 0d arrays. If set to
        `False`, disables legacy mode. Unrecognized strings will be ignored
        with a warning for forward compatibility.

    Returns
    -------
    array_str : str
        String representation of the array.

    Raises
    ------
    TypeError
        if a callable in `formatter` does not return a string.

    See Also
    --------
    array_str, array_repr, set_printoptions, get_printoptions

    Notes
    -----
    If a formatter is specified for a certain type, the `precision` keyword is
    ignored for that type.

    This is a very flexible function; `array_repr` and `array_str` are using
    `array2string` internally so keywords with the same name should work
    identically in all three functions.

    Examples
    --------
    >>> import numpy as np
    >>> x = np.array([1e-16,1,2,3])
    >>> np.array2string(x, precision=2, separator=',',
    ...                 suppress_small=True)
    '[0.,1.,2.,3.]'

    >>> x = np.arange(3.)
    >>> np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x})
    '[0.00 1.00 2.00]'

    >>> x = np.arange(3)
    >>> np.array2string(x, formatter={'int':lambda x: hex(x)})
    '[0x0 0x1 0x2]'

    """

    # merge user-supplied overrides onto the current global print options;
    # arguments left as None are dropped by _make_options_dict and so keep
    # their global values
    overrides = _make_options_dict(precision, threshold, edgeitems,
                                   max_line_width, suppress_small, None, None,
                                   sign, formatter, floatmode, legacy)
    options = format_options.get().copy()
    options.update(overrides)

    if options['legacy'] <= 113:
        # 1.13 legacy mode: 0d unstructured arrays print via `style`
        # (defaulting to repr of the scalar item)
        if style is np._NoValue:
            style = repr

        if a.shape == () and a.dtype.names is None:
            return style(a.item())
    elif style is not np._NoValue:
        # Deprecation 11-9-2017 v1.14
        warnings.warn("'style' argument is deprecated and no longer functional"
                      " except in 1.13 'legacy' mode",
                      DeprecationWarning, stacklevel=2)

    if options['legacy'] > 113:
        # reserve room for the suffix so wrapping is forced early enough
        options['linewidth'] -= len(suffix)

    # treat as a null array if any of shape elements == 0
    if a.size == 0:
        return "[]"

    return _array2string(a, options, separator, prefix)
799
+
800
+
801
+ def _extendLine(s, line, word, line_width, next_line_prefix, legacy):
802
+ needs_wrap = len(line) + len(word) > line_width
803
+ if legacy > 113:
804
+ # don't wrap lines if it won't help
805
+ if len(line) <= len(next_line_prefix):
806
+ needs_wrap = False
807
+
808
+ if needs_wrap:
809
+ s += line.rstrip() + "\n"
810
+ line = next_line_prefix
811
+ line += word
812
+ return s, line
813
+
814
+
815
+ def _extendLine_pretty(s, line, word, line_width, next_line_prefix, legacy):
816
+ """
817
+ Extends line with nicely formatted (possibly multi-line) string ``word``.
818
+ """
819
+ words = word.splitlines()
820
+ if len(words) == 1 or legacy <= 113:
821
+ return _extendLine(s, line, word, line_width, next_line_prefix, legacy)
822
+
823
+ max_word_length = max(len(word) for word in words)
824
+ if (len(line) + max_word_length > line_width and
825
+ len(line) > len(next_line_prefix)):
826
+ s += line.rstrip() + '\n'
827
+ line = next_line_prefix + words[0]
828
+ indent = next_line_prefix
829
+ else:
830
+ indent = len(line) * ' '
831
+ line += words[0]
832
+
833
+ for word in words[1::]:
834
+ s += line.rstrip() + '\n'
835
+ line = indent + word
836
+
837
+ suffix_length = max_word_length - len(words[-1])
838
+ line += suffix_length * ' '
839
+
840
+ return s, line
841
+
842
def _formatArray(a, format_function, line_width, next_line_prefix,
                 separator, edge_items, summary_insert, legacy):
    """formatArray is designed for two modes of operation:

    1. Full output

    2. Summarized output

    ``summary_insert`` is the string (e.g. ``"..."``) inserted where
    elements are elided; when empty, no summarization happens.
    ``legacy`` is the internal int encoding of the legacy print mode
    (113 for '1.13', larger values for newer behavior).
    """
    def recurser(index, hanging_indent, curr_width):
        """
        By using this local function, we don't need to recurse with all the
        arguments. Since this function is not created recursively, the cost is
        not significant
        """
        axis = len(index)
        axes_left = a.ndim - axis

        # fully-specified index: format the single element
        if axes_left == 0:
            return format_function(a[index])

        # when recursing, add a space to align with the [ added, and reduce the
        # length of the line by 1
        next_hanging_indent = hanging_indent + ' '
        if legacy <= 113:
            next_width = curr_width
        else:
            next_width = curr_width - len(']')

        a_len = a.shape[axis]
        # summarize this axis only when it is long enough to elide something
        show_summary = summary_insert and 2 * edge_items < a_len
        if show_summary:
            leading_items = edge_items
            trailing_items = edge_items
        else:
            leading_items = 0
            trailing_items = a_len

        # stringify the array with the hanging indent on the first line too
        s = ''

        # last axis (rows) - wrap elements if they would not fit on one line
        if axes_left == 1:
            # the length up until the beginning of the separator / bracket
            if legacy <= 113:
                elem_width = curr_width - len(separator.rstrip())
            else:
                elem_width = curr_width - max(
                    len(separator.rstrip()), len(']')
                )

            line = hanging_indent
            for i in range(leading_items):
                word = recurser(index + (i,), next_hanging_indent, next_width)
                s, line = _extendLine_pretty(
                    s, line, word, elem_width, hanging_indent, legacy)
                line += separator

            if show_summary:
                s, line = _extendLine(
                    s, line, summary_insert, elem_width, hanging_indent, legacy
                )
                if legacy <= 113:
                    line += ", "
                else:
                    line += separator

            # trailing elements, except the very last (which gets no separator)
            for i in range(trailing_items, 1, -1):
                word = recurser(index + (-i,), next_hanging_indent, next_width)
                s, line = _extendLine_pretty(
                    s, line, word, elem_width, hanging_indent, legacy)
                line += separator

            if legacy <= 113:
                # width of the separator is not considered on 1.13
                elem_width = curr_width
            word = recurser(index + (-1,), next_hanging_indent, next_width)
            s, line = _extendLine_pretty(
                s, line, word, elem_width, hanging_indent, legacy)

            s += line

        # other axes - insert newlines between rows
        else:
            s = ''
            # deeper axes get more blank lines between their sub-blocks
            line_sep = separator.rstrip() + '\n' * (axes_left - 1)

            for i in range(leading_items):
                nested = recurser(
                    index + (i,), next_hanging_indent, next_width
                )
                s += hanging_indent + nested + line_sep

            if show_summary:
                if legacy <= 113:
                    # trailing space, fixed nbr of newlines,
                    # and fixed separator
                    s += hanging_indent + summary_insert + ", \n"
                else:
                    s += hanging_indent + summary_insert + line_sep

            for i in range(trailing_items, 1, -1):
                nested = recurser(index + (-i,), next_hanging_indent,
                                  next_width)
                s += hanging_indent + nested + line_sep

            nested = recurser(index + (-1,), next_hanging_indent, next_width)
            s += hanging_indent + nested

        # remove the hanging indent, and wrap in []
        s = '[' + s[len(hanging_indent):] + ']'
        return s

    try:
        # invoke the recursive part with an initial index and prefix
        return recurser(index=(),
                        hanging_indent=next_line_prefix,
                        curr_width=line_width)
    finally:
        # recursive closures have a cyclic reference to themselves, which
        # requires gc to collect (gh-10620). To avoid this problem, for
        # performance and PyPy friendliness, we break the cycle:
        recurser = None
965
+
966
+ def _none_or_positive_arg(x, name):
967
+ if x is None:
968
+ return -1
969
+ if x < 0:
970
+ raise ValueError(f"{name} must be >= 0")
971
+ return x
972
+
973
class FloatingFormat:
    """ Formatter for subtypes of np.floating """
    def __init__(self, data, precision, floatmode, suppress_small, sign=False,
                 *, legacy=None):
        # for backcompatibility, accept bools
        if isinstance(sign, bool):
            sign = '+' if sign else '-'

        # internal int encoding of the legacy print mode (113 == '1.13')
        self._legacy = legacy
        if self._legacy <= 113:
            # when not 0d, legacy does not support '-'
            if data.shape != () and sign == '-':
                sign = ' '

        self.floatmode = floatmode
        if floatmode == 'unique':
            # 'unique' mode ignores the precision option entirely
            self.precision = None
        else:
            self.precision = precision

        # None -> -1 sentinel; negative precision is rejected
        self.precision = _none_or_positive_arg(self.precision, 'precision')

        self.suppress_small = suppress_small
        self.sign = sign
        self.exp_format = False
        self.large_exponent = False
        # one pass over the data to fix the common padding/precision
        self.fillFormat(data)

    def fillFormat(self, data):
        """Scan *data* once to decide exponential vs positional notation and
        to set the pad/trim/precision attributes used by ``__call__`` so all
        elements print with a common, aligned width."""
        # only the finite values are used to compute the number of digits
        finite_vals = data[isfinite(data)]

        # choose exponential mode based on the non-zero finite values:
        abs_non_zero = absolute(finite_vals[finite_vals != 0])
        if len(abs_non_zero) != 0:
            max_val = np.max(abs_non_zero)
            min_val = np.min(abs_non_zero)
            if self._legacy <= 202:
                exp_cutoff_max = 1.e8
            else:
                # consider data type while deciding the max cutoff for exp format
                exp_cutoff_max = 10.**min(8, np.finfo(data.dtype).precision)
            with errstate(over='ignore'):  # division can overflow
                if max_val >= exp_cutoff_max or (not self.suppress_small and
                        (min_val < 0.0001 or max_val / min_val > 1000.)):
                    self.exp_format = True

        # do a first pass of printing all the numbers, to determine sizes
        if len(finite_vals) == 0:
            # no finite values at all: neutral defaults (only nan/inf print)
            self.pad_left = 0
            self.pad_right = 0
            self.trim = '.'
            self.exp_size = -1
            self.unique = True
            self.min_digits = None
        elif self.exp_format:
            # scientific notation branch
            trim, unique = '.', True
            if self.floatmode == 'fixed' or self._legacy <= 113:
                trim, unique = 'k', False
            strs = (dragon4_scientific(x, precision=self.precision,
                                       unique=unique, trim=trim,
                                       sign=self.sign == '+')
                    for x in finite_vals)
            # split each "mantissa e exponent" string into its parts
            frac_strs, _, exp_strs = zip(*(s.partition('e') for s in strs))
            int_part, frac_part = zip(*(s.split('.') for s in frac_strs))
            self.exp_size = max(len(s) for s in exp_strs) - 1

            self.trim = 'k'
            self.precision = max(len(s) for s in frac_part)
            self.min_digits = self.precision
            self.unique = unique

            # for back-compat with np 1.13, use 2 spaces & sign and full prec
            if self._legacy <= 113:
                self.pad_left = 3
            else:
                # this should be only 1 or 2. Can be calculated from sign.
                self.pad_left = max(len(s) for s in int_part)
            # pad_right is only needed for nan length calculation
            self.pad_right = self.exp_size + 2 + self.precision
        else:
            # positional (fixed-point) notation branch
            trim, unique = '.', True
            if self.floatmode == 'fixed':
                trim, unique = 'k', False
            strs = (dragon4_positional(x, precision=self.precision,
                                       fractional=True,
                                       unique=unique, trim=trim,
                                       sign=self.sign == '+')
                    for x in finite_vals)
            int_part, frac_part = zip(*(s.split('.') for s in strs))
            if self._legacy <= 113:
                self.pad_left = 1 + max(len(s.lstrip('-+')) for s in int_part)
            else:
                self.pad_left = max(len(s) for s in int_part)
            self.pad_right = max(len(s) for s in frac_part)
            self.exp_size = -1
            self.unique = unique

            if self.floatmode in ['fixed', 'maxprec_equal']:
                # all elements share the same number of fractional digits
                self.precision = self.min_digits = self.pad_right
                self.trim = 'k'
            else:
                self.trim = '.'
                self.min_digits = 0

        if self._legacy > 113:
            # account for sign = ' ' by adding one to pad_left
            if self.sign == ' ' and not any(np.signbit(finite_vals)):
                self.pad_left += 1

        # if there are non-finite values, may need to increase pad_left
        if data.size != finite_vals.size:
            neginf = self.sign != '-' or any(data[isinf(data)] < 0)
            offset = self.pad_right + 1  # +1 for decimal pt
            current_options = format_options.get()
            self.pad_left = max(
                self.pad_left, len(current_options['nanstr']) - offset,
                len(current_options['infstr']) + neginf - offset
            )

    def __call__(self, x):
        """Format a single scalar *x*, right-aligned to the common width
        computed by ``fillFormat``."""
        if not np.isfinite(x):
            with errstate(invalid='ignore'):
                current_options = format_options.get()
                if np.isnan(x):
                    sign = '+' if self.sign == '+' else ''
                    ret = sign + current_options['nanstr']
                else:  # isinf
                    sign = '-' if x < 0 else '+' if self.sign == '+' else ''
                    ret = sign + current_options['infstr']
                # left-pad so nan/inf line up with the finite values
                return ' ' * (
                    self.pad_left + self.pad_right + 1 - len(ret)
                ) + ret

        if self.exp_format:
            return dragon4_scientific(x,
                                      precision=self.precision,
                                      min_digits=self.min_digits,
                                      unique=self.unique,
                                      trim=self.trim,
                                      sign=self.sign == '+',
                                      pad_left=self.pad_left,
                                      exp_digits=self.exp_size)
        else:
            return dragon4_positional(x,
                                      precision=self.precision,
                                      min_digits=self.min_digits,
                                      unique=self.unique,
                                      fractional=True,
                                      trim=self.trim,
                                      sign=self.sign == '+',
                                      pad_left=self.pad_left,
                                      pad_right=self.pad_right)
1125
+
1126
+
1127
@set_module('numpy')
def format_float_scientific(x, precision=None, unique=True, trim='k',
                            sign=False, pad_left=None, exp_digits=None,
                            min_digits=None):
    """
    Format a floating-point scalar as a decimal string in scientific notation.

    Rounding, trimming and padding are all controllable.  Digits are produced
    by the "Dragon4" algorithm, which uses and assumes IEEE unbiased rounding.

    Parameters
    ----------
    x : python float or numpy floating scalar
        Value to format.
    precision : non-negative integer or None, optional
        Maximum number of digits to print.  May only be None when `unique`
        is True; must be an integer when `unique` is False.
    unique : boolean, optional
        If True, generate the shortest digit string that uniquely identifies
        the value among floats of the same type, possibly shortened by
        `precision` or lengthened by `min_digits` (extra digits are rounded
        with unbiased rounding).  If False, generate digits as if from the
        infinite-precision value, stopping after `precision` digits.
    trim : one of 'k', '.', '0', '-', optional
        Trailing-digit trimming: 'k' keeps trailing zeros and the decimal
        point; '.' trims trailing zeros but leaves the point; '0' trims all
        but one zero after the point (inserting it if missing); '-' trims
        trailing zeros and any trailing decimal point.
    sign : boolean, optional
        Whether to show the sign for positive values.
    pad_left : non-negative integer, optional
        Pad the left side with whitespace until at least this many characters
        sit before the decimal point.
    exp_digits : non-negative integer, optional
        Zero-pad the exponent to at least this many digits.  If omitted, the
        exponent has at least 2 digits.
    min_digits : non-negative integer or None, optional
        Minimum number of digits to print; only has an effect with
        `unique=True`.

        .. versionadded:: 1.21.0

    Returns
    -------
    rep : string
        The string representation of the floating point value

    See Also
    --------
    format_float_positional

    Examples
    --------
    >>> import numpy as np
    >>> np.format_float_scientific(np.float32(np.pi))
    '3.1415927e+00'
    >>> s = np.float32(1.23e24)
    >>> np.format_float_scientific(s, unique=False, precision=15)
    '1.230000071797338e+24'
    >>> np.format_float_scientific(s, exp_digits=4)
    '1.23e+0024'
    """
    # _none_or_positive_arg maps None to -1, so the chained comparison below
    # is well-defined for omitted arguments.
    precision = _none_or_positive_arg(precision, 'precision')
    pad_left = _none_or_positive_arg(pad_left, 'pad_left')
    exp_digits = _none_or_positive_arg(exp_digits, 'exp_digits')
    min_digits = _none_or_positive_arg(min_digits, 'min_digits')
    if 0 < precision < min_digits:
        raise ValueError("min_digits must be less than or equal to precision")
    return dragon4_scientific(x, precision=precision, unique=unique,
                              trim=trim, sign=sign, pad_left=pad_left,
                              exp_digits=exp_digits, min_digits=min_digits)
1206
+
1207
+
1208
@set_module('numpy')
def format_float_positional(x, precision=None, unique=True,
                            fractional=True, trim='k', sign=False,
                            pad_left=None, pad_right=None, min_digits=None):
    """
    Format a floating-point scalar as a decimal string in positional notation.

    Rounding, trimming and padding are all controllable.  Digits are produced
    by the "Dragon4" algorithm, which uses and assumes IEEE unbiased rounding.

    Parameters
    ----------
    x : python float or numpy floating scalar
        Value to format.
    precision : non-negative integer or None, optional
        Maximum number of digits to print.  May only be None when `unique`
        is True; must be an integer when `unique` is False.
    unique : boolean, optional
        If True, generate the shortest digit string that uniquely identifies
        the value among floats of the same type, possibly shortened by
        `precision` or lengthened by `min_digits` (extra digits are rounded
        with unbiased rounding).  If False, generate digits as if from the
        infinite-precision value, stopping after `precision` digits.
    fractional : boolean, optional
        If True, `precision` and `min_digits` count digits after the decimal
        point, including leading zeros.  If False, they count significant
        digits, ignoring leading zeros.
    trim : one of 'k', '.', '0', '-', optional
        Trailing-digit trimming: 'k' keeps trailing zeros and the decimal
        point; '.' trims trailing zeros but leaves the point; '0' trims all
        but one zero after the point (inserting it if missing); '-' trims
        trailing zeros and any trailing decimal point.
    sign : boolean, optional
        Whether to show the sign for positive values.
    pad_left : non-negative integer, optional
        Pad the left side with whitespace until at least this many characters
        sit before the decimal point.
    pad_right : non-negative integer, optional
        Pad the right side with whitespace until at least this many characters
        sit after the decimal point.
    min_digits : non-negative integer or None, optional
        Minimum number of digits to print; only has an effect with
        `unique=True`.

        .. versionadded:: 1.21.0

    Returns
    -------
    rep : string
        The string representation of the floating point value

    See Also
    --------
    format_float_scientific

    Examples
    --------
    >>> import numpy as np
    >>> np.format_float_positional(np.float32(np.pi))
    '3.1415927'
    >>> np.format_float_positional(np.float16(np.pi))
    '3.14'
    >>> np.format_float_positional(np.float16(0.3))
    '0.3'
    >>> np.format_float_positional(np.float16(0.3), unique=False, precision=10)
    '0.3000488281'
    """
    # _none_or_positive_arg maps None to -1, so the comparisons below are
    # well-defined for omitted arguments.
    precision = _none_or_positive_arg(precision, 'precision')
    pad_left = _none_or_positive_arg(pad_left, 'pad_left')
    pad_right = _none_or_positive_arg(pad_right, 'pad_right')
    min_digits = _none_or_positive_arg(min_digits, 'min_digits')
    if precision == 0 and not fractional:
        # zero significant digits is meaningless
        raise ValueError("precision must be greater than 0 if "
                         "fractional=False")
    if 0 < precision < min_digits:
        raise ValueError("min_digits must be less than or equal to precision")
    return dragon4_positional(x, precision=precision, unique=unique,
                              fractional=fractional, trim=trim,
                              sign=sign, pad_left=pad_left,
                              pad_right=pad_right, min_digits=min_digits)
1299
+
1300
class IntegerFormat:
    """Right-aligning formatter for integer arrays.

    The column width is sized from the widest rendered element of `data`,
    including room for an explicit '+' or ' ' sign when requested.
    """

    def __init__(self, data, sign='-'):
        width = 0
        if data.size > 0:
            hi = np.max(data)
            lo = np.min(data)
            # ' ' sign is pointless once a '-' will appear anyway
            if sign == ' ' and lo < 0:
                sign = '-'
            hi_width = len(str(hi))
            if hi >= 0 and sign in "+ ":
                # a sign character will be prepended to non-negative values
                hi_width += 1
            width = max(hi_width, len(str(lo)))
        self.format = f'{{:{sign}{width}d}}'

    def __call__(self, x):
        return self.format.format(x)
1318
+
1319
class BoolFormat:
    """Formatter for boolean arrays; pads 'True' so it aligns with 'False'."""

    def __init__(self, data, **kwargs):
        # " True" and "False" then have equal length so array elements align
        # nicely when printed; 0d arrays print the bare word instead.
        self.truestr = 'True' if data.shape == () else ' True'

    def __call__(self, x):
        return self.truestr if x else "False"
1327
+
1328
+
1329
class ComplexFloatingFormat:
    """ Formatter for subtypes of np.complexfloating """

    def __init__(self, x, precision, floatmode, suppress_small,
                 sign=False, *, legacy=None):
        # bools are accepted for `sign` for backward compatibility
        if isinstance(sign, bool):
            sign = '+' if sign else '-'

        if legacy <= 113:
            # np 1.13 printed the two parts with different float modes
            real_mode = 'maxprec_equal'
            imag_mode = 'maxprec'
        else:
            real_mode = imag_mode = floatmode

        self.real_format = FloatingFormat(
            x.real, precision, real_mode, suppress_small,
            sign=sign, legacy=legacy
        )
        # the imaginary part always carries an explicit sign
        self.imag_format = FloatingFormat(
            x.imag, precision, imag_mode, suppress_small,
            sign='+', legacy=legacy
        )

    def __call__(self, x):
        real_str = self.real_format(x.real)
        imag_str = self.imag_format(x.imag)

        # splice the 'j' in just before the imaginary part's trailing padding
        cut = len(imag_str.rstrip())
        return real_str + imag_str[:cut] + 'j' + imag_str[cut:]
1360
+
1361
+
1362
class _TimelikeFormat:
    """Shared base for datetime64/timedelta64 formatters.

    Measures the widest non-NaT element up front so every element (and
    'NaT') can be right-justified to a common width.  Subclasses supply
    `_format_non_nat`.
    """

    def __init__(self, data):
        non_nat = data[~isnat(data)]
        width = 0
        if len(non_nat) > 0:
            # widest rendering among the extreme non-NaT elements
            width = max(len(self._format_non_nat(np.max(non_nat))),
                        len(self._format_non_nat(np.min(non_nat))))
        if len(non_nat) < data.size:
            # data contains a NaT: leave room for "'NaT'" (5 chars)
            width = max(width, 5)
        self._format = f'%{width}s'
        self._nat = "'NaT'".rjust(width)

    def _format_non_nat(self, x):
        # override in subclass
        raise NotImplementedError

    def __call__(self, x):
        if isnat(x):
            return self._nat
        return self._format % self._format_non_nat(x)
1386
+
1387
+
1388
class DatetimeFormat(_TimelikeFormat):
    """Formatter for datetime64 arrays; renders elements as quoted strings
    via `datetime_as_string`."""

    def __init__(self, x, unit=None, timezone=None, casting='same_kind',
                 legacy=False):
        if unit is None:
            # infer the unit from a datetime dtype, else fall back to seconds
            unit = datetime_data(x.dtype)[0] if x.dtype.kind == 'M' else 's'

        self.timezone = 'naive' if timezone is None else timezone
        self.unit = unit
        self.casting = casting
        self.legacy = legacy

        # the base class measures element widths, so it must run after the
        # attributes above are configured
        super().__init__(x)

    def __call__(self, x):
        if self.legacy <= 113:
            # np 1.13 neither padded nor special-cased NaT
            return self._format_non_nat(x)
        return super().__call__(x)

    def _format_non_nat(self, x):
        datestr = datetime_as_string(x,
                                     unit=self.unit,
                                     timezone=self.timezone,
                                     casting=self.casting)
        return "'%s'" % datestr
1418
+
1419
+
1420
class TimedeltaFormat(_TimelikeFormat):
    """Formatter for timedelta64 arrays."""

    def _format_non_nat(self, x):
        # display the raw int64 tick count
        return str(x.astype('i8'))
1423
+
1424
+
1425
class SubArrayFormat:
    """Formats a sub-array field as a nested bracketed list, applying a
    scalar formatter to each element and '...' summarization past
    `threshold` elements."""

    def __init__(self, format_function, **options):
        self.format_function = format_function
        self.threshold = options['threshold']
        self.edge_items = options['edgeitems']

    def __call__(self, a):
        # decide once per top-level call whether this array is summarized
        self.summary_insert = "..." if a.size > self.threshold else ""
        return self.format_array(a)

    def format_array(self, a):
        if np.ndim(a) == 0:
            return self.format_function(a)

        edge = self.edge_items
        if self.summary_insert and a.shape[0] > 2 * edge:
            parts = [self.format_array(sub) for sub in a[:edge]]
            parts.append(self.summary_insert)
            parts.extend(self.format_array(sub) for sub in a[-edge:])
        else:
            parts = [self.format_array(sub) for sub in a]

        return "[" + ", ".join(parts) + "]"
1449
+
1450
+
1451
class StructuredVoidFormat:
    """
    Formatter for structured np.void objects.

    This does not work on structured alias types like
    np.dtype(('i4', 'i2,i2')), as alias scalars lose their field information,
    and the implementation relies upon np.void.__getitem__.
    """

    def __init__(self, format_functions):
        self.format_functions = format_functions

    @classmethod
    def from_data(cls, data, **options):
        """
        Alternate constructor deriving one elementwise formatter per field
        from the raw data. Added to avoid changing the signature of __init__.
        """
        per_field = []
        for field_name in data.dtype.names:
            fmt = _get_format_function(data[field_name], **options)
            # fields with a sub-array shape need the nested-list formatter
            if data.dtype[field_name].shape != ():
                fmt = SubArrayFormat(fmt, **options)
            per_field.append(fmt)
        return cls(per_field)

    def __call__(self, x):
        pieces = [
            fmt(field)
            for field, fmt in zip(x, self.format_functions)
        ]
        if len(pieces) == 1:
            # trailing comma so a one-field record still reads as a tuple
            return f"({pieces[0]},)"
        return f"({', '.join(pieces)})"
1486
+
1487
+
1488
def _void_scalar_to_string(x, is_repr=True):
    """
    Implements the repr for structured-void scalars. It is called from the
    scalartypes.c.src code, and is placed here because it uses the elementwise
    formatters defined above.
    """
    options = format_options.get().copy()

    if options["legacy"] <= 125:
        # pre-1.26 behaviour: tuple-style rendering with no dtype information
        return StructuredVoidFormat.from_data(array(x), **options)(x)

    formatter = options.get('formatter')
    if formatter is None:
        formatter = options['formatter'] = {}
    # scalars print float fields at full precision, like str() would
    formatter.setdefault('float_kind', str)

    val_repr = StructuredVoidFormat.from_data(array(x), **options)(x)
    if not is_repr:
        return val_repr

    scalar_cls = type(x)
    qualname = scalar_cls.__module__.replace("numpy", "np") + "." + scalar_cls.__name__
    void_dtype = np.dtype((np.void, x.dtype))
    return f"{qualname}({val_repr}, dtype={void_dtype!s})"
1509
+
1510
+
1511
# Scalar types whose dtype the repr need not spell out; consulted by
# dtype_is_implied() below.
_typelessdata = [int_, float64, complex128, _nt.bool]
1512
+
1513
+
1514
def dtype_is_implied(dtype):
    """
    Determine if the given dtype is implied by the representation
    of its values.

    Parameters
    ----------
    dtype : dtype
        Data type

    Returns
    -------
    implied : bool
        True if the dtype is implied by the representation of its values.

    Examples
    --------
    >>> import numpy as np
    >>> np._core.arrayprint.dtype_is_implied(int)
    True
    >>> np.array([1, 2, 3], int)
    array([1, 2, 3])
    >>> np._core.arrayprint.dtype_is_implied(np.int8)
    False
    >>> np.array([1, 2, 3], np.int8)
    array([1, 2, 3], dtype=int8)
    """
    dtype = np.dtype(dtype)

    # np 1.13 always spelled out dtype=bool
    if format_options.get()['legacy'] <= 113 and dtype.type == np.bool:
        return False

    # A dtype is implied only if it is unstructured (field names are not part
    # of the repr), native-endian (byte-swapped dtypes must be explicit), and
    # one of the types array(...) infers by default.
    return (dtype.names is None
            and dtype.isnative
            and dtype.type in _typelessdata)
1554
+
1555
+
1556
def dtype_short_repr(dtype):
    """
    Convert a dtype to a short form which evaluates to the same dtype.

    The intent is roughly that the following holds

    >>> from numpy import *
    >>> dt = np.int64([1, 2]).dtype
    >>> assert eval(dtype_short_repr(dt)) == dt
    """
    if type(dtype).__repr__ != np.dtype.__repr__:
        # TODO: Custom repr for user DTypes, logic should likely move.
        return repr(dtype)

    if dtype.names is not None:
        # structured dtypes give a list or tuple repr
        return str(dtype)

    if issubclass(dtype.type, flexible):
        # handled separately so these don't give garbage like str256
        return f"'{dtype}'"

    if not dtype.isnative:
        # cases like dtype('<u2') that equal an established dtype (uint16)
        # except for endianness must keep the quoted spelling
        return f"'{dtype}'"

    name = dtype.name
    # quote typenames which can't be represented as python variable names
    if name and not (name[0].isalpha() and name.isalnum()):
        name = repr(name)
    return name
1586
+
1587
+
1588
def _array_repr_implementation(
        arr, max_line_width=None, precision=None, suppress_small=None,
        array2string=array2string):
    """Internal version of array_repr() that allows overriding array2string."""
    current_options = format_options.get()
    # a user-installed override_repr short-circuits everything else
    override_repr = current_options["override_repr"]
    if override_repr is not None:
        return override_repr(arr)

    if max_line_width is None:
        max_line_width = current_options['linewidth']

    # subclasses are shown under their own class name instead of "array"
    if type(arr) is not ndarray:
        class_name = type(arr).__name__
    else:
        class_name = "array"

    prefix = class_name + "("
    if (current_options['legacy'] <= 113 and
            arr.shape == () and not arr.dtype.names):
        # np 1.13 printed unstructured 0d arrays via the scalar's repr
        lst = repr(arr.item())
    else:
        lst = array2string(arr, max_line_width, precision, suppress_small,
                           ', ', prefix, suffix=")")

    # Add dtype and shape information if these cannot be inferred from
    # the array string.
    extras = []
    if ((arr.size == 0 and arr.shape != (0,))
            or (current_options['legacy'] > 210
                and arr.size > current_options['threshold'])):
        extras.append(f"shape={arr.shape}")
    if not dtype_is_implied(arr.dtype) or arr.size == 0:
        extras.append(f"dtype={dtype_short_repr(arr.dtype)}")

    if not extras:
        return prefix + lst + ")"

    arr_str = prefix + lst + ","
    extra_str = ", ".join(extras) + ")"
    # compute whether we should put extras on a new line: Do so if adding the
    # extras would extend the last line past max_line_width.
    # Note: This line gives the correct result even when rfind returns -1.
    last_line_len = len(arr_str) - (arr_str.rfind('\n') + 1)
    spacer = " "
    if current_options['legacy'] <= 113:
        if issubclass(arr.dtype.type, flexible):
            spacer = '\n' + ' ' * len(prefix)
    elif last_line_len + len(extra_str) + 1 > max_line_width:
        spacer = '\n' + ' ' * len(prefix)

    return arr_str + spacer + extra_str
1640
+
1641
+
1642
def _array_repr_dispatcher(
        arr, max_line_width=None, precision=None, suppress_small=None):
    # __array_function__ dispatcher for array_repr: only `arr` is relevant
    # for dispatch, the remaining arguments are plain options.
    return (arr,)
1645
+
1646
+
1647
@array_function_dispatch(_array_repr_dispatcher, module='numpy')
def array_repr(arr, max_line_width=None, precision=None, suppress_small=None):
    """
    Return the string representation of an array.

    Parameters
    ----------
    arr : ndarray
        Input array.
    max_line_width : int, optional
        Inserts newlines if text is longer than `max_line_width`.
        Defaults to ``numpy.get_printoptions()['linewidth']``.
    precision : int, optional
        Floating point precision.
        Defaults to ``numpy.get_printoptions()['precision']``.
    suppress_small : bool, optional
        Represent numbers "very close" to zero as zero; default is False.
        Very close is defined by precision: if the precision is 8, e.g.,
        numbers smaller (in absolute value) than 5e-9 are represented as
        zero.
        Defaults to ``numpy.get_printoptions()['suppress']``.

    Returns
    -------
    string : str
        The string representation of an array.

    See Also
    --------
    array_str, array2string, set_printoptions

    Examples
    --------
    >>> import numpy as np
    >>> np.array_repr(np.array([1,2]))
    'array([1, 2])'
    >>> np.array_repr(np.ma.array([0.]))
    'MaskedArray([0.])'
    >>> np.array_repr(np.array([], np.int32))
    'array([], dtype=int32)'

    >>> x = np.array([1e-6, 4e-7, 2, 3])
    >>> np.array_repr(x, precision=6, suppress_small=True)
    'array([0.000001, 0.      , 2.      , 3.      ])'

    """
    # Thin dispatched wrapper: all the work happens in the shared
    # implementation, which uses the module-level array2string by default.
    return _array_repr_implementation(
        arr, max_line_width, precision, suppress_small)
1695
+
1696
+
1697
# NOTE(review): the _recursive_guard decorator presumably breaks infinite
# recursion for 0d object arrays that contain themselves — confirm against
# its definition earlier in this file.
@_recursive_guard()
def _guarded_repr_or_str(v):
    # bytes use repr() so the b'' quoting and escapes survive; everything
    # else is rendered with str()
    if isinstance(v, bytes):
        return repr(v)
    return str(v)
1702
+
1703
+
1704
def _array_str_implementation(
        a, max_line_width=None, precision=None, suppress_small=None,
        array2string=array2string):
    """Internal version of array_str() that allows overriding array2string."""
    # np 1.13 printed unstructured 0d arrays via the item's own str()
    if (format_options.get()['legacy'] <= 113 and
            a.shape == () and not a.dtype.names):
        return str(a.item())

    # the str of 0d arrays is a special case: It should appear like a scalar,
    # so floats are not truncated by `precision`, and strings are not wrapped
    # in quotes. So we return the str of the scalar value.
    if a.shape == ():
        # obtain a scalar and call str on it, avoiding problems for subclasses
        # for which indexing with () returns a 0d instead of a scalar by using
        # ndarray's getindex. Also guard against recursive 0d object arrays.
        return _guarded_repr_or_str(np.ndarray.__getitem__(a, ()))

    return array2string(a, max_line_width, precision, suppress_small, ' ', "")
1722
+
1723
+
1724
def _array_str_dispatcher(
        a, max_line_width=None, precision=None, suppress_small=None):
    # __array_function__ dispatcher for array_str: only `a` is relevant
    # for dispatch, the remaining arguments are plain options.
    return (a,)
1727
+
1728
+
1729
@array_function_dispatch(_array_str_dispatcher, module='numpy')
def array_str(a, max_line_width=None, precision=None, suppress_small=None):
    """
    Return a string representation of the data in an array.

    The data in the array is returned as a single string.  This function is
    similar to `array_repr`, the difference being that `array_repr` also
    returns information on the kind of array and its data type.

    Parameters
    ----------
    a : ndarray
        Input array.
    max_line_width : int, optional
        Inserts newlines if text is longer than `max_line_width`.
        Defaults to ``numpy.get_printoptions()['linewidth']``.
    precision : int, optional
        Floating point precision.
        Defaults to ``numpy.get_printoptions()['precision']``.
    suppress_small : bool, optional
        Represent numbers "very close" to zero as zero; default is False.
        Very close is defined by precision: if the precision is 8, e.g.,
        numbers smaller (in absolute value) than 5e-9 are represented as
        zero.
        Defaults to ``numpy.get_printoptions()['suppress']``.

    See Also
    --------
    array2string, array_repr, set_printoptions

    Examples
    --------
    >>> import numpy as np
    >>> np.array_str(np.arange(3))
    '[0 1 2]'

    """
    # Thin dispatched wrapper: all the work happens in the shared
    # implementation, which uses the module-level array2string by default.
    return _array_str_implementation(
        a, max_line_width, precision, suppress_small)
1768
+
1769
+
1770
# needed if __array_function__ is disabled
# `__wrapped__` is presumably the undecorated implementation attached by the
# array_function_dispatch decorator — the getattr default falls back to
# array2string itself when the attribute is absent.
_array2string_impl = getattr(array2string, '__wrapped__', array2string)
# str()/repr() entry points that bypass __array_function__ dispatch
_default_array_str = functools.partial(_array_str_implementation,
                                       array2string=_array2string_impl)
_default_array_repr = functools.partial(_array_repr_implementation,
                                        array2string=_array2string_impl)
venv/lib/python3.13/site-packages/numpy/_core/arrayprint.pyi ADDED
@@ -0,0 +1,238 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from collections.abc import Callable

# Using a private class is by no means ideal, but it is simply a consequence
# of a `contextlib.context` returning an instance of aforementioned class
from contextlib import _GeneratorContextManager
from typing import (
    Any,
    Final,
    Literal,
    SupportsIndex,
    TypeAlias,
    TypedDict,
    overload,
    type_check_only,
)

from typing_extensions import deprecated

import numpy as np
from numpy._globals import _NoValueType
from numpy._typing import NDArray, _CharLike_co, _FloatLike_co

__all__ = [
    "array2string",
    "array_repr",
    "array_str",
    "format_float_positional",
    "format_float_scientific",
    "get_printoptions",
    "printoptions",
    "set_printoptions",
]

###

# stub-only aliases for the printoption value domains
_FloatMode: TypeAlias = Literal["fixed", "unique", "maxprec", "maxprec_equal"]
_LegacyNoStyle: TypeAlias = Literal["1.21", "1.25", "2.1", False]
_Legacy: TypeAlias = Literal["1.13", _LegacyNoStyle]
_Sign: TypeAlias = Literal["-", "+", " "]
_Trim: TypeAlias = Literal["k", ".", "0", "-"]
_ReprFunc: TypeAlias = Callable[[NDArray[Any]], str]

@type_check_only
class _FormatDict(TypedDict, total=False):
    bool: Callable[[np.bool], str]
    int: Callable[[np.integer], str]
    timedelta: Callable[[np.timedelta64], str]
    datetime: Callable[[np.datetime64], str]
    float: Callable[[np.floating], str]
    longfloat: Callable[[np.longdouble], str]
    complexfloat: Callable[[np.complexfloating], str]
    longcomplexfloat: Callable[[np.clongdouble], str]
    void: Callable[[np.void], str]
    numpystr: Callable[[_CharLike_co], str]
    object: Callable[[object], str]
    all: Callable[[object], str]
    int_kind: Callable[[np.integer], str]
    float_kind: Callable[[np.floating], str]
    complex_kind: Callable[[np.complexfloating], str]
    str_kind: Callable[[_CharLike_co], str]

@type_check_only
class _FormatOptions(TypedDict):
    precision: int
    threshold: int
    edgeitems: int
    linewidth: int
    suppress: bool
    nanstr: str
    infstr: str
    formatter: _FormatDict | None
    sign: _Sign
    floatmode: _FloatMode
    legacy: _Legacy

###

__docformat__: Final = "restructuredtext"  # undocumented

def set_printoptions(
    precision: SupportsIndex | None = ...,
    threshold: int | None = ...,
    edgeitems: int | None = ...,
    linewidth: int | None = ...,
    suppress: bool | None = ...,
    nanstr: str | None = ...,
    infstr: str | None = ...,
    formatter: _FormatDict | None = ...,
    sign: _Sign | None = None,
    floatmode: _FloatMode | None = None,
    *,
    legacy: _Legacy | None = None,
    override_repr: _ReprFunc | None = None,
) -> None: ...
def get_printoptions() -> _FormatOptions: ...

# public numpy export
@overload  # no style
def array2string(
    a: NDArray[Any],
    max_line_width: int | None = None,
    precision: SupportsIndex | None = None,
    suppress_small: bool | None = None,
    separator: str = " ",
    prefix: str = "",
    style: _NoValueType = ...,
    formatter: _FormatDict | None = None,
    threshold: int | None = None,
    edgeitems: int | None = None,
    sign: _Sign | None = None,
    floatmode: _FloatMode | None = None,
    suffix: str = "",
    *,
    legacy: _Legacy | None = None,
) -> str: ...
@overload  # style=<given> (positional), legacy="1.13"
def array2string(
    a: NDArray[Any],
    max_line_width: int | None,
    precision: SupportsIndex | None,
    suppress_small: bool | None,
    separator: str,
    prefix: str,
    style: _ReprFunc,
    formatter: _FormatDict | None = None,
    threshold: int | None = None,
    edgeitems: int | None = None,
    sign: _Sign | None = None,
    floatmode: _FloatMode | None = None,
    suffix: str = "",
    *,
    legacy: Literal["1.13"],
) -> str: ...
@overload  # style=<given> (keyword), legacy="1.13"
def array2string(
    a: NDArray[Any],
    max_line_width: int | None = None,
    precision: SupportsIndex | None = None,
    suppress_small: bool | None = None,
    separator: str = " ",
    prefix: str = "",
    *,
    style: _ReprFunc,
    formatter: _FormatDict | None = None,
    threshold: int | None = None,
    edgeitems: int | None = None,
    sign: _Sign | None = None,
    floatmode: _FloatMode | None = None,
    suffix: str = "",
    legacy: Literal["1.13"],
) -> str: ...
@overload  # style=<given> (positional), legacy!="1.13"
@deprecated("'style' argument is deprecated and no longer functional except in 1.13 'legacy' mode")
def array2string(
    a: NDArray[Any],
    max_line_width: int | None,
    precision: SupportsIndex | None,
    suppress_small: bool | None,
    separator: str,
    prefix: str,
    style: _ReprFunc,
    formatter: _FormatDict | None = None,
    threshold: int | None = None,
    edgeitems: int | None = None,
    sign: _Sign | None = None,
    floatmode: _FloatMode | None = None,
    suffix: str = "",
    *,
    legacy: _LegacyNoStyle | None = None,
) -> str: ...
@overload  # style=<given> (keyword), legacy!="1.13"
@deprecated("'style' argument is deprecated and no longer functional except in 1.13 'legacy' mode")
def array2string(
    a: NDArray[Any],
    max_line_width: int | None = None,
    precision: SupportsIndex | None = None,
    suppress_small: bool | None = None,
    separator: str = " ",
    prefix: str = "",
    *,
    style: _ReprFunc,
    formatter: _FormatDict | None = None,
    threshold: int | None = None,
    edgeitems: int | None = None,
    sign: _Sign | None = None,
    floatmode: _FloatMode | None = None,
    suffix: str = "",
    legacy: _LegacyNoStyle | None = None,
) -> str: ...

def format_float_scientific(
    x: _FloatLike_co,
    precision: int | None = ...,
    unique: bool = ...,
    trim: _Trim = "k",
    sign: bool = ...,
    pad_left: int | None = ...,
    exp_digits: int | None = ...,
    min_digits: int | None = ...,
) -> str: ...
def format_float_positional(
    x: _FloatLike_co,
    precision: int | None = ...,
    unique: bool = ...,
    fractional: bool = ...,
    trim: _Trim = "k",
    sign: bool = ...,
    pad_left: int | None = ...,
    pad_right: int | None = ...,
    min_digits: int | None = ...,
) -> str: ...
def array_repr(
    arr: NDArray[Any],
    max_line_width: int | None = ...,
    precision: SupportsIndex | None = ...,
    suppress_small: bool | None = ...,
) -> str: ...
def array_str(
    a: NDArray[Any],
    max_line_width: int | None = ...,
    precision: SupportsIndex | None = ...,
    suppress_small: bool | None = ...,
) -> str: ...
def printoptions(
    precision: SupportsIndex | None = ...,
    threshold: int | None = ...,
    edgeitems: int | None = ...,
    linewidth: int | None = ...,
    suppress: bool | None = ...,
    nanstr: str | None = ...,
    infstr: str | None = ...,
    formatter: _FormatDict | None = ...,
    sign: _Sign | None = None,
    floatmode: _FloatMode | None = None,
    *,
    legacy: _Legacy | None = None,
    override_repr: _ReprFunc | None = None,
) -> _GeneratorContextManager[_FormatOptions]: ...
venv/lib/python3.13/site-packages/numpy/_core/cversions.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Simple script to compute the api hash of the current API.
2
+
3
+ The API has is defined by numpy_api_order and ufunc_api_order.
4
+
5
+ """
6
+ from os.path import dirname
7
+
8
+ from code_generators.genapi import fullapi_hash
9
+ from code_generators.numpy_api import full_api
10
+
11
+ if __name__ == '__main__':
12
+ curdir = dirname(__file__)
13
+ print(fullapi_hash(full_api))
venv/lib/python3.13/site-packages/numpy/_core/defchararray.py ADDED
@@ -0,0 +1,1427 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ This module contains a set of functions for vectorized string
3
+ operations and methods.
4
+
5
+ .. note::
6
+ The `chararray` class exists for backwards compatibility with
7
+ Numarray, it is not recommended for new development. Starting from numpy
8
+ 1.4, if one needs arrays of strings, it is recommended to use arrays of
9
+ `dtype` `object_`, `bytes_` or `str_`, and use the free functions
10
+ in the `numpy.char` module for fast vectorized string operations.
11
+
12
+ Some methods will only be available if the corresponding string method is
13
+ available in your version of Python.
14
+
15
+ The preferred alias for `defchararray` is `numpy.char`.
16
+
17
+ """
18
+ import functools
19
+
20
+ import numpy as np
21
+ from numpy._core import overrides
22
+ from numpy._core.multiarray import compare_chararrays
23
+ from numpy._core.strings import (
24
+ _join as join,
25
+ )
26
+ from numpy._core.strings import (
27
+ _rsplit as rsplit,
28
+ )
29
+ from numpy._core.strings import (
30
+ _split as split,
31
+ )
32
+ from numpy._core.strings import (
33
+ _splitlines as splitlines,
34
+ )
35
+ from numpy._utils import set_module
36
+ from numpy.strings import *
37
+ from numpy.strings import (
38
+ multiply as strings_multiply,
39
+ )
40
+ from numpy.strings import (
41
+ partition as strings_partition,
42
+ )
43
+ from numpy.strings import (
44
+ rpartition as strings_rpartition,
45
+ )
46
+
47
+ from .numeric import array as narray
48
+ from .numeric import asarray as asnarray
49
+ from .numeric import ndarray
50
+ from .numerictypes import bytes_, character, str_
51
+
52
+ __all__ = [
53
+ 'equal', 'not_equal', 'greater_equal', 'less_equal',
54
+ 'greater', 'less', 'str_len', 'add', 'multiply', 'mod', 'capitalize',
55
+ 'center', 'count', 'decode', 'encode', 'endswith', 'expandtabs',
56
+ 'find', 'index', 'isalnum', 'isalpha', 'isdigit', 'islower', 'isspace',
57
+ 'istitle', 'isupper', 'join', 'ljust', 'lower', 'lstrip', 'partition',
58
+ 'replace', 'rfind', 'rindex', 'rjust', 'rpartition', 'rsplit',
59
+ 'rstrip', 'split', 'splitlines', 'startswith', 'strip', 'swapcase',
60
+ 'title', 'translate', 'upper', 'zfill', 'isnumeric', 'isdecimal',
61
+ 'array', 'asarray', 'compare_chararrays', 'chararray'
62
+ ]
63
+
64
+
65
+ array_function_dispatch = functools.partial(
66
+ overrides.array_function_dispatch, module='numpy.char')
67
+
68
+
69
+ def _binary_op_dispatcher(x1, x2):
70
+ return (x1, x2)
71
+
72
+
73
@array_function_dispatch(_binary_op_dispatcher)
def equal(x1, x2):
    """
    Return (x1 == x2) element-wise.

    Unlike `numpy.equal`, trailing whitespace is stripped from the end
    of each string before the comparison is made.  This matches the
    historical numarray behavior.

    Parameters
    ----------
    x1, x2 : array_like of str or unicode
        Input arrays of the same shape.

    Returns
    -------
    out : ndarray
        Output array of bools.

    Examples
    --------
    >>> import numpy as np
    >>> y = "aa "
    >>> x = "aa"
    >>> np.char.equal(x, y)
    array(True)

    See Also
    --------
    not_equal, greater_equal, less_equal, greater, less
    """
    # compare_chararrays(..., True) requests rstrip-before-compare.
    return compare_chararrays(x1, x2, "==", True)
105
+
106
+
107
@array_function_dispatch(_binary_op_dispatcher)
def not_equal(x1, x2):
    """
    Return (x1 != x2) element-wise.

    Unlike `numpy.not_equal`, trailing whitespace is stripped from the
    end of each string before the comparison is made.  This matches the
    historical numarray behavior.

    Parameters
    ----------
    x1, x2 : array_like of str or unicode
        Input arrays of the same shape.

    Returns
    -------
    out : ndarray
        Output array of bools.

    See Also
    --------
    equal, greater_equal, less_equal, greater, less

    Examples
    --------
    >>> import numpy as np
    >>> x1 = np.array(['a', 'b', 'c'])
    >>> np.char.not_equal(x1, 'b')
    array([ True, False,  True])

    """
    # compare_chararrays(..., True) requests rstrip-before-compare.
    return compare_chararrays(x1, x2, "!=", True)
139
+
140
+
141
@array_function_dispatch(_binary_op_dispatcher)
def greater_equal(x1, x2):
    """
    Return (x1 >= x2) element-wise.

    Unlike `numpy.greater_equal`, trailing whitespace is stripped from
    the end of each string before the comparison is made.  This matches
    the historical numarray behavior.

    Parameters
    ----------
    x1, x2 : array_like of str or unicode
        Input arrays of the same shape.

    Returns
    -------
    out : ndarray
        Output array of bools.

    See Also
    --------
    equal, not_equal, less_equal, greater, less

    Examples
    --------
    >>> import numpy as np
    >>> x1 = np.array(['a', 'b', 'c'])
    >>> np.char.greater_equal(x1, 'b')
    array([False,  True,  True])

    """
    # compare_chararrays(..., True) requests rstrip-before-compare.
    return compare_chararrays(x1, x2, ">=", True)
174
+
175
+
176
@array_function_dispatch(_binary_op_dispatcher)
def less_equal(x1, x2):
    """
    Return (x1 <= x2) element-wise.

    Unlike `numpy.less_equal`, trailing whitespace is stripped from the
    end of each string before the comparison is made.  This matches the
    historical numarray behavior.

    Parameters
    ----------
    x1, x2 : array_like of str or unicode
        Input arrays of the same shape.

    Returns
    -------
    out : ndarray
        Output array of bools.

    See Also
    --------
    equal, not_equal, greater_equal, greater, less

    Examples
    --------
    >>> import numpy as np
    >>> x1 = np.array(['a', 'b', 'c'])
    >>> np.char.less_equal(x1, 'b')
    array([ True,  True, False])

    """
    # compare_chararrays(..., True) requests rstrip-before-compare.
    return compare_chararrays(x1, x2, "<=", True)
208
+
209
+
210
@array_function_dispatch(_binary_op_dispatcher)
def greater(x1, x2):
    """
    Return (x1 > x2) element-wise.

    Unlike `numpy.greater`, trailing whitespace is stripped from the
    end of each string before the comparison is made.  This matches the
    historical numarray behavior.

    Parameters
    ----------
    x1, x2 : array_like of str or unicode
        Input arrays of the same shape.

    Returns
    -------
    out : ndarray
        Output array of bools.

    See Also
    --------
    equal, not_equal, greater_equal, less_equal, less

    Examples
    --------
    >>> import numpy as np
    >>> x1 = np.array(['a', 'b', 'c'])
    >>> np.char.greater(x1, 'b')
    array([False, False,  True])

    """
    # compare_chararrays(..., True) requests rstrip-before-compare.
    return compare_chararrays(x1, x2, ">", True)
242
+
243
+
244
@array_function_dispatch(_binary_op_dispatcher)
def less(x1, x2):
    """
    Return (x1 < x2) element-wise.

    Unlike `numpy.less`, this comparison is performed by first
    stripping whitespace characters from the end of the string. This
    behavior is provided for backward-compatibility with numarray.

    Parameters
    ----------
    x1, x2 : array_like of str or unicode
        Input arrays of the same shape.

    Returns
    -------
    out : ndarray
        Output array of bools.

    See Also
    --------
    equal, not_equal, greater_equal, less_equal, greater

    Examples
    --------
    >>> import numpy as np
    >>> x1 = np.array(['a', 'b', 'c'])
    >>> np.char.less(x1, 'b')
    array([ True, False, False])

    """
    # Final True argument asks compare_chararrays to rstrip before comparing.
    return compare_chararrays(x1, x2, '<', True)
276
+
277
+
278
+ @set_module("numpy.char")
279
+ def multiply(a, i):
280
+ """
281
+ Return (a * i), that is string multiple concatenation,
282
+ element-wise.
283
+
284
+ Values in ``i`` of less than 0 are treated as 0 (which yields an
285
+ empty string).
286
+
287
+ Parameters
288
+ ----------
289
+ a : array_like, with `np.bytes_` or `np.str_` dtype
290
+
291
+ i : array_like, with any integer dtype
292
+
293
+ Returns
294
+ -------
295
+ out : ndarray
296
+ Output array of str or unicode, depending on input types
297
+
298
+ Notes
299
+ -----
300
+ This is a thin wrapper around np.strings.multiply that raises
301
+ `ValueError` when ``i`` is not an integer. It only
302
+ exists for backwards-compatibility.
303
+
304
+ Examples
305
+ --------
306
+ >>> import numpy as np
307
+ >>> a = np.array(["a", "b", "c"])
308
+ >>> np.strings.multiply(a, 3)
309
+ array(['aaa', 'bbb', 'ccc'], dtype='<U3')
310
+ >>> i = np.array([1, 2, 3])
311
+ >>> np.strings.multiply(a, i)
312
+ array(['a', 'bb', 'ccc'], dtype='<U3')
313
+ >>> np.strings.multiply(np.array(['a']), i)
314
+ array(['a', 'aa', 'aaa'], dtype='<U3')
315
+ >>> a = np.array(['a', 'b', 'c', 'd', 'e', 'f']).reshape((2, 3))
316
+ >>> np.strings.multiply(a, 3)
317
+ array([['aaa', 'bbb', 'ccc'],
318
+ ['ddd', 'eee', 'fff']], dtype='<U3')
319
+ >>> np.strings.multiply(a, i)
320
+ array([['a', 'bb', 'ccc'],
321
+ ['d', 'ee', 'fff']], dtype='<U3')
322
+
323
+ """
324
+ try:
325
+ return strings_multiply(a, i)
326
+ except TypeError:
327
+ raise ValueError("Can only multiply by integers")
328
+
329
+
330
+ @set_module("numpy.char")
331
+ def partition(a, sep):
332
+ """
333
+ Partition each element in `a` around `sep`.
334
+
335
+ Calls :meth:`str.partition` element-wise.
336
+
337
+ For each element in `a`, split the element as the first
338
+ occurrence of `sep`, and return 3 strings containing the part
339
+ before the separator, the separator itself, and the part after
340
+ the separator. If the separator is not found, return 3 strings
341
+ containing the string itself, followed by two empty strings.
342
+
343
+ Parameters
344
+ ----------
345
+ a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
346
+ Input array
347
+ sep : {str, unicode}
348
+ Separator to split each string element in `a`.
349
+
350
+ Returns
351
+ -------
352
+ out : ndarray
353
+ Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype,
354
+ depending on input types. The output array will have an extra
355
+ dimension with 3 elements per input element.
356
+
357
+ Examples
358
+ --------
359
+ >>> import numpy as np
360
+ >>> x = np.array(["Numpy is nice!"])
361
+ >>> np.char.partition(x, " ")
362
+ array([['Numpy', ' ', 'is nice!']], dtype='<U8')
363
+
364
+ See Also
365
+ --------
366
+ str.partition
367
+
368
+ """
369
+ return np.stack(strings_partition(a, sep), axis=-1)
370
+
371
+
372
+ @set_module("numpy.char")
373
+ def rpartition(a, sep):
374
+ """
375
+ Partition (split) each element around the right-most separator.
376
+
377
+ Calls :meth:`str.rpartition` element-wise.
378
+
379
+ For each element in `a`, split the element as the last
380
+ occurrence of `sep`, and return 3 strings containing the part
381
+ before the separator, the separator itself, and the part after
382
+ the separator. If the separator is not found, return 3 strings
383
+ containing the string itself, followed by two empty strings.
384
+
385
+ Parameters
386
+ ----------
387
+ a : array-like, with ``StringDType``, ``bytes_``, or ``str_`` dtype
388
+ Input array
389
+ sep : str or unicode
390
+ Right-most separator to split each element in array.
391
+
392
+ Returns
393
+ -------
394
+ out : ndarray
395
+ Output array of ``StringDType``, ``bytes_`` or ``str_`` dtype,
396
+ depending on input types. The output array will have an extra
397
+ dimension with 3 elements per input element.
398
+
399
+ See Also
400
+ --------
401
+ str.rpartition
402
+
403
+ Examples
404
+ --------
405
+ >>> import numpy as np
406
+ >>> a = np.array(['aAaAaA', ' aA ', 'abBABba'])
407
+ >>> np.char.rpartition(a, 'A')
408
+ array([['aAaAa', 'A', ''],
409
+ [' a', 'A', ' '],
410
+ ['abB', 'A', 'Bba']], dtype='<U5')
411
+
412
+ """
413
+ return np.stack(strings_rpartition(a, sep), axis=-1)
414
+
415
+
416
+ @set_module("numpy.char")
417
+ class chararray(ndarray):
418
+ """
419
+ chararray(shape, itemsize=1, unicode=False, buffer=None, offset=0,
420
+ strides=None, order=None)
421
+
422
+ Provides a convenient view on arrays of string and unicode values.
423
+
424
+ .. note::
425
+ The `chararray` class exists for backwards compatibility with
426
+ Numarray, it is not recommended for new development. Starting from numpy
427
+ 1.4, if one needs arrays of strings, it is recommended to use arrays of
428
+ `dtype` `~numpy.object_`, `~numpy.bytes_` or `~numpy.str_`, and use
429
+ the free functions in the `numpy.char` module for fast vectorized
430
+ string operations.
431
+
432
+ Versus a NumPy array of dtype `~numpy.bytes_` or `~numpy.str_`, this
433
+ class adds the following functionality:
434
+
435
+ 1) values automatically have whitespace removed from the end
436
+ when indexed
437
+
438
+ 2) comparison operators automatically remove whitespace from the
439
+ end when comparing values
440
+
441
+ 3) vectorized string operations are provided as methods
442
+ (e.g. `.endswith`) and infix operators (e.g. ``"+", "*", "%"``)
443
+
444
+ chararrays should be created using `numpy.char.array` or
445
+ `numpy.char.asarray`, rather than this constructor directly.
446
+
447
+ This constructor creates the array, using `buffer` (with `offset`
448
+ and `strides`) if it is not ``None``. If `buffer` is ``None``, then
449
+ constructs a new array with `strides` in "C order", unless both
450
+ ``len(shape) >= 2`` and ``order='F'``, in which case `strides`
451
+ is in "Fortran order".
452
+
453
+ Methods
454
+ -------
455
+ astype
456
+ argsort
457
+ copy
458
+ count
459
+ decode
460
+ dump
461
+ dumps
462
+ encode
463
+ endswith
464
+ expandtabs
465
+ fill
466
+ find
467
+ flatten
468
+ getfield
469
+ index
470
+ isalnum
471
+ isalpha
472
+ isdecimal
473
+ isdigit
474
+ islower
475
+ isnumeric
476
+ isspace
477
+ istitle
478
+ isupper
479
+ item
480
+ join
481
+ ljust
482
+ lower
483
+ lstrip
484
+ nonzero
485
+ put
486
+ ravel
487
+ repeat
488
+ replace
489
+ reshape
490
+ resize
491
+ rfind
492
+ rindex
493
+ rjust
494
+ rsplit
495
+ rstrip
496
+ searchsorted
497
+ setfield
498
+ setflags
499
+ sort
500
+ split
501
+ splitlines
502
+ squeeze
503
+ startswith
504
+ strip
505
+ swapaxes
506
+ swapcase
507
+ take
508
+ title
509
+ tofile
510
+ tolist
511
+ tostring
512
+ translate
513
+ transpose
514
+ upper
515
+ view
516
+ zfill
517
+
518
+ Parameters
519
+ ----------
520
+ shape : tuple
521
+ Shape of the array.
522
+ itemsize : int, optional
523
+ Length of each array element, in number of characters. Default is 1.
524
+ unicode : bool, optional
525
+ Are the array elements of type unicode (True) or string (False).
526
+ Default is False.
527
+ buffer : object exposing the buffer interface or str, optional
528
+ Memory address of the start of the array data. Default is None,
529
+ in which case a new array is created.
530
+ offset : int, optional
531
+ Fixed stride displacement from the beginning of an axis?
532
+ Default is 0. Needs to be >=0.
533
+ strides : array_like of ints, optional
534
+ Strides for the array (see `~numpy.ndarray.strides` for
535
+ full description). Default is None.
536
+ order : {'C', 'F'}, optional
537
+ The order in which the array data is stored in memory: 'C' ->
538
+ "row major" order (the default), 'F' -> "column major"
539
+ (Fortran) order.
540
+
541
+ Examples
542
+ --------
543
+ >>> import numpy as np
544
+ >>> charar = np.char.chararray((3, 3))
545
+ >>> charar[:] = 'a'
546
+ >>> charar
547
+ chararray([[b'a', b'a', b'a'],
548
+ [b'a', b'a', b'a'],
549
+ [b'a', b'a', b'a']], dtype='|S1')
550
+
551
+ >>> charar = np.char.chararray(charar.shape, itemsize=5)
552
+ >>> charar[:] = 'abc'
553
+ >>> charar
554
+ chararray([[b'abc', b'abc', b'abc'],
555
+ [b'abc', b'abc', b'abc'],
556
+ [b'abc', b'abc', b'abc']], dtype='|S5')
557
+
558
+ """
559
+ def __new__(subtype, shape, itemsize=1, unicode=False, buffer=None,
560
+ offset=0, strides=None, order='C'):
561
+ if unicode:
562
+ dtype = str_
563
+ else:
564
+ dtype = bytes_
565
+
566
+ # force itemsize to be a Python int, since using NumPy integer
567
+ # types results in itemsize.itemsize being used as the size of
568
+ # strings in the new array.
569
+ itemsize = int(itemsize)
570
+
571
+ if isinstance(buffer, str):
572
+ # unicode objects do not have the buffer interface
573
+ filler = buffer
574
+ buffer = None
575
+ else:
576
+ filler = None
577
+
578
+ if buffer is None:
579
+ self = ndarray.__new__(subtype, shape, (dtype, itemsize),
580
+ order=order)
581
+ else:
582
+ self = ndarray.__new__(subtype, shape, (dtype, itemsize),
583
+ buffer=buffer,
584
+ offset=offset, strides=strides,
585
+ order=order)
586
+ if filler is not None:
587
+ self[...] = filler
588
+
589
+ return self
590
+
591
+ def __array_wrap__(self, arr, context=None, return_scalar=False):
592
+ # When calling a ufunc (and some other functions), we return a
593
+ # chararray if the ufunc output is a string-like array,
594
+ # or an ndarray otherwise
595
+ if arr.dtype.char in "SUbc":
596
+ return arr.view(type(self))
597
+ return arr
598
+
599
+ def __array_finalize__(self, obj):
600
+ # The b is a special case because it is used for reconstructing.
601
+ if self.dtype.char not in 'VSUbc':
602
+ raise ValueError("Can only create a chararray from string data.")
603
+
604
+ def __getitem__(self, obj):
605
+ val = ndarray.__getitem__(self, obj)
606
+ if isinstance(val, character):
607
+ return val.rstrip()
608
+ return val
609
+
610
+ # IMPLEMENTATION NOTE: Most of the methods of this class are
611
+ # direct delegations to the free functions in this module.
612
+ # However, those that return an array of strings should instead
613
+ # return a chararray, so some extra wrapping is required.
614
+
615
+ def __eq__(self, other):
616
+ """
617
+ Return (self == other) element-wise.
618
+
619
+ See Also
620
+ --------
621
+ equal
622
+ """
623
+ return equal(self, other)
624
+
625
+ def __ne__(self, other):
626
+ """
627
+ Return (self != other) element-wise.
628
+
629
+ See Also
630
+ --------
631
+ not_equal
632
+ """
633
+ return not_equal(self, other)
634
+
635
+ def __ge__(self, other):
636
+ """
637
+ Return (self >= other) element-wise.
638
+
639
+ See Also
640
+ --------
641
+ greater_equal
642
+ """
643
+ return greater_equal(self, other)
644
+
645
+ def __le__(self, other):
646
+ """
647
+ Return (self <= other) element-wise.
648
+
649
+ See Also
650
+ --------
651
+ less_equal
652
+ """
653
+ return less_equal(self, other)
654
+
655
+ def __gt__(self, other):
656
+ """
657
+ Return (self > other) element-wise.
658
+
659
+ See Also
660
+ --------
661
+ greater
662
+ """
663
+ return greater(self, other)
664
+
665
+ def __lt__(self, other):
666
+ """
667
+ Return (self < other) element-wise.
668
+
669
+ See Also
670
+ --------
671
+ less
672
+ """
673
+ return less(self, other)
674
+
675
+ def __add__(self, other):
676
+ """
677
+ Return (self + other), that is string concatenation,
678
+ element-wise for a pair of array_likes of str or unicode.
679
+
680
+ See Also
681
+ --------
682
+ add
683
+ """
684
+ return add(self, other)
685
+
686
+ def __radd__(self, other):
687
+ """
688
+ Return (other + self), that is string concatenation,
689
+ element-wise for a pair of array_likes of `bytes_` or `str_`.
690
+
691
+ See Also
692
+ --------
693
+ add
694
+ """
695
+ return add(other, self)
696
+
697
+ def __mul__(self, i):
698
+ """
699
+ Return (self * i), that is string multiple concatenation,
700
+ element-wise.
701
+
702
+ See Also
703
+ --------
704
+ multiply
705
+ """
706
+ return asarray(multiply(self, i))
707
+
708
+ def __rmul__(self, i):
709
+ """
710
+ Return (self * i), that is string multiple concatenation,
711
+ element-wise.
712
+
713
+ See Also
714
+ --------
715
+ multiply
716
+ """
717
+ return asarray(multiply(self, i))
718
+
719
+ def __mod__(self, i):
720
+ """
721
+ Return (self % i), that is pre-Python 2.6 string formatting
722
+ (interpolation), element-wise for a pair of array_likes of `bytes_`
723
+ or `str_`.
724
+
725
+ See Also
726
+ --------
727
+ mod
728
+ """
729
+ return asarray(mod(self, i))
730
+
731
+ def __rmod__(self, other):
732
+ return NotImplemented
733
+
734
+ def argsort(self, axis=-1, kind=None, order=None):
735
+ """
736
+ Return the indices that sort the array lexicographically.
737
+
738
+ For full documentation see `numpy.argsort`, for which this method is
739
+ in fact merely a "thin wrapper."
740
+
741
+ Examples
742
+ --------
743
+ >>> c = np.array(['a1b c', '1b ca', 'b ca1', 'Ca1b'], 'S5')
744
+ >>> c = c.view(np.char.chararray); c
745
+ chararray(['a1b c', '1b ca', 'b ca1', 'Ca1b'],
746
+ dtype='|S5')
747
+ >>> c[c.argsort()]
748
+ chararray(['1b ca', 'Ca1b', 'a1b c', 'b ca1'],
749
+ dtype='|S5')
750
+
751
+ """
752
+ return self.__array__().argsort(axis, kind, order)
753
+ argsort.__doc__ = ndarray.argsort.__doc__
754
+
755
+ def capitalize(self):
756
+ """
757
+ Return a copy of `self` with only the first character of each element
758
+ capitalized.
759
+
760
+ See Also
761
+ --------
762
+ char.capitalize
763
+
764
+ """
765
+ return asarray(capitalize(self))
766
+
767
+ def center(self, width, fillchar=' '):
768
+ """
769
+ Return a copy of `self` with its elements centered in a
770
+ string of length `width`.
771
+
772
+ See Also
773
+ --------
774
+ center
775
+ """
776
+ return asarray(center(self, width, fillchar))
777
+
778
+ def count(self, sub, start=0, end=None):
779
+ """
780
+ Returns an array with the number of non-overlapping occurrences of
781
+ substring `sub` in the range [`start`, `end`].
782
+
783
+ See Also
784
+ --------
785
+ char.count
786
+
787
+ """
788
+ return count(self, sub, start, end)
789
+
790
+ def decode(self, encoding=None, errors=None):
791
+ """
792
+ Calls ``bytes.decode`` element-wise.
793
+
794
+ See Also
795
+ --------
796
+ char.decode
797
+
798
+ """
799
+ return decode(self, encoding, errors)
800
+
801
+ def encode(self, encoding=None, errors=None):
802
+ """
803
+ Calls :meth:`str.encode` element-wise.
804
+
805
+ See Also
806
+ --------
807
+ char.encode
808
+
809
+ """
810
+ return encode(self, encoding, errors)
811
+
812
+ def endswith(self, suffix, start=0, end=None):
813
+ """
814
+ Returns a boolean array which is `True` where the string element
815
+ in `self` ends with `suffix`, otherwise `False`.
816
+
817
+ See Also
818
+ --------
819
+ char.endswith
820
+
821
+ """
822
+ return endswith(self, suffix, start, end)
823
+
824
+ def expandtabs(self, tabsize=8):
825
+ """
826
+ Return a copy of each string element where all tab characters are
827
+ replaced by one or more spaces.
828
+
829
+ See Also
830
+ --------
831
+ char.expandtabs
832
+
833
+ """
834
+ return asarray(expandtabs(self, tabsize))
835
+
836
+ def find(self, sub, start=0, end=None):
837
+ """
838
+ For each element, return the lowest index in the string where
839
+ substring `sub` is found.
840
+
841
+ See Also
842
+ --------
843
+ char.find
844
+
845
+ """
846
+ return find(self, sub, start, end)
847
+
848
+ def index(self, sub, start=0, end=None):
849
+ """
850
+ Like `find`, but raises :exc:`ValueError` when the substring is not
851
+ found.
852
+
853
+ See Also
854
+ --------
855
+ char.index
856
+
857
+ """
858
+ return index(self, sub, start, end)
859
+
860
+ def isalnum(self):
861
+ """
862
+ Returns true for each element if all characters in the string
863
+ are alphanumeric and there is at least one character, false
864
+ otherwise.
865
+
866
+ See Also
867
+ --------
868
+ char.isalnum
869
+
870
+ """
871
+ return isalnum(self)
872
+
873
+ def isalpha(self):
874
+ """
875
+ Returns true for each element if all characters in the string
876
+ are alphabetic and there is at least one character, false
877
+ otherwise.
878
+
879
+ See Also
880
+ --------
881
+ char.isalpha
882
+
883
+ """
884
+ return isalpha(self)
885
+
886
+ def isdigit(self):
887
+ """
888
+ Returns true for each element if all characters in the string are
889
+ digits and there is at least one character, false otherwise.
890
+
891
+ See Also
892
+ --------
893
+ char.isdigit
894
+
895
+ """
896
+ return isdigit(self)
897
+
898
+ def islower(self):
899
+ """
900
+ Returns true for each element if all cased characters in the
901
+ string are lowercase and there is at least one cased character,
902
+ false otherwise.
903
+
904
+ See Also
905
+ --------
906
+ char.islower
907
+
908
+ """
909
+ return islower(self)
910
+
911
+ def isspace(self):
912
+ """
913
+ Returns true for each element if there are only whitespace
914
+ characters in the string and there is at least one character,
915
+ false otherwise.
916
+
917
+ See Also
918
+ --------
919
+ char.isspace
920
+
921
+ """
922
+ return isspace(self)
923
+
924
+ def istitle(self):
925
+ """
926
+ Returns true for each element if the element is a titlecased
927
+ string and there is at least one character, false otherwise.
928
+
929
+ See Also
930
+ --------
931
+ char.istitle
932
+
933
+ """
934
+ return istitle(self)
935
+
936
+ def isupper(self):
937
+ """
938
+ Returns true for each element if all cased characters in the
939
+ string are uppercase and there is at least one character, false
940
+ otherwise.
941
+
942
+ See Also
943
+ --------
944
+ char.isupper
945
+
946
+ """
947
+ return isupper(self)
948
+
949
+ def join(self, seq):
950
+ """
951
+ Return a string which is the concatenation of the strings in the
952
+ sequence `seq`.
953
+
954
+ See Also
955
+ --------
956
+ char.join
957
+
958
+ """
959
+ return join(self, seq)
960
+
961
+ def ljust(self, width, fillchar=' '):
962
+ """
963
+ Return an array with the elements of `self` left-justified in a
964
+ string of length `width`.
965
+
966
+ See Also
967
+ --------
968
+ char.ljust
969
+
970
+ """
971
+ return asarray(ljust(self, width, fillchar))
972
+
973
+ def lower(self):
974
+ """
975
+ Return an array with the elements of `self` converted to
976
+ lowercase.
977
+
978
+ See Also
979
+ --------
980
+ char.lower
981
+
982
+ """
983
+ return asarray(lower(self))
984
+
985
+ def lstrip(self, chars=None):
986
+ """
987
+ For each element in `self`, return a copy with the leading characters
988
+ removed.
989
+
990
+ See Also
991
+ --------
992
+ char.lstrip
993
+
994
+ """
995
+ return lstrip(self, chars)
996
+
997
+ def partition(self, sep):
998
+ """
999
+ Partition each element in `self` around `sep`.
1000
+
1001
+ See Also
1002
+ --------
1003
+ partition
1004
+ """
1005
+ return asarray(partition(self, sep))
1006
+
1007
+ def replace(self, old, new, count=None):
1008
+ """
1009
+ For each element in `self`, return a copy of the string with all
1010
+ occurrences of substring `old` replaced by `new`.
1011
+
1012
+ See Also
1013
+ --------
1014
+ char.replace
1015
+
1016
+ """
1017
+ return replace(self, old, new, count if count is not None else -1)
1018
+
1019
+ def rfind(self, sub, start=0, end=None):
1020
+ """
1021
+ For each element in `self`, return the highest index in the string
1022
+ where substring `sub` is found, such that `sub` is contained
1023
+ within [`start`, `end`].
1024
+
1025
+ See Also
1026
+ --------
1027
+ char.rfind
1028
+
1029
+ """
1030
+ return rfind(self, sub, start, end)
1031
+
1032
+ def rindex(self, sub, start=0, end=None):
1033
+ """
1034
+ Like `rfind`, but raises :exc:`ValueError` when the substring `sub` is
1035
+ not found.
1036
+
1037
+ See Also
1038
+ --------
1039
+ char.rindex
1040
+
1041
+ """
1042
+ return rindex(self, sub, start, end)
1043
+
1044
+ def rjust(self, width, fillchar=' '):
1045
+ """
1046
+ Return an array with the elements of `self`
1047
+ right-justified in a string of length `width`.
1048
+
1049
+ See Also
1050
+ --------
1051
+ char.rjust
1052
+
1053
+ """
1054
+ return asarray(rjust(self, width, fillchar))
1055
+
1056
+ def rpartition(self, sep):
1057
+ """
1058
+ Partition each element in `self` around `sep`.
1059
+
1060
+ See Also
1061
+ --------
1062
+ rpartition
1063
+ """
1064
+ return asarray(rpartition(self, sep))
1065
+
1066
+ def rsplit(self, sep=None, maxsplit=None):
1067
+ """
1068
+ For each element in `self`, return a list of the words in
1069
+ the string, using `sep` as the delimiter string.
1070
+
1071
+ See Also
1072
+ --------
1073
+ char.rsplit
1074
+
1075
+ """
1076
+ return rsplit(self, sep, maxsplit)
1077
+
1078
+ def rstrip(self, chars=None):
1079
+ """
1080
+ For each element in `self`, return a copy with the trailing
1081
+ characters removed.
1082
+
1083
+ See Also
1084
+ --------
1085
+ char.rstrip
1086
+
1087
+ """
1088
+ return rstrip(self, chars)
1089
+
1090
+ def split(self, sep=None, maxsplit=None):
1091
+ """
1092
+ For each element in `self`, return a list of the words in the
1093
+ string, using `sep` as the delimiter string.
1094
+
1095
+ See Also
1096
+ --------
1097
+ char.split
1098
+
1099
+ """
1100
+ return split(self, sep, maxsplit)
1101
+
1102
+ def splitlines(self, keepends=None):
1103
+ """
1104
+ For each element in `self`, return a list of the lines in the
1105
+ element, breaking at line boundaries.
1106
+
1107
+ See Also
1108
+ --------
1109
+ char.splitlines
1110
+
1111
+ """
1112
+ return splitlines(self, keepends)
1113
+
1114
+ def startswith(self, prefix, start=0, end=None):
1115
+ """
1116
+ Returns a boolean array which is `True` where the string element
1117
+ in `self` starts with `prefix`, otherwise `False`.
1118
+
1119
+ See Also
1120
+ --------
1121
+ char.startswith
1122
+
1123
+ """
1124
+ return startswith(self, prefix, start, end)
1125
+
1126
+ def strip(self, chars=None):
1127
+ """
1128
+ For each element in `self`, return a copy with the leading and
1129
+ trailing characters removed.
1130
+
1131
+ See Also
1132
+ --------
1133
+ char.strip
1134
+
1135
+ """
1136
+ return strip(self, chars)
1137
+
1138
+ def swapcase(self):
1139
+ """
1140
+ For each element in `self`, return a copy of the string with
1141
+ uppercase characters converted to lowercase and vice versa.
1142
+
1143
+ See Also
1144
+ --------
1145
+ char.swapcase
1146
+
1147
+ """
1148
+ return asarray(swapcase(self))
1149
+
1150
+ def title(self):
1151
+ """
1152
+ For each element in `self`, return a titlecased version of the
1153
+ string: words start with uppercase characters, all remaining cased
1154
+ characters are lowercase.
1155
+
1156
+ See Also
1157
+ --------
1158
+ char.title
1159
+
1160
+ """
1161
+ return asarray(title(self))
1162
+
1163
+ def translate(self, table, deletechars=None):
1164
+ """
1165
+ For each element in `self`, return a copy of the string where
1166
+ all characters occurring in the optional argument
1167
+ `deletechars` are removed, and the remaining characters have
1168
+ been mapped through the given translation table.
1169
+
1170
+ See Also
1171
+ --------
1172
+ char.translate
1173
+
1174
+ """
1175
+ return asarray(translate(self, table, deletechars))
1176
+
1177
+ def upper(self):
1178
+ """
1179
+ Return an array with the elements of `self` converted to
1180
+ uppercase.
1181
+
1182
+ See Also
1183
+ --------
1184
+ char.upper
1185
+
1186
+ """
1187
+ return asarray(upper(self))
1188
+
1189
+ def zfill(self, width):
1190
+ """
1191
+ Return the numeric string left-filled with zeros in a string of
1192
+ length `width`.
1193
+
1194
+ See Also
1195
+ --------
1196
+ char.zfill
1197
+
1198
+ """
1199
+ return asarray(zfill(self, width))
1200
+
1201
+ def isnumeric(self):
1202
+ """
1203
+ For each element in `self`, return True if there are only
1204
+ numeric characters in the element.
1205
+
1206
+ See Also
1207
+ --------
1208
+ char.isnumeric
1209
+
1210
+ """
1211
+ return isnumeric(self)
1212
+
1213
+ def isdecimal(self):
1214
+ """
1215
+ For each element in `self`, return True if there are only
1216
+ decimal characters in the element.
1217
+
1218
+ See Also
1219
+ --------
1220
+ char.isdecimal
1221
+
1222
+ """
1223
+ return isdecimal(self)
1224
+
1225
+
1226
+ @set_module("numpy.char")
1227
+ def array(obj, itemsize=None, copy=True, unicode=None, order=None):
1228
+ """
1229
+ Create a `~numpy.char.chararray`.
1230
+
1231
+ .. note::
1232
+ This class is provided for numarray backward-compatibility.
1233
+ New code (not concerned with numarray compatibility) should use
1234
+ arrays of type `bytes_` or `str_` and use the free functions
1235
+ in :mod:`numpy.char` for fast vectorized string operations instead.
1236
+
1237
+ Versus a NumPy array of dtype `bytes_` or `str_`, this
1238
+ class adds the following functionality:
1239
+
1240
+ 1) values automatically have whitespace removed from the end
1241
+ when indexed
1242
+
1243
+ 2) comparison operators automatically remove whitespace from the
1244
+ end when comparing values
1245
+
1246
+ 3) vectorized string operations are provided as methods
1247
+ (e.g. `chararray.endswith <numpy.char.chararray.endswith>`)
1248
+ and infix operators (e.g. ``+, *, %``)
1249
+
1250
+ Parameters
1251
+ ----------
1252
+ obj : array of str or unicode-like
1253
+
1254
+ itemsize : int, optional
1255
+ `itemsize` is the number of characters per scalar in the
1256
+ resulting array. If `itemsize` is None, and `obj` is an
1257
+ object array or a Python list, the `itemsize` will be
1258
+ automatically determined. If `itemsize` is provided and `obj`
1259
+ is of type str or unicode, then the `obj` string will be
1260
+ chunked into `itemsize` pieces.
1261
+
1262
+ copy : bool, optional
1263
+ If true (default), then the object is copied. Otherwise, a copy
1264
+ will only be made if ``__array__`` returns a copy, if obj is a
1265
+ nested sequence, or if a copy is needed to satisfy any of the other
1266
+ requirements (`itemsize`, unicode, `order`, etc.).
1267
+
1268
+ unicode : bool, optional
1269
+ When true, the resulting `~numpy.char.chararray` can contain Unicode
1270
+ characters, when false only 8-bit characters. If unicode is
1271
+ None and `obj` is one of the following:
1272
+
1273
+ - a `~numpy.char.chararray`,
1274
+ - an ndarray of type :class:`str_` or :class:`bytes_`
1275
+ - a Python :class:`str` or :class:`bytes` object,
1276
+
1277
+ then the unicode setting of the output array will be
1278
+ automatically determined.
1279
+
1280
+ order : {'C', 'F', 'A'}, optional
1281
+ Specify the order of the array. If order is 'C' (default), then the
1282
+ array will be in C-contiguous order (last-index varies the
1283
+ fastest). If order is 'F', then the returned array
1284
+ will be in Fortran-contiguous order (first-index varies the
1285
+ fastest). If order is 'A', then the returned array may
1286
+ be in any order (either C-, Fortran-contiguous, or even
1287
+ discontiguous).
1288
+
1289
+ Examples
1290
+ --------
1291
+
1292
+ >>> import numpy as np
1293
+ >>> char_array = np.char.array(['hello', 'world', 'numpy','array'])
1294
+ >>> char_array
1295
+ chararray(['hello', 'world', 'numpy', 'array'], dtype='<U5')
1296
+
1297
+ """
1298
+ if isinstance(obj, (bytes, str)):
1299
+ if unicode is None:
1300
+ if isinstance(obj, str):
1301
+ unicode = True
1302
+ else:
1303
+ unicode = False
1304
+
1305
+ if itemsize is None:
1306
+ itemsize = len(obj)
1307
+ shape = len(obj) // itemsize
1308
+
1309
+ return chararray(shape, itemsize=itemsize, unicode=unicode,
1310
+ buffer=obj, order=order)
1311
+
1312
+ if isinstance(obj, (list, tuple)):
1313
+ obj = asnarray(obj)
1314
+
1315
+ if isinstance(obj, ndarray) and issubclass(obj.dtype.type, character):
1316
+ # If we just have a vanilla chararray, create a chararray
1317
+ # view around it.
1318
+ if not isinstance(obj, chararray):
1319
+ obj = obj.view(chararray)
1320
+
1321
+ if itemsize is None:
1322
+ itemsize = obj.itemsize
1323
+ # itemsize is in 8-bit chars, so for Unicode, we need
1324
+ # to divide by the size of a single Unicode character,
1325
+ # which for NumPy is always 4
1326
+ if issubclass(obj.dtype.type, str_):
1327
+ itemsize //= 4
1328
+
1329
+ if unicode is None:
1330
+ if issubclass(obj.dtype.type, str_):
1331
+ unicode = True
1332
+ else:
1333
+ unicode = False
1334
+
1335
+ if unicode:
1336
+ dtype = str_
1337
+ else:
1338
+ dtype = bytes_
1339
+
1340
+ if order is not None:
1341
+ obj = asnarray(obj, order=order)
1342
+ if (copy or
1343
+ (itemsize != obj.itemsize) or
1344
+ (not unicode and isinstance(obj, str_)) or
1345
+ (unicode and isinstance(obj, bytes_))):
1346
+ obj = obj.astype((dtype, int(itemsize)))
1347
+ return obj
1348
+
1349
+ if isinstance(obj, ndarray) and issubclass(obj.dtype.type, object):
1350
+ if itemsize is None:
1351
+ # Since no itemsize was specified, convert the input array to
1352
+ # a list so the ndarray constructor will automatically
1353
+ # determine the itemsize for us.
1354
+ obj = obj.tolist()
1355
+ # Fall through to the default case
1356
+
1357
+ if unicode:
1358
+ dtype = str_
1359
+ else:
1360
+ dtype = bytes_
1361
+
1362
+ if itemsize is None:
1363
+ val = narray(obj, dtype=dtype, order=order, subok=True)
1364
+ else:
1365
+ val = narray(obj, dtype=(dtype, itemsize), order=order, subok=True)
1366
+ return val.view(chararray)
1367
+
1368
+
1369
+ @set_module("numpy.char")
1370
+ def asarray(obj, itemsize=None, unicode=None, order=None):
1371
+ """
1372
+ Convert the input to a `~numpy.char.chararray`, copying the data only if
1373
+ necessary.
1374
+
1375
+ Versus a NumPy array of dtype `bytes_` or `str_`, this
1376
+ class adds the following functionality:
1377
+
1378
+ 1) values automatically have whitespace removed from the end
1379
+ when indexed
1380
+
1381
+ 2) comparison operators automatically remove whitespace from the
1382
+ end when comparing values
1383
+
1384
+ 3) vectorized string operations are provided as methods
1385
+ (e.g. `chararray.endswith <numpy.char.chararray.endswith>`)
1386
+ and infix operators (e.g. ``+``, ``*``, ``%``)
1387
+
1388
+ Parameters
1389
+ ----------
1390
+ obj : array of str or unicode-like
1391
+
1392
+ itemsize : int, optional
1393
+ `itemsize` is the number of characters per scalar in the
1394
+ resulting array. If `itemsize` is None, and `obj` is an
1395
+ object array or a Python list, the `itemsize` will be
1396
+ automatically determined. If `itemsize` is provided and `obj`
1397
+ is of type str or unicode, then the `obj` string will be
1398
+ chunked into `itemsize` pieces.
1399
+
1400
+ unicode : bool, optional
1401
+ When true, the resulting `~numpy.char.chararray` can contain Unicode
1402
+ characters, when false only 8-bit characters. If unicode is
1403
+ None and `obj` is one of the following:
1404
+
1405
+ - a `~numpy.char.chararray`,
1406
+ - an ndarray of type `str_` or `unicode_`
1407
+ - a Python str or unicode object,
1408
+
1409
+ then the unicode setting of the output array will be
1410
+ automatically determined.
1411
+
1412
+ order : {'C', 'F'}, optional
1413
+ Specify the order of the array. If order is 'C' (default), then the
1414
+ array will be in C-contiguous order (last-index varies the
1415
+ fastest). If order is 'F', then the returned array
1416
+ will be in Fortran-contiguous order (first-index varies the
1417
+ fastest).
1418
+
1419
+ Examples
1420
+ --------
1421
+ >>> import numpy as np
1422
+ >>> np.char.asarray(['hello', 'world'])
1423
+ chararray(['hello', 'world'], dtype='<U5')
1424
+
1425
+ """
1426
+ return array(obj, itemsize, copy=False,
1427
+ unicode=unicode, order=order)
venv/lib/python3.13/site-packages/numpy/_core/defchararray.pyi ADDED
@@ -0,0 +1,1135 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Any, Self, SupportsIndex, SupportsInt, TypeAlias, overload
2
+ from typing import Literal as L
3
+
4
+ from typing_extensions import TypeVar
5
+
6
+ import numpy as np
7
+ from numpy import (
8
+ _OrderKACF,
9
+ _SupportsBuffer,
10
+ bytes_,
11
+ dtype,
12
+ int_,
13
+ ndarray,
14
+ object_,
15
+ str_,
16
+ )
17
+ from numpy._core.multiarray import compare_chararrays
18
+ from numpy._typing import NDArray, _AnyShape, _Shape, _ShapeLike, _SupportsArray
19
+ from numpy._typing import _ArrayLikeAnyString_co as UST_co
20
+ from numpy._typing import _ArrayLikeBool_co as b_co
21
+ from numpy._typing import _ArrayLikeBytes_co as S_co
22
+ from numpy._typing import _ArrayLikeInt_co as i_co
23
+ from numpy._typing import _ArrayLikeStr_co as U_co
24
+ from numpy._typing import _ArrayLikeString_co as T_co
25
+
26
+ __all__ = [
27
+ "equal",
28
+ "not_equal",
29
+ "greater_equal",
30
+ "less_equal",
31
+ "greater",
32
+ "less",
33
+ "str_len",
34
+ "add",
35
+ "multiply",
36
+ "mod",
37
+ "capitalize",
38
+ "center",
39
+ "count",
40
+ "decode",
41
+ "encode",
42
+ "endswith",
43
+ "expandtabs",
44
+ "find",
45
+ "index",
46
+ "isalnum",
47
+ "isalpha",
48
+ "isdigit",
49
+ "islower",
50
+ "isspace",
51
+ "istitle",
52
+ "isupper",
53
+ "join",
54
+ "ljust",
55
+ "lower",
56
+ "lstrip",
57
+ "partition",
58
+ "replace",
59
+ "rfind",
60
+ "rindex",
61
+ "rjust",
62
+ "rpartition",
63
+ "rsplit",
64
+ "rstrip",
65
+ "split",
66
+ "splitlines",
67
+ "startswith",
68
+ "strip",
69
+ "swapcase",
70
+ "title",
71
+ "translate",
72
+ "upper",
73
+ "zfill",
74
+ "isnumeric",
75
+ "isdecimal",
76
+ "array",
77
+ "asarray",
78
+ "compare_chararrays",
79
+ "chararray",
80
+ ]
81
+
82
+ _ShapeT_co = TypeVar("_ShapeT_co", bound=_Shape, default=_AnyShape, covariant=True)
83
+ _CharacterT = TypeVar("_CharacterT", bound=np.character)
84
+ _CharDTypeT_co = TypeVar("_CharDTypeT_co", bound=dtype[np.character], default=dtype, covariant=True)
85
+
86
+ _CharArray: TypeAlias = chararray[_AnyShape, dtype[_CharacterT]]
87
+
88
+ _StringDTypeArray: TypeAlias = np.ndarray[_AnyShape, np.dtypes.StringDType]
89
+ _StringDTypeOrUnicodeArray: TypeAlias = _StringDTypeArray | NDArray[np.str_]
90
+ _StringDTypeSupportsArray: TypeAlias = _SupportsArray[np.dtypes.StringDType]
91
+
92
+ class chararray(ndarray[_ShapeT_co, _CharDTypeT_co]):
93
+ @overload
94
+ def __new__(
95
+ subtype,
96
+ shape: _ShapeLike,
97
+ itemsize: SupportsIndex | SupportsInt = ...,
98
+ unicode: L[False] = ...,
99
+ buffer: _SupportsBuffer = ...,
100
+ offset: SupportsIndex = ...,
101
+ strides: _ShapeLike = ...,
102
+ order: _OrderKACF = ...,
103
+ ) -> _CharArray[bytes_]: ...
104
+ @overload
105
+ def __new__(
106
+ subtype,
107
+ shape: _ShapeLike,
108
+ itemsize: SupportsIndex | SupportsInt = ...,
109
+ unicode: L[True] = ...,
110
+ buffer: _SupportsBuffer = ...,
111
+ offset: SupportsIndex = ...,
112
+ strides: _ShapeLike = ...,
113
+ order: _OrderKACF = ...,
114
+ ) -> _CharArray[str_]: ...
115
+
116
+ def __array_finalize__(self, obj: object) -> None: ...
117
+ def __mul__(self, other: i_co) -> chararray[_AnyShape, _CharDTypeT_co]: ...
118
+ def __rmul__(self, other: i_co) -> chararray[_AnyShape, _CharDTypeT_co]: ...
119
+ def __mod__(self, i: Any) -> chararray[_AnyShape, _CharDTypeT_co]: ...
120
+
121
+ @overload
122
+ def __eq__(
123
+ self: _CharArray[str_],
124
+ other: U_co,
125
+ ) -> NDArray[np.bool]: ...
126
+ @overload
127
+ def __eq__(
128
+ self: _CharArray[bytes_],
129
+ other: S_co,
130
+ ) -> NDArray[np.bool]: ...
131
+
132
+ @overload
133
+ def __ne__(
134
+ self: _CharArray[str_],
135
+ other: U_co,
136
+ ) -> NDArray[np.bool]: ...
137
+ @overload
138
+ def __ne__(
139
+ self: _CharArray[bytes_],
140
+ other: S_co,
141
+ ) -> NDArray[np.bool]: ...
142
+
143
+ @overload
144
+ def __ge__(
145
+ self: _CharArray[str_],
146
+ other: U_co,
147
+ ) -> NDArray[np.bool]: ...
148
+ @overload
149
+ def __ge__(
150
+ self: _CharArray[bytes_],
151
+ other: S_co,
152
+ ) -> NDArray[np.bool]: ...
153
+
154
+ @overload
155
+ def __le__(
156
+ self: _CharArray[str_],
157
+ other: U_co,
158
+ ) -> NDArray[np.bool]: ...
159
+ @overload
160
+ def __le__(
161
+ self: _CharArray[bytes_],
162
+ other: S_co,
163
+ ) -> NDArray[np.bool]: ...
164
+
165
+ @overload
166
+ def __gt__(
167
+ self: _CharArray[str_],
168
+ other: U_co,
169
+ ) -> NDArray[np.bool]: ...
170
+ @overload
171
+ def __gt__(
172
+ self: _CharArray[bytes_],
173
+ other: S_co,
174
+ ) -> NDArray[np.bool]: ...
175
+
176
+ @overload
177
+ def __lt__(
178
+ self: _CharArray[str_],
179
+ other: U_co,
180
+ ) -> NDArray[np.bool]: ...
181
+ @overload
182
+ def __lt__(
183
+ self: _CharArray[bytes_],
184
+ other: S_co,
185
+ ) -> NDArray[np.bool]: ...
186
+
187
+ @overload
188
+ def __add__(
189
+ self: _CharArray[str_],
190
+ other: U_co,
191
+ ) -> _CharArray[str_]: ...
192
+ @overload
193
+ def __add__(
194
+ self: _CharArray[bytes_],
195
+ other: S_co,
196
+ ) -> _CharArray[bytes_]: ...
197
+
198
+ @overload
199
+ def __radd__(
200
+ self: _CharArray[str_],
201
+ other: U_co,
202
+ ) -> _CharArray[str_]: ...
203
+ @overload
204
+ def __radd__(
205
+ self: _CharArray[bytes_],
206
+ other: S_co,
207
+ ) -> _CharArray[bytes_]: ...
208
+
209
+ @overload
210
+ def center(
211
+ self: _CharArray[str_],
212
+ width: i_co,
213
+ fillchar: U_co = ...,
214
+ ) -> _CharArray[str_]: ...
215
+ @overload
216
+ def center(
217
+ self: _CharArray[bytes_],
218
+ width: i_co,
219
+ fillchar: S_co = ...,
220
+ ) -> _CharArray[bytes_]: ...
221
+
222
+ @overload
223
+ def count(
224
+ self: _CharArray[str_],
225
+ sub: U_co,
226
+ start: i_co = ...,
227
+ end: i_co | None = ...,
228
+ ) -> NDArray[int_]: ...
229
+ @overload
230
+ def count(
231
+ self: _CharArray[bytes_],
232
+ sub: S_co,
233
+ start: i_co = ...,
234
+ end: i_co | None = ...,
235
+ ) -> NDArray[int_]: ...
236
+
237
+ def decode(
238
+ self: _CharArray[bytes_],
239
+ encoding: str | None = ...,
240
+ errors: str | None = ...,
241
+ ) -> _CharArray[str_]: ...
242
+
243
+ def encode(
244
+ self: _CharArray[str_],
245
+ encoding: str | None = ...,
246
+ errors: str | None = ...,
247
+ ) -> _CharArray[bytes_]: ...
248
+
249
+ @overload
250
+ def endswith(
251
+ self: _CharArray[str_],
252
+ suffix: U_co,
253
+ start: i_co = ...,
254
+ end: i_co | None = ...,
255
+ ) -> NDArray[np.bool]: ...
256
+ @overload
257
+ def endswith(
258
+ self: _CharArray[bytes_],
259
+ suffix: S_co,
260
+ start: i_co = ...,
261
+ end: i_co | None = ...,
262
+ ) -> NDArray[np.bool]: ...
263
+
264
+ def expandtabs(
265
+ self,
266
+ tabsize: i_co = ...,
267
+ ) -> Self: ...
268
+
269
+ @overload
270
+ def find(
271
+ self: _CharArray[str_],
272
+ sub: U_co,
273
+ start: i_co = ...,
274
+ end: i_co | None = ...,
275
+ ) -> NDArray[int_]: ...
276
+ @overload
277
+ def find(
278
+ self: _CharArray[bytes_],
279
+ sub: S_co,
280
+ start: i_co = ...,
281
+ end: i_co | None = ...,
282
+ ) -> NDArray[int_]: ...
283
+
284
+ @overload
285
+ def index(
286
+ self: _CharArray[str_],
287
+ sub: U_co,
288
+ start: i_co = ...,
289
+ end: i_co | None = ...,
290
+ ) -> NDArray[int_]: ...
291
+ @overload
292
+ def index(
293
+ self: _CharArray[bytes_],
294
+ sub: S_co,
295
+ start: i_co = ...,
296
+ end: i_co | None = ...,
297
+ ) -> NDArray[int_]: ...
298
+
299
+ @overload
300
+ def join(
301
+ self: _CharArray[str_],
302
+ seq: U_co,
303
+ ) -> _CharArray[str_]: ...
304
+ @overload
305
+ def join(
306
+ self: _CharArray[bytes_],
307
+ seq: S_co,
308
+ ) -> _CharArray[bytes_]: ...
309
+
310
+ @overload
311
+ def ljust(
312
+ self: _CharArray[str_],
313
+ width: i_co,
314
+ fillchar: U_co = ...,
315
+ ) -> _CharArray[str_]: ...
316
+ @overload
317
+ def ljust(
318
+ self: _CharArray[bytes_],
319
+ width: i_co,
320
+ fillchar: S_co = ...,
321
+ ) -> _CharArray[bytes_]: ...
322
+
323
+ @overload
324
+ def lstrip(
325
+ self: _CharArray[str_],
326
+ chars: U_co | None = ...,
327
+ ) -> _CharArray[str_]: ...
328
+ @overload
329
+ def lstrip(
330
+ self: _CharArray[bytes_],
331
+ chars: S_co | None = ...,
332
+ ) -> _CharArray[bytes_]: ...
333
+
334
+ @overload
335
+ def partition(
336
+ self: _CharArray[str_],
337
+ sep: U_co,
338
+ ) -> _CharArray[str_]: ...
339
+ @overload
340
+ def partition(
341
+ self: _CharArray[bytes_],
342
+ sep: S_co,
343
+ ) -> _CharArray[bytes_]: ...
344
+
345
+ @overload
346
+ def replace(
347
+ self: _CharArray[str_],
348
+ old: U_co,
349
+ new: U_co,
350
+ count: i_co | None = ...,
351
+ ) -> _CharArray[str_]: ...
352
+ @overload
353
+ def replace(
354
+ self: _CharArray[bytes_],
355
+ old: S_co,
356
+ new: S_co,
357
+ count: i_co | None = ...,
358
+ ) -> _CharArray[bytes_]: ...
359
+
360
+ @overload
361
+ def rfind(
362
+ self: _CharArray[str_],
363
+ sub: U_co,
364
+ start: i_co = ...,
365
+ end: i_co | None = ...,
366
+ ) -> NDArray[int_]: ...
367
+ @overload
368
+ def rfind(
369
+ self: _CharArray[bytes_],
370
+ sub: S_co,
371
+ start: i_co = ...,
372
+ end: i_co | None = ...,
373
+ ) -> NDArray[int_]: ...
374
+
375
+ @overload
376
+ def rindex(
377
+ self: _CharArray[str_],
378
+ sub: U_co,
379
+ start: i_co = ...,
380
+ end: i_co | None = ...,
381
+ ) -> NDArray[int_]: ...
382
+ @overload
383
+ def rindex(
384
+ self: _CharArray[bytes_],
385
+ sub: S_co,
386
+ start: i_co = ...,
387
+ end: i_co | None = ...,
388
+ ) -> NDArray[int_]: ...
389
+
390
+ @overload
391
+ def rjust(
392
+ self: _CharArray[str_],
393
+ width: i_co,
394
+ fillchar: U_co = ...,
395
+ ) -> _CharArray[str_]: ...
396
+ @overload
397
+ def rjust(
398
+ self: _CharArray[bytes_],
399
+ width: i_co,
400
+ fillchar: S_co = ...,
401
+ ) -> _CharArray[bytes_]: ...
402
+
403
+ @overload
404
+ def rpartition(
405
+ self: _CharArray[str_],
406
+ sep: U_co,
407
+ ) -> _CharArray[str_]: ...
408
+ @overload
409
+ def rpartition(
410
+ self: _CharArray[bytes_],
411
+ sep: S_co,
412
+ ) -> _CharArray[bytes_]: ...
413
+
414
+ @overload
415
+ def rsplit(
416
+ self: _CharArray[str_],
417
+ sep: U_co | None = ...,
418
+ maxsplit: i_co | None = ...,
419
+ ) -> NDArray[object_]: ...
420
+ @overload
421
+ def rsplit(
422
+ self: _CharArray[bytes_],
423
+ sep: S_co | None = ...,
424
+ maxsplit: i_co | None = ...,
425
+ ) -> NDArray[object_]: ...
426
+
427
+ @overload
428
+ def rstrip(
429
+ self: _CharArray[str_],
430
+ chars: U_co | None = ...,
431
+ ) -> _CharArray[str_]: ...
432
+ @overload
433
+ def rstrip(
434
+ self: _CharArray[bytes_],
435
+ chars: S_co | None = ...,
436
+ ) -> _CharArray[bytes_]: ...
437
+
438
+ @overload
439
+ def split(
440
+ self: _CharArray[str_],
441
+ sep: U_co | None = ...,
442
+ maxsplit: i_co | None = ...,
443
+ ) -> NDArray[object_]: ...
444
+ @overload
445
+ def split(
446
+ self: _CharArray[bytes_],
447
+ sep: S_co | None = ...,
448
+ maxsplit: i_co | None = ...,
449
+ ) -> NDArray[object_]: ...
450
+
451
+ def splitlines(self, keepends: b_co | None = ...) -> NDArray[object_]: ...
452
+
453
+ @overload
454
+ def startswith(
455
+ self: _CharArray[str_],
456
+ prefix: U_co,
457
+ start: i_co = ...,
458
+ end: i_co | None = ...,
459
+ ) -> NDArray[np.bool]: ...
460
+ @overload
461
+ def startswith(
462
+ self: _CharArray[bytes_],
463
+ prefix: S_co,
464
+ start: i_co = ...,
465
+ end: i_co | None = ...,
466
+ ) -> NDArray[np.bool]: ...
467
+
468
+ @overload
469
+ def strip(
470
+ self: _CharArray[str_],
471
+ chars: U_co | None = ...,
472
+ ) -> _CharArray[str_]: ...
473
+ @overload
474
+ def strip(
475
+ self: _CharArray[bytes_],
476
+ chars: S_co | None = ...,
477
+ ) -> _CharArray[bytes_]: ...
478
+
479
+ @overload
480
+ def translate(
481
+ self: _CharArray[str_],
482
+ table: U_co,
483
+ deletechars: U_co | None = ...,
484
+ ) -> _CharArray[str_]: ...
485
+ @overload
486
+ def translate(
487
+ self: _CharArray[bytes_],
488
+ table: S_co,
489
+ deletechars: S_co | None = ...,
490
+ ) -> _CharArray[bytes_]: ...
491
+
492
+ def zfill(self, width: i_co) -> Self: ...
493
+ def capitalize(self) -> Self: ...
494
+ def title(self) -> Self: ...
495
+ def swapcase(self) -> Self: ...
496
+ def lower(self) -> Self: ...
497
+ def upper(self) -> Self: ...
498
+ def isalnum(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ...
499
+ def isalpha(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ...
500
+ def isdigit(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ...
501
+ def islower(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ...
502
+ def isspace(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ...
503
+ def istitle(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ...
504
+ def isupper(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ...
505
+ def isnumeric(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ...
506
+ def isdecimal(self) -> ndarray[_ShapeT_co, dtype[np.bool]]: ...
507
+
508
+ # Comparison
509
+ @overload
510
+ def equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ...
511
+ @overload
512
+ def equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ...
513
+ @overload
514
+ def equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ...
515
+
516
+ @overload
517
+ def not_equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ...
518
+ @overload
519
+ def not_equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ...
520
+ @overload
521
+ def not_equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ...
522
+
523
+ @overload
524
+ def greater_equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ...
525
+ @overload
526
+ def greater_equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ...
527
+ @overload
528
+ def greater_equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ...
529
+
530
+ @overload
531
+ def less_equal(x1: U_co, x2: U_co) -> NDArray[np.bool]: ...
532
+ @overload
533
+ def less_equal(x1: S_co, x2: S_co) -> NDArray[np.bool]: ...
534
+ @overload
535
+ def less_equal(x1: T_co, x2: T_co) -> NDArray[np.bool]: ...
536
+
537
+ @overload
538
+ def greater(x1: U_co, x2: U_co) -> NDArray[np.bool]: ...
539
+ @overload
540
+ def greater(x1: S_co, x2: S_co) -> NDArray[np.bool]: ...
541
+ @overload
542
+ def greater(x1: T_co, x2: T_co) -> NDArray[np.bool]: ...
543
+
544
+ @overload
545
+ def less(x1: U_co, x2: U_co) -> NDArray[np.bool]: ...
546
+ @overload
547
+ def less(x1: S_co, x2: S_co) -> NDArray[np.bool]: ...
548
+ @overload
549
+ def less(x1: T_co, x2: T_co) -> NDArray[np.bool]: ...
550
+
551
+ @overload
552
+ def add(x1: U_co, x2: U_co) -> NDArray[np.str_]: ...
553
+ @overload
554
+ def add(x1: S_co, x2: S_co) -> NDArray[np.bytes_]: ...
555
+ @overload
556
+ def add(x1: _StringDTypeSupportsArray, x2: _StringDTypeSupportsArray) -> _StringDTypeArray: ...
557
+ @overload
558
+ def add(x1: T_co, x2: T_co) -> _StringDTypeOrUnicodeArray: ...
559
+
560
+ @overload
561
+ def multiply(a: U_co, i: i_co) -> NDArray[np.str_]: ...
562
+ @overload
563
+ def multiply(a: S_co, i: i_co) -> NDArray[np.bytes_]: ...
564
+ @overload
565
+ def multiply(a: _StringDTypeSupportsArray, i: i_co) -> _StringDTypeArray: ...
566
+ @overload
567
+ def multiply(a: T_co, i: i_co) -> _StringDTypeOrUnicodeArray: ...
568
+
569
+ @overload
570
+ def mod(a: U_co, value: Any) -> NDArray[np.str_]: ...
571
+ @overload
572
+ def mod(a: S_co, value: Any) -> NDArray[np.bytes_]: ...
573
+ @overload
574
+ def mod(a: _StringDTypeSupportsArray, value: Any) -> _StringDTypeArray: ...
575
+ @overload
576
+ def mod(a: T_co, value: Any) -> _StringDTypeOrUnicodeArray: ...
577
+
578
+ @overload
579
+ def capitalize(a: U_co) -> NDArray[str_]: ...
580
+ @overload
581
+ def capitalize(a: S_co) -> NDArray[bytes_]: ...
582
+ @overload
583
+ def capitalize(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ...
584
+ @overload
585
+ def capitalize(a: T_co) -> _StringDTypeOrUnicodeArray: ...
586
+
587
+ @overload
588
+ def center(a: U_co, width: i_co, fillchar: U_co = ...) -> NDArray[str_]: ...
589
+ @overload
590
+ def center(a: S_co, width: i_co, fillchar: S_co = ...) -> NDArray[bytes_]: ...
591
+ @overload
592
+ def center(a: _StringDTypeSupportsArray, width: i_co, fillchar: _StringDTypeSupportsArray = ...) -> _StringDTypeArray: ...
593
+ @overload
594
+ def center(a: T_co, width: i_co, fillchar: T_co = ...) -> _StringDTypeOrUnicodeArray: ...
595
+
596
+ def decode(
597
+ a: S_co,
598
+ encoding: str | None = ...,
599
+ errors: str | None = ...,
600
+ ) -> NDArray[str_]: ...
601
+ def encode(
602
+ a: U_co | T_co,
603
+ encoding: str | None = ...,
604
+ errors: str | None = ...,
605
+ ) -> NDArray[bytes_]: ...
606
+
607
+ @overload
608
+ def expandtabs(a: U_co, tabsize: i_co = ...) -> NDArray[str_]: ...
609
+ @overload
610
+ def expandtabs(a: S_co, tabsize: i_co = ...) -> NDArray[bytes_]: ...
611
+ @overload
612
+ def expandtabs(a: _StringDTypeSupportsArray, tabsize: i_co = ...) -> _StringDTypeArray: ...
613
+ @overload
614
+ def expandtabs(a: T_co, tabsize: i_co = ...) -> _StringDTypeOrUnicodeArray: ...
615
+
616
+ @overload
617
+ def join(sep: U_co, seq: U_co) -> NDArray[str_]: ...
618
+ @overload
619
+ def join(sep: S_co, seq: S_co) -> NDArray[bytes_]: ...
620
+ @overload
621
+ def join(sep: _StringDTypeSupportsArray, seq: _StringDTypeSupportsArray) -> _StringDTypeArray: ...
622
+ @overload
623
+ def join(sep: T_co, seq: T_co) -> _StringDTypeOrUnicodeArray: ...
624
+
625
+ @overload
626
+ def ljust(a: U_co, width: i_co, fillchar: U_co = ...) -> NDArray[str_]: ...
627
+ @overload
628
+ def ljust(a: S_co, width: i_co, fillchar: S_co = ...) -> NDArray[bytes_]: ...
629
+ @overload
630
+ def ljust(a: _StringDTypeSupportsArray, width: i_co, fillchar: _StringDTypeSupportsArray = ...) -> _StringDTypeArray: ...
631
+ @overload
632
+ def ljust(a: T_co, width: i_co, fillchar: T_co = ...) -> _StringDTypeOrUnicodeArray: ...
633
+
634
+ @overload
635
+ def lower(a: U_co) -> NDArray[str_]: ...
636
+ @overload
637
+ def lower(a: S_co) -> NDArray[bytes_]: ...
638
+ @overload
639
+ def lower(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ...
640
+ @overload
641
+ def lower(a: T_co) -> _StringDTypeOrUnicodeArray: ...
642
+
643
+ @overload
644
+ def lstrip(a: U_co, chars: U_co | None = ...) -> NDArray[str_]: ...
645
+ @overload
646
+ def lstrip(a: S_co, chars: S_co | None = ...) -> NDArray[bytes_]: ...
647
+ @overload
648
+ def lstrip(a: _StringDTypeSupportsArray, chars: _StringDTypeSupportsArray | None = ...) -> _StringDTypeArray: ...
649
+ @overload
650
+ def lstrip(a: T_co, chars: T_co | None = ...) -> _StringDTypeOrUnicodeArray: ...
651
+
652
+ @overload
653
+ def partition(a: U_co, sep: U_co) -> NDArray[str_]: ...
654
+ @overload
655
+ def partition(a: S_co, sep: S_co) -> NDArray[bytes_]: ...
656
+ @overload
657
+ def partition(a: _StringDTypeSupportsArray, sep: _StringDTypeSupportsArray) -> _StringDTypeArray: ...
658
+ @overload
659
+ def partition(a: T_co, sep: T_co) -> _StringDTypeOrUnicodeArray: ...
660
+
661
+ @overload
662
+ def replace(
663
+ a: U_co,
664
+ old: U_co,
665
+ new: U_co,
666
+ count: i_co | None = ...,
667
+ ) -> NDArray[str_]: ...
668
+ @overload
669
+ def replace(
670
+ a: S_co,
671
+ old: S_co,
672
+ new: S_co,
673
+ count: i_co | None = ...,
674
+ ) -> NDArray[bytes_]: ...
675
+ @overload
676
+ def replace(
677
+ a: _StringDTypeSupportsArray,
678
+ old: _StringDTypeSupportsArray,
679
+ new: _StringDTypeSupportsArray,
680
+ count: i_co = ...,
681
+ ) -> _StringDTypeArray: ...
682
+ @overload
683
+ def replace(
684
+ a: T_co,
685
+ old: T_co,
686
+ new: T_co,
687
+ count: i_co = ...,
688
+ ) -> _StringDTypeOrUnicodeArray: ...
689
+
690
+ @overload
691
+ def rjust(
692
+ a: U_co,
693
+ width: i_co,
694
+ fillchar: U_co = ...,
695
+ ) -> NDArray[str_]: ...
696
+ @overload
697
+ def rjust(
698
+ a: S_co,
699
+ width: i_co,
700
+ fillchar: S_co = ...,
701
+ ) -> NDArray[bytes_]: ...
702
+ @overload
703
+ def rjust(
704
+ a: _StringDTypeSupportsArray,
705
+ width: i_co,
706
+ fillchar: _StringDTypeSupportsArray = ...,
707
+ ) -> _StringDTypeArray: ...
708
+ @overload
709
+ def rjust(
710
+ a: T_co,
711
+ width: i_co,
712
+ fillchar: T_co = ...,
713
+ ) -> _StringDTypeOrUnicodeArray: ...
714
+
715
+ @overload
716
+ def rpartition(a: U_co, sep: U_co) -> NDArray[str_]: ...
717
+ @overload
718
+ def rpartition(a: S_co, sep: S_co) -> NDArray[bytes_]: ...
719
+ @overload
720
+ def rpartition(a: _StringDTypeSupportsArray, sep: _StringDTypeSupportsArray) -> _StringDTypeArray: ...
721
+ @overload
722
+ def rpartition(a: T_co, sep: T_co) -> _StringDTypeOrUnicodeArray: ...
723
+
724
+ @overload
725
+ def rsplit(
726
+ a: U_co,
727
+ sep: U_co | None = ...,
728
+ maxsplit: i_co | None = ...,
729
+ ) -> NDArray[object_]: ...
730
+ @overload
731
+ def rsplit(
732
+ a: S_co,
733
+ sep: S_co | None = ...,
734
+ maxsplit: i_co | None = ...,
735
+ ) -> NDArray[object_]: ...
736
+ @overload
737
+ def rsplit(
738
+ a: _StringDTypeSupportsArray,
739
+ sep: _StringDTypeSupportsArray | None = ...,
740
+ maxsplit: i_co | None = ...,
741
+ ) -> NDArray[object_]: ...
742
+ @overload
743
+ def rsplit(
744
+ a: T_co,
745
+ sep: T_co | None = ...,
746
+ maxsplit: i_co | None = ...,
747
+ ) -> NDArray[object_]: ...
748
+
749
+ @overload
750
+ def rstrip(a: U_co, chars: U_co | None = ...) -> NDArray[str_]: ...
751
+ @overload
752
+ def rstrip(a: S_co, chars: S_co | None = ...) -> NDArray[bytes_]: ...
753
+ @overload
754
+ def rstrip(a: _StringDTypeSupportsArray, chars: _StringDTypeSupportsArray | None = ...) -> _StringDTypeArray: ...
755
+ @overload
756
+ def rstrip(a: T_co, chars: T_co | None = ...) -> _StringDTypeOrUnicodeArray: ...
757
+
758
+ @overload
759
+ def split(
760
+ a: U_co,
761
+ sep: U_co | None = ...,
762
+ maxsplit: i_co | None = ...,
763
+ ) -> NDArray[object_]: ...
764
+ @overload
765
+ def split(
766
+ a: S_co,
767
+ sep: S_co | None = ...,
768
+ maxsplit: i_co | None = ...,
769
+ ) -> NDArray[object_]: ...
770
+ @overload
771
+ def split(
772
+ a: _StringDTypeSupportsArray,
773
+ sep: _StringDTypeSupportsArray | None = ...,
774
+ maxsplit: i_co | None = ...,
775
+ ) -> NDArray[object_]: ...
776
+ @overload
777
+ def split(
778
+ a: T_co,
779
+ sep: T_co | None = ...,
780
+ maxsplit: i_co | None = ...,
781
+ ) -> NDArray[object_]: ...
782
+
783
+ def splitlines(a: UST_co, keepends: b_co | None = ...) -> NDArray[np.object_]: ...
784
+
785
+ @overload
786
+ def strip(a: U_co, chars: U_co | None = ...) -> NDArray[str_]: ...
787
+ @overload
788
+ def strip(a: S_co, chars: S_co | None = ...) -> NDArray[bytes_]: ...
789
+ @overload
790
+ def strip(a: _StringDTypeSupportsArray, chars: _StringDTypeSupportsArray | None = ...) -> _StringDTypeArray: ...
791
+ @overload
792
+ def strip(a: T_co, chars: T_co | None = ...) -> _StringDTypeOrUnicodeArray: ...
793
+
794
+ @overload
795
+ def swapcase(a: U_co) -> NDArray[str_]: ...
796
+ @overload
797
+ def swapcase(a: S_co) -> NDArray[bytes_]: ...
798
+ @overload
799
+ def swapcase(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ...
800
+ @overload
801
+ def swapcase(a: T_co) -> _StringDTypeOrUnicodeArray: ...
802
+
803
+ @overload
804
+ def title(a: U_co) -> NDArray[str_]: ...
805
+ @overload
806
+ def title(a: S_co) -> NDArray[bytes_]: ...
807
+ @overload
808
+ def title(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ...
809
+ @overload
810
+ def title(a: T_co) -> _StringDTypeOrUnicodeArray: ...
811
+
812
+ @overload
813
+ def translate(
814
+ a: U_co,
815
+ table: str,
816
+ deletechars: str | None = ...,
817
+ ) -> NDArray[str_]: ...
818
+ @overload
819
+ def translate(
820
+ a: S_co,
821
+ table: str,
822
+ deletechars: str | None = ...,
823
+ ) -> NDArray[bytes_]: ...
824
+ @overload
825
+ def translate(
826
+ a: _StringDTypeSupportsArray,
827
+ table: str,
828
+ deletechars: str | None = ...,
829
+ ) -> _StringDTypeArray: ...
830
+ @overload
831
+ def translate(
832
+ a: T_co,
833
+ table: str,
834
+ deletechars: str | None = ...,
835
+ ) -> _StringDTypeOrUnicodeArray: ...
836
+
837
+ @overload
838
+ def upper(a: U_co) -> NDArray[str_]: ...
839
+ @overload
840
+ def upper(a: S_co) -> NDArray[bytes_]: ...
841
+ @overload
842
+ def upper(a: _StringDTypeSupportsArray) -> _StringDTypeArray: ...
843
+ @overload
844
+ def upper(a: T_co) -> _StringDTypeOrUnicodeArray: ...
845
+
846
+ @overload
847
+ def zfill(a: U_co, width: i_co) -> NDArray[str_]: ...
848
+ @overload
849
+ def zfill(a: S_co, width: i_co) -> NDArray[bytes_]: ...
850
+ @overload
851
+ def zfill(a: _StringDTypeSupportsArray, width: i_co) -> _StringDTypeArray: ...
852
+ @overload
853
+ def zfill(a: T_co, width: i_co) -> _StringDTypeOrUnicodeArray: ...
854
+
855
# String information
# Search and predicate queries, overloaded per input family; results are
# always integer or boolean arrays.

@overload
def count(a: U_co, sub: U_co, start: i_co = ..., end: i_co | None = ...) -> NDArray[int_]: ...
@overload
def count(a: S_co, sub: S_co, start: i_co = ..., end: i_co | None = ...) -> NDArray[int_]: ...
@overload
def count(a: T_co, sub: T_co, start: i_co = ..., end: i_co | None = ...) -> NDArray[np.int_]: ...

@overload
def endswith(a: U_co, suffix: U_co, start: i_co = ..., end: i_co | None = ...) -> NDArray[np.bool]: ...
@overload
def endswith(a: S_co, suffix: S_co, start: i_co = ..., end: i_co | None = ...) -> NDArray[np.bool]: ...
@overload
def endswith(a: T_co, suffix: T_co, start: i_co = ..., end: i_co | None = ...) -> NDArray[np.bool]: ...

@overload
def find(a: U_co, sub: U_co, start: i_co = ..., end: i_co | None = ...) -> NDArray[int_]: ...
@overload
def find(a: S_co, sub: S_co, start: i_co = ..., end: i_co | None = ...) -> NDArray[int_]: ...
@overload
def find(a: T_co, sub: T_co, start: i_co = ..., end: i_co | None = ...) -> NDArray[np.int_]: ...

@overload
def index(a: U_co, sub: U_co, start: i_co = ..., end: i_co | None = ...) -> NDArray[int_]: ...
@overload
def index(a: S_co, sub: S_co, start: i_co = ..., end: i_co | None = ...) -> NDArray[int_]: ...
@overload
def index(a: T_co, sub: T_co, start: i_co = ..., end: i_co | None = ...) -> NDArray[np.int_]: ...

# Elementwise predicates. isdecimal/isnumeric are undefined for bytes input,
# hence they accept only U_co | T_co while the rest take UST_co.
def isalpha(a: UST_co) -> NDArray[np.bool]: ...
def isalnum(a: UST_co) -> NDArray[np.bool]: ...
def isdecimal(a: U_co | T_co) -> NDArray[np.bool]: ...
def isdigit(a: UST_co) -> NDArray[np.bool]: ...
def islower(a: UST_co) -> NDArray[np.bool]: ...
def isnumeric(a: U_co | T_co) -> NDArray[np.bool]: ...
def isspace(a: UST_co) -> NDArray[np.bool]: ...
def istitle(a: UST_co) -> NDArray[np.bool]: ...
def isupper(a: UST_co) -> NDArray[np.bool]: ...

@overload
def rfind(a: U_co, sub: U_co, start: i_co = ..., end: i_co | None = ...) -> NDArray[int_]: ...
@overload
def rfind(a: S_co, sub: S_co, start: i_co = ..., end: i_co | None = ...) -> NDArray[int_]: ...
@overload
def rfind(a: T_co, sub: T_co, start: i_co = ..., end: i_co | None = ...) -> NDArray[np.int_]: ...

@overload
def rindex(a: U_co, sub: U_co, start: i_co = ..., end: i_co | None = ...) -> NDArray[int_]: ...
@overload
def rindex(a: S_co, sub: S_co, start: i_co = ..., end: i_co | None = ...) -> NDArray[int_]: ...
@overload
def rindex(a: T_co, sub: T_co, start: i_co = ..., end: i_co | None = ...) -> NDArray[np.int_]: ...

# Consistency fix: the T_co overload previously spelled its defaults as
# ``start: i_co = 0, end: i_co | None = None``; every other stub in this file
# uses ``...`` for defaults, which is the stub-file convention and is
# semantically equivalent to type checkers.
@overload
def startswith(a: U_co, prefix: U_co, start: i_co = ..., end: i_co | None = ...) -> NDArray[np.bool]: ...
@overload
def startswith(a: S_co, prefix: S_co, start: i_co = ..., end: i_co | None = ...) -> NDArray[np.bool]: ...
@overload
def startswith(a: T_co, prefix: T_co, start: i_co = ..., end: i_co | None = ...) -> NDArray[np.bool]: ...

def str_len(A: UST_co) -> NDArray[int_]: ...
1021
+
1022
# Overload 1 and 2: str- or bytes-based array-likes
# overload 3 and 4: arbitrary object with unicode=False (-> bytes_)
# overload 5 and 6: arbitrary object with unicode=True (-> str_)
# overload 7: arbitrary object with unicode=None (default) (-> str_ | bytes_)
@overload
def array(
    obj: U_co,
    itemsize: int | None = ...,
    copy: bool = ...,
    unicode: L[True] | None = ...,
    order: _OrderKACF = ...,
) -> _CharArray[str_]: ...
@overload
def array(
    obj: S_co,
    itemsize: int | None = ...,
    copy: bool = ...,
    unicode: L[False] | None = ...,
    order: _OrderKACF = ...,
) -> _CharArray[bytes_]: ...
# ``unicode`` passed positionally (itemsize/copy then required) ...
@overload
def array(
    obj: object,
    itemsize: int | None,
    copy: bool,
    unicode: L[False],
    order: _OrderKACF = ...,
) -> _CharArray[bytes_]: ...
# ... or as keyword (itemsize/copy may be omitted).
@overload
def array(
    obj: object,
    itemsize: int | None = ...,
    copy: bool = ...,
    *,
    unicode: L[False],
    order: _OrderKACF = ...,
) -> _CharArray[bytes_]: ...
@overload
def array(
    obj: object,
    itemsize: int | None,
    copy: bool,
    unicode: L[True],
    order: _OrderKACF = ...,
) -> _CharArray[str_]: ...
@overload
def array(
    obj: object,
    itemsize: int | None = ...,
    copy: bool = ...,
    *,
    unicode: L[True],
    order: _OrderKACF = ...,
) -> _CharArray[str_]: ...
# Fallback when ``unicode`` is not statically known: result element type is
# undetermined between str_ and bytes_.
@overload
def array(
    obj: object,
    itemsize: int | None = ...,
    copy: bool = ...,
    unicode: bool | None = ...,
    order: _OrderKACF = ...,
) -> _CharArray[str_] | _CharArray[bytes_]: ...
1084
+
1085
# Same overload structure as ``array`` above, minus the ``copy`` parameter.
@overload
def asarray(
    obj: U_co,
    itemsize: int | None = ...,
    unicode: L[True] | None = ...,
    order: _OrderKACF = ...,
) -> _CharArray[str_]: ...
@overload
def asarray(
    obj: S_co,
    itemsize: int | None = ...,
    unicode: L[False] | None = ...,
    order: _OrderKACF = ...,
) -> _CharArray[bytes_]: ...
# ``unicode`` passed positionally (itemsize then required) ...
@overload
def asarray(
    obj: object,
    itemsize: int | None,
    unicode: L[False],
    order: _OrderKACF = ...,
) -> _CharArray[bytes_]: ...
# ... or as keyword (itemsize may be omitted).
@overload
def asarray(
    obj: object,
    itemsize: int | None = ...,
    *,
    unicode: L[False],
    order: _OrderKACF = ...,
) -> _CharArray[bytes_]: ...
@overload
def asarray(
    obj: object,
    itemsize: int | None,
    unicode: L[True],
    order: _OrderKACF = ...,
) -> _CharArray[str_]: ...
@overload
def asarray(
    obj: object,
    itemsize: int | None = ...,
    *,
    unicode: L[True],
    order: _OrderKACF = ...,
) -> _CharArray[str_]: ...
# Fallback when ``unicode`` is not statically known.
@overload
def asarray(
    obj: object,
    itemsize: int | None = ...,
    unicode: bool | None = ...,
    order: _OrderKACF = ...,
) -> _CharArray[str_] | _CharArray[bytes_]: ...
venv/lib/python3.13/site-packages/numpy/_core/einsumfunc.py ADDED
@@ -0,0 +1,1498 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Implementation of optimized einsum.
3
+
4
+ """
5
+ import itertools
6
+ import operator
7
+
8
+ from numpy._core.multiarray import c_einsum
9
+ from numpy._core.numeric import asanyarray, tensordot
10
+ from numpy._core.overrides import array_function_dispatch
11
+
12
+ __all__ = ['einsum', 'einsum_path']
13
+
14
+ # importing string for string.ascii_letters would be too slow
15
+ # the first import before caching has been measured to take 800 µs (#23777)
16
+ # imports begin with uppercase to mimic ASCII values to avoid sorting issues
17
+ einsum_symbols = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
18
+ einsum_symbols_set = set(einsum_symbols)
19
+
20
+
21
def _flop_count(idx_contraction, inner, num_terms, size_dictionary):
    """
    Estimate the FLOP cost of a single contraction.

    Parameters
    ----------
    idx_contraction : iterable
        Index labels participating in the contraction.
    inner : bool
        True when the contraction sums over removed indices (an inner
        product), which adds one extra operation per element.
    num_terms : int
        Number of tensors combined by this contraction.
    size_dictionary : dict
        Maps each index label to its dimension size.

    Returns
    -------
    int
        Total FLOP count for the contraction.

    Examples
    --------

    >>> _flop_count('abc', False, 1, {'a': 2, 'b':3, 'c':5})
    30

    >>> _flop_count('abc', True, 2, {'a': 2, 'b':3, 'c':5})
    60

    """
    # Number of elements touched: product of the participating index sizes.
    overall_size = 1
    for label in idx_contraction:
        overall_size *= size_dictionary[label]

    # One multiply per extra term, plus one add when an inner sum occurs.
    ops_per_element = max(1, num_terms - 1)
    if inner:
        ops_per_element += 1

    return overall_size * ops_per_element
58
+
59
+ def _compute_size_by_dict(indices, idx_dict):
60
+ """
61
+ Computes the product of the elements in indices based on the dictionary
62
+ idx_dict.
63
+
64
+ Parameters
65
+ ----------
66
+ indices : iterable
67
+ Indices to base the product on.
68
+ idx_dict : dictionary
69
+ Dictionary of index sizes
70
+
71
+ Returns
72
+ -------
73
+ ret : int
74
+ The resulting product.
75
+
76
+ Examples
77
+ --------
78
+ >>> _compute_size_by_dict('abbc', {'a': 2, 'b':3, 'c':5})
79
+ 90
80
+
81
+ """
82
+ ret = 1
83
+ for i in indices:
84
+ ret *= idx_dict[i]
85
+ return ret
86
+
87
+
88
+ def _find_contraction(positions, input_sets, output_set):
89
+ """
90
+ Finds the contraction for a given set of input and output sets.
91
+
92
+ Parameters
93
+ ----------
94
+ positions : iterable
95
+ Integer positions of terms used in the contraction.
96
+ input_sets : list
97
+ List of sets that represent the lhs side of the einsum subscript
98
+ output_set : set
99
+ Set that represents the rhs side of the overall einsum subscript
100
+
101
+ Returns
102
+ -------
103
+ new_result : set
104
+ The indices of the resulting contraction
105
+ remaining : list
106
+ List of sets that have not been contracted, the new set is appended to
107
+ the end of this list
108
+ idx_removed : set
109
+ Indices removed from the entire contraction
110
+ idx_contraction : set
111
+ The indices used in the current contraction
112
+
113
+ Examples
114
+ --------
115
+
116
+ # A simple dot product test case
117
+ >>> pos = (0, 1)
118
+ >>> isets = [set('ab'), set('bc')]
119
+ >>> oset = set('ac')
120
+ >>> _find_contraction(pos, isets, oset)
121
+ ({'a', 'c'}, [{'a', 'c'}], {'b'}, {'a', 'b', 'c'})
122
+
123
+ # A more complex case with additional terms in the contraction
124
+ >>> pos = (0, 2)
125
+ >>> isets = [set('abd'), set('ac'), set('bdc')]
126
+ >>> oset = set('ac')
127
+ >>> _find_contraction(pos, isets, oset)
128
+ ({'a', 'c'}, [{'a', 'c'}, {'a', 'c'}], {'b', 'd'}, {'a', 'b', 'c', 'd'})
129
+ """
130
+
131
+ idx_contract = set()
132
+ idx_remain = output_set.copy()
133
+ remaining = []
134
+ for ind, value in enumerate(input_sets):
135
+ if ind in positions:
136
+ idx_contract |= value
137
+ else:
138
+ remaining.append(value)
139
+ idx_remain |= value
140
+
141
+ new_result = idx_remain & idx_contract
142
+ idx_removed = (idx_contract - new_result)
143
+ remaining.append(new_result)
144
+
145
+ return (new_result, remaining, idx_removed, idx_contract)
146
+
147
+
148
def _optimal_path(input_sets, output_set, idx_dict, memory_limit):
    """
    Computes all possible pair contractions, sieves the results based
    on ``memory_limit`` and returns the lowest cost path. This algorithm
    scales factorial with respect to the elements in the list ``input_sets``.

    Parameters
    ----------
    input_sets : list
        List of sets that represent the lhs side of the einsum subscript
    output_set : set
        Set that represents the rhs side of the overall einsum subscript
    idx_dict : dictionary
        Dictionary of index sizes
    memory_limit : int
        The maximum number of elements in a temporary array

    Returns
    -------
    path : list
        The optimal contraction order within the memory limit constraint.

    Examples
    --------
    >>> isets = [set('abd'), set('ac'), set('bdc')]
    >>> oset = set()
    >>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4}
    >>> _optimal_path(isets, oset, idx_sizes, 5000)
    [(0, 2), (0, 1)]
    """

    # Breadth-first enumeration: each entry is (cost so far, pair choices so
    # far, remaining term index-sets). One contraction is chosen per level.
    full_results = [(0, [], input_sets)]
    for iteration in range(len(input_sets) - 1):
        iter_results = []

        # Compute all unique pairs
        for curr in full_results:
            cost, positions, remaining = curr
            # ``len(input_sets) - iteration`` terms remain at this depth.
            for con in itertools.combinations(
                range(len(input_sets) - iteration), 2
            ):

                # Find the contraction
                cont = _find_contraction(con, remaining, output_set)
                new_result, new_input_sets, idx_removed, idx_contract = cont

                # Sieve the results based on memory_limit
                new_size = _compute_size_by_dict(new_result, idx_dict)
                if new_size > memory_limit:
                    continue

                # Build (total_cost, positions, indices_remaining)
                total_cost = cost + _flop_count(
                    idx_contract, idx_removed, len(con), idx_dict
                )
                new_pos = positions + [con]
                iter_results.append((total_cost, new_pos, new_input_sets))

        # Update combinatorial list, if we did not find anything return best
        # path + remaining contractions
        if iter_results:
            full_results = iter_results
        else:
            # Every candidate blew the memory limit: finish with a single
            # multi-term einsum over everything that remains.
            path = min(full_results, key=lambda x: x[0])[1]
            path += [tuple(range(len(input_sets) - iteration))]
            return path

    # If we have not found anything return single einsum contraction
    if len(full_results) == 0:
        return [tuple(range(len(input_sets)))]

    # Cheapest complete enumeration wins.
    path = min(full_results, key=lambda x: x[0])[1]
    return path
221
+
222
def _parse_possible_contraction(
    positions, input_sets, output_set, idx_dict,
    memory_limit, path_cost, naive_cost
):
    """Evaluate one candidate pairwise contraction for the greedy search.

    Parameters
    ----------
    positions : tuple of int
        The locations of the proposed tensors to contract.
    input_sets : list of sets
        The indices found on each tensor.
    output_set : set
        The output indices of the expression.
    idx_dict : dict
        Mapping of each index to its size.
    memory_limit : int
        The total allowed size for an intermediary tensor.
    path_cost : int
        The contraction cost accumulated so far.
    naive_cost : int
        The cost of the unoptimized expression.

    Returns
    -------
    list or None
        ``[(-removed_size, flops), positions, new_input_sets]`` for a viable
        candidate, or None when it is sieved out by memory or cost.
    """
    # Resolve the indices produced/removed by contracting ``positions``.
    idx_result, new_input_sets, idx_removed, idx_contract = _find_contraction(
        positions, input_sets, output_set
    )

    # Reject candidates whose intermediate would exceed the memory cap.
    result_size = _compute_size_by_dict(idx_result, idx_dict)
    if result_size > memory_limit:
        return None

    # Net reduction in stored elements achieved by this contraction.
    # NB: this used to be just the size of any removed indices, i.e.
    # helpers.compute_size_by_dict(idx_removed, idx_dict)
    removed_size = sum(
        _compute_size_by_dict(input_sets[pos], idx_dict) for pos in positions
    ) - result_size

    flops = _flop_count(idx_contract, idx_removed, len(positions), idx_dict)

    # Reject anything already costlier than the unoptimized expression.
    if path_cost + flops > naive_cost:
        return None

    # Sort key: prefer large memory reductions first, then low FLOP counts.
    return [(-removed_size, flops), positions, new_input_sets]
284
+
285
+
286
+ def _update_other_results(results, best):
287
+ """Update the positions and provisional input_sets of ``results``
288
+ based on performing the contraction result ``best``. Remove any
289
+ involving the tensors contracted.
290
+
291
+ Parameters
292
+ ----------
293
+ results : list
294
+ List of contraction results produced by
295
+ ``_parse_possible_contraction``.
296
+ best : list
297
+ The best contraction of ``results`` i.e. the one that
298
+ will be performed.
299
+
300
+ Returns
301
+ -------
302
+ mod_results : list
303
+ The list of modified results, updated with outcome of
304
+ ``best`` contraction.
305
+ """
306
+
307
+ best_con = best[1]
308
+ bx, by = best_con
309
+ mod_results = []
310
+
311
+ for cost, (x, y), con_sets in results:
312
+
313
+ # Ignore results involving tensors just contracted
314
+ if x in best_con or y in best_con:
315
+ continue
316
+
317
+ # Update the input_sets
318
+ del con_sets[by - int(by > x) - int(by > y)]
319
+ del con_sets[bx - int(bx > x) - int(bx > y)]
320
+ con_sets.insert(-1, best[2][-1])
321
+
322
+ # Update the position indices
323
+ mod_con = x - int(x > bx) - int(x > by), y - int(y > bx) - int(y > by)
324
+ mod_results.append((cost, mod_con, con_sets))
325
+
326
+ return mod_results
327
+
328
def _greedy_path(input_sets, output_set, idx_dict, memory_limit):
    """
    Finds the path by contracting the best pair until the input list is
    exhausted. The best pair is found by minimizing the tuple
    ``(-prod(indices_removed), cost)``. What this amounts to is prioritizing
    matrix multiplication or inner product operations, then Hadamard like
    operations, and finally outer operations. Outer products are limited by
    ``memory_limit``. This algorithm scales cubically with respect to the
    number of elements in the list ``input_sets``.

    Parameters
    ----------
    input_sets : list
        List of sets that represent the lhs side of the einsum subscript
    output_set : set
        Set that represents the rhs side of the overall einsum subscript
    idx_dict : dictionary
        Dictionary of index sizes
    memory_limit : int
        The maximum number of elements in a temporary array

    Returns
    -------
    path : list
        The greedy contraction order within the memory limit constraint.

    Examples
    --------
    >>> isets = [set('abd'), set('ac'), set('bdc')]
    >>> oset = set()
    >>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4}
    >>> _greedy_path(isets, oset, idx_sizes, 5000)
    [(0, 2), (0, 1)]
    """

    # Handle trivial cases that leaked through
    if len(input_sets) == 1:
        return [(0,)]
    elif len(input_sets) == 2:
        return [(0, 1)]

    # Build up a naive cost
    # (cost of contracting everything in one shot; used as an upper bound
    # when sieving candidates)
    contract = _find_contraction(
        range(len(input_sets)), input_sets, output_set
    )
    idx_result, new_input_sets, idx_removed, idx_contract = contract
    naive_cost = _flop_count(
        idx_contract, idx_removed, len(input_sets), idx_dict
    )

    # Initially iterate over all pairs
    comb_iter = itertools.combinations(range(len(input_sets)), 2)
    known_contractions = []

    path_cost = 0
    path = []

    for iteration in range(len(input_sets) - 1):

        # Iterate over all pairs on the first step, only previously
        # found pairs on subsequent steps
        for positions in comb_iter:

            # Always initially ignore outer products
            if input_sets[positions[0]].isdisjoint(input_sets[positions[1]]):
                continue

            result = _parse_possible_contraction(
                positions, input_sets, output_set, idx_dict,
                memory_limit, path_cost, naive_cost
            )
            if result is not None:
                known_contractions.append(result)

        # If we do not have a inner contraction, rescan pairs
        # including outer products
        if len(known_contractions) == 0:

            # Then check the outer products
            for positions in itertools.combinations(
                range(len(input_sets)), 2
            ):
                result = _parse_possible_contraction(
                    positions, input_sets, output_set, idx_dict,
                    memory_limit, path_cost, naive_cost
                )
                if result is not None:
                    known_contractions.append(result)

            # If we still did not find any remaining contractions,
            # default back to einsum like behavior
            if len(known_contractions) == 0:
                path.append(tuple(range(len(input_sets))))
                break

        # Sort based on first index
        # (candidate sort key is (-removed_size, flops), so min() picks the
        # biggest memory reduction, ties broken by cheapest FLOPs)
        best = min(known_contractions, key=lambda x: x[0])

        # Now propagate as many unused contractions as possible
        # to the next iteration
        known_contractions = _update_other_results(known_contractions, best)

        # Next iteration only compute contractions with the new tensor
        # All other contractions have been accounted for
        input_sets = best[2]
        new_tensor_pos = len(input_sets) - 1
        comb_iter = ((i, new_tensor_pos) for i in range(new_tensor_pos))

        # Update path and total cost
        path.append(best[1])
        path_cost += best[0][1]

    return path
441
+
442
+
443
+ def _can_dot(inputs, result, idx_removed):
444
+ """
445
+ Checks if we can use BLAS (np.tensordot) call and its beneficial to do so.
446
+
447
+ Parameters
448
+ ----------
449
+ inputs : list of str
450
+ Specifies the subscripts for summation.
451
+ result : str
452
+ Resulting summation.
453
+ idx_removed : set
454
+ Indices that are removed in the summation
455
+
456
+
457
+ Returns
458
+ -------
459
+ type : bool
460
+ Returns true if BLAS should and can be used, else False
461
+
462
+ Notes
463
+ -----
464
+ If the operations is BLAS level 1 or 2 and is not already aligned
465
+ we default back to einsum as the memory movement to copy is more
466
+ costly than the operation itself.
467
+
468
+
469
+ Examples
470
+ --------
471
+
472
+ # Standard GEMM operation
473
+ >>> _can_dot(['ij', 'jk'], 'ik', set('j'))
474
+ True
475
+
476
+ # Can use the standard BLAS, but requires odd data movement
477
+ >>> _can_dot(['ijj', 'jk'], 'ik', set('j'))
478
+ False
479
+
480
+ # DDOT where the memory is not aligned
481
+ >>> _can_dot(['ijk', 'ikj'], '', set('ijk'))
482
+ False
483
+
484
+ """
485
+
486
+ # All `dot` calls remove indices
487
+ if len(idx_removed) == 0:
488
+ return False
489
+
490
+ # BLAS can only handle two operands
491
+ if len(inputs) != 2:
492
+ return False
493
+
494
+ input_left, input_right = inputs
495
+
496
+ for c in set(input_left + input_right):
497
+ # can't deal with repeated indices on same input or more than 2 total
498
+ nl, nr = input_left.count(c), input_right.count(c)
499
+ if (nl > 1) or (nr > 1) or (nl + nr > 2):
500
+ return False
501
+
502
+ # can't do implicit summation or dimension collapse e.g.
503
+ # "ab,bc->c" (implicitly sum over 'a')
504
+ # "ab,ca->ca" (take diagonal of 'a')
505
+ if nl + nr - 1 == int(c in result):
506
+ return False
507
+
508
+ # Build a few temporaries
509
+ set_left = set(input_left)
510
+ set_right = set(input_right)
511
+ keep_left = set_left - idx_removed
512
+ keep_right = set_right - idx_removed
513
+ rs = len(idx_removed)
514
+
515
+ # At this point we are a DOT, GEMV, or GEMM operation
516
+
517
+ # Handle inner products
518
+
519
+ # DDOT with aligned data
520
+ if input_left == input_right:
521
+ return True
522
+
523
+ # DDOT without aligned data (better to use einsum)
524
+ if set_left == set_right:
525
+ return False
526
+
527
+ # Handle the 4 possible (aligned) GEMV or GEMM cases
528
+
529
+ # GEMM or GEMV no transpose
530
+ if input_left[-rs:] == input_right[:rs]:
531
+ return True
532
+
533
+ # GEMM or GEMV transpose both
534
+ if input_left[:rs] == input_right[-rs:]:
535
+ return True
536
+
537
+ # GEMM or GEMV transpose right
538
+ if input_left[-rs:] == input_right[-rs:]:
539
+ return True
540
+
541
+ # GEMM or GEMV transpose left
542
+ if input_left[:rs] == input_right[:rs]:
543
+ return True
544
+
545
+ # Einsum is faster than GEMV if we have to copy data
546
+ if not keep_left or not keep_right:
547
+ return False
548
+
549
+ # We are a matrix-matrix product, but we need to copy data
550
+ return True
551
+
552
+
553
def _parse_einsum_input(operands):
    """
    A reproduction of einsum c side einsum parsing in python.

    Parameters
    ----------
    operands : sequence
        Either ``(subscripts, *arrays)`` where ``subscripts`` is a string,
        or the interleaved form ``(array0, sublist0, array1, sublist1, ...,
        [sublistout])`` where each sublist contains ints and/or Ellipsis.

    Returns
    -------
    input_strings : str
        Parsed input strings
    output_string : str
        Parsed output string
    operands : list of array_like
        The operands to use in the numpy contraction

    Examples
    --------
    The operand list is simplified to reduce printing:

    >>> np.random.seed(123)
    >>> a = np.random.rand(4, 4)
    >>> b = np.random.rand(4, 4, 4)
    >>> _parse_einsum_input(('...a,...a->...', a, b))
    ('za,xza', 'xz', [a, b]) # may vary

    >>> _parse_einsum_input((a, [Ellipsis, 0], b, [Ellipsis, 0]))
    ('za,xza', 'xz', [a, b]) # may vary
    """

    if len(operands) == 0:
        raise ValueError("No input operands")

    if isinstance(operands[0], str):
        # String form: first element is the subscripts, rest are arrays.
        subscripts = operands[0].replace(" ", "")
        operands = [asanyarray(v) for v in operands[1:]]

        # Ensure all characters are valid
        for s in subscripts:
            if s in '.,->':
                continue
            if s not in einsum_symbols:
                raise ValueError(f"Character {s} is not a valid symbol.")

    else:
        # Interleaved form: (op0, sublist0, op1, sublist1, ..., [sublistout]).
        tmp_operands = list(operands)
        operand_list = []
        subscript_list = []
        for p in range(len(operands) // 2):
            operand_list.append(tmp_operands.pop(0))
            subscript_list.append(tmp_operands.pop(0))

        # An odd trailing element, if present, is the output sublist.
        output_list = tmp_operands[-1] if len(tmp_operands) else None
        operands = [asanyarray(v) for v in operand_list]
        subscripts = ""
        last = len(subscript_list) - 1
        for num, sub in enumerate(subscript_list):
            for s in sub:
                if s is Ellipsis:
                    subscripts += "..."
                else:
                    try:
                        # Integer indices map into the symbol alphabet.
                        s = operator.index(s)
                    except TypeError as e:
                        raise TypeError(
                            "For this input type lists must contain "
                            "either int or Ellipsis"
                        ) from e
                    subscripts += einsum_symbols[s]
            if num != last:
                subscripts += ","

        if output_list is not None:
            subscripts += "->"
            for s in output_list:
                if s is Ellipsis:
                    subscripts += "..."
                else:
                    try:
                        s = operator.index(s)
                    except TypeError as e:
                        raise TypeError(
                            "For this input type lists must contain "
                            "either int or Ellipsis"
                        ) from e
                    subscripts += einsum_symbols[s]
    # Check for proper "->"
    if ("-" in subscripts) or (">" in subscripts):
        invalid = (subscripts.count("-") > 1) or (subscripts.count(">") > 1)
        if invalid or (subscripts.count("->") != 1):
            raise ValueError("Subscripts can only contain one '->'.")

    # Parse ellipses
    if "." in subscripts:
        # Symbols not already used are available to name the ellipsis dims.
        used = subscripts.replace(".", "").replace(",", "").replace("->", "")
        unused = list(einsum_symbols_set - set(used))
        ellipse_inds = "".join(unused)
        longest = 0

        if "->" in subscripts:
            input_tmp, output_sub = subscripts.split("->")
            split_subscripts = input_tmp.split(",")
            out_sub = True
        else:
            split_subscripts = subscripts.split(',')
            out_sub = False

        for num, sub in enumerate(split_subscripts):
            if "." in sub:
                if (sub.count(".") != 3) or (sub.count("...") != 1):
                    raise ValueError("Invalid Ellipses.")

                # Take into account numerical values
                if operands[num].shape == ():
                    ellipse_count = 0
                else:
                    ellipse_count = max(operands[num].ndim, 1)
                    ellipse_count -= (len(sub) - 3)

                if ellipse_count > longest:
                    longest = ellipse_count

                if ellipse_count < 0:
                    raise ValueError("Ellipses lengths do not match.")
                elif ellipse_count == 0:
                    split_subscripts[num] = sub.replace('...', '')
                else:
                    # Replace '...' with the tail of the fresh symbols so the
                    # broadcast dims of every operand share the same labels.
                    rep_inds = ellipse_inds[-ellipse_count:]
                    split_subscripts[num] = sub.replace('...', rep_inds)

        subscripts = ",".join(split_subscripts)
        if longest == 0:
            out_ellipse = ""
        else:
            out_ellipse = ellipse_inds[-longest:]

        if out_sub:
            subscripts += "->" + output_sub.replace("...", out_ellipse)
        else:
            # Special care for outputless ellipses
            output_subscript = ""
            tmp_subscripts = subscripts.replace(",", "")
            for s in sorted(set(tmp_subscripts)):
                if s not in (einsum_symbols):
                    raise ValueError(f"Character {s} is not a valid symbol.")
                if tmp_subscripts.count(s) == 1:
                    output_subscript += s
            normal_inds = ''.join(sorted(set(output_subscript) -
                                         set(out_ellipse)))

            subscripts += "->" + out_ellipse + normal_inds

    # Build output string if does not exist
    if "->" in subscripts:
        input_subscripts, output_subscript = subscripts.split("->")
    else:
        input_subscripts = subscripts
        # Build output subscripts
        tmp_subscripts = subscripts.replace(",", "")
        output_subscript = ""
        for s in sorted(set(tmp_subscripts)):
            if s not in einsum_symbols:
                raise ValueError(f"Character {s} is not a valid symbol.")
            if tmp_subscripts.count(s) == 1:
                output_subscript += s

    # Make sure output subscripts are in the input
    for char in output_subscript:
        if output_subscript.count(char) != 1:
            raise ValueError("Output character %s appeared more than once in "
                             "the output." % char)
        if char not in input_subscripts:
            raise ValueError(f"Output character {char} did not appear in the input")

    # Make sure number operands is equivalent to the number of terms
    if len(input_subscripts.split(',')) != len(operands):
        raise ValueError("Number of einsum subscripts must be equal to the "
                         "number of operands.")

    return (input_subscripts, output_subscript, operands)
730
+
731
+
732
+ def _einsum_path_dispatcher(*operands, optimize=None, einsum_call=None):
733
+ # NOTE: technically, we should only dispatch on array-like arguments, not
734
+ # subscripts (given as strings). But separating operands into
735
+ # arrays/subscripts is a little tricky/slow (given einsum's two supported
736
+ # signatures), so as a practical shortcut we dispatch on everything.
737
+ # Strings will be ignored for dispatching since they don't define
738
+ # __array_function__.
739
+ return operands
740
+
741
+
742
@array_function_dispatch(_einsum_path_dispatcher, module='numpy')
def einsum_path(*operands, optimize='greedy', einsum_call=False):
    """
    einsum_path(subscripts, *operands, optimize='greedy')

    Evaluates the lowest cost contraction order for an einsum expression by
    considering the creation of intermediate arrays.

    Parameters
    ----------
    subscripts : str
        Specifies the subscripts for summation.
    *operands : list of array_like
        These are the arrays for the operation.
    optimize : {bool, list, tuple, 'greedy', 'optimal'}
        Choose the type of path. If a tuple is provided, the second argument is
        assumed to be the maximum intermediate size created. If only a single
        argument is provided the largest input or output array size is used
        as a maximum intermediate size.

        * if a list is given that starts with ``einsum_path``, uses this as the
          contraction path
        * if False no optimization is taken
        * if True defaults to the 'greedy' algorithm
        * 'optimal' An algorithm that combinatorially explores all possible
          ways of contracting the listed tensors and chooses the least costly
          path. Scales exponentially with the number of terms in the
          contraction.
        * 'greedy' An algorithm that chooses the best pair contraction
          at each step. Effectively, this algorithm searches the largest inner,
          Hadamard, and then outer products at each step. Scales cubically with
          the number of terms in the contraction. Equivalent to the 'optimal'
          path for most contractions.

        Default is 'greedy'.

    Returns
    -------
    path : list of tuples
        A list representation of the einsum path.
    string_repr : str
        A printable representation of the einsum path.

    Notes
    -----
    The resulting path indicates which terms of the input contraction should be
    contracted first, the result of this contraction is then appended to the
    end of the contraction list. This list can then be iterated over until all
    intermediate contractions are complete.

    See Also
    --------
    einsum, linalg.multi_dot

    Examples
    --------

    We can begin with a chain dot example. In this case, it is optimal to
    contract the ``b`` and ``c`` tensors first as represented by the first
    element of the path ``(1, 2)``. The resulting tensor is added to the end
    of the contraction and the remaining contraction ``(0, 1)`` is then
    completed.

    >>> np.random.seed(123)
    >>> a = np.random.rand(2, 2)
    >>> b = np.random.rand(2, 5)
    >>> c = np.random.rand(5, 2)
    >>> path_info = np.einsum_path('ij,jk,kl->il', a, b, c, optimize='greedy')
    >>> print(path_info[0])
    ['einsum_path', (1, 2), (0, 1)]
    >>> print(path_info[1])
      Complete contraction:  ij,jk,kl->il # may vary
             Naive scaling:  4
         Optimized scaling:  3
          Naive FLOP count:  1.600e+02
      Optimized FLOP count:  5.600e+01
       Theoretical speedup:  2.857
      Largest intermediate:  4.000e+00 elements
    -------------------------------------------------------------------------
    scaling                  current                                remaining
    -------------------------------------------------------------------------
       3                   kl,jk->jl                                ij,jl->il
       3                   jl,ij->il                                   il->il


    A more complex index transformation example.

    >>> I = np.random.rand(10, 10, 10, 10)
    >>> C = np.random.rand(10, 10)
    >>> path_info = np.einsum_path('ea,fb,abcd,gc,hd->efgh', C, C, I, C, C,
    ...                            optimize='greedy')

    >>> print(path_info[0])
    ['einsum_path', (0, 2), (0, 3), (0, 2), (0, 1)]
    >>> print(path_info[1])
      Complete contraction:  ea,fb,abcd,gc,hd->efgh # may vary
             Naive scaling:  8
         Optimized scaling:  5
          Naive FLOP count:  8.000e+08
      Optimized FLOP count:  8.000e+05
       Theoretical speedup:  1000.000
      Largest intermediate:  1.000e+04 elements
    --------------------------------------------------------------------------
    scaling                  current                                remaining
    --------------------------------------------------------------------------
       5               abcd,ea->bcde                      fb,gc,hd,bcde->efgh
       5               bcde,fb->cdef                         gc,hd,cdef->efgh
       5               cdef,gc->defg                            hd,defg->efgh
       5               defg,hd->efgh                               efgh->efgh
    """

    # Figure out what the path really is
    path_type = optimize
    if path_type is True:
        path_type = 'greedy'
    if path_type is None:
        path_type = False

    explicit_einsum_path = False
    memory_limit = None

    # No optimization or a named path algorithm
    if (path_type is False) or isinstance(path_type, str):
        pass

    # Given an explicit path
    elif len(path_type) and (path_type[0] == 'einsum_path'):
        explicit_einsum_path = True

    # Path tuple with memory limit
    elif ((len(path_type) == 2) and isinstance(path_type[0], str) and
            isinstance(path_type[1], (int, float))):
        memory_limit = int(path_type[1])
        path_type = path_type[0]

    else:
        raise TypeError(f"Did not understand the path: {str(path_type)}")

    # Hidden option, only einsum should call this
    einsum_call_arg = einsum_call

    # Python side parsing
    input_subscripts, output_subscript, operands = (
        _parse_einsum_input(operands)
    )

    # Build a few useful list and sets
    input_list = input_subscripts.split(',')
    input_sets = [set(x) for x in input_list]
    output_set = set(output_subscript)
    indices = set(input_subscripts.replace(',', ''))

    # Get length of each unique dimension and ensure all dimensions are correct
    dimension_dict = {}
    broadcast_indices = [[] for _ in range(len(input_list))]
    for tnum, term in enumerate(input_list):
        sh = operands[tnum].shape
        if len(sh) != len(term):
            # BUG FIX: interpolate the operand's subscript term rather than
            # input_subscripts[tnum], which is a single character of the
            # joined subscript string and produces a misleading message.
            raise ValueError("Einstein sum subscript %s does not contain the "
                             "correct number of indices for operand %d."
                             % (input_list[tnum], tnum))
        for cnum, char in enumerate(term):
            dim = sh[cnum]

            # Build out broadcast indices
            if dim == 1:
                broadcast_indices[tnum].append(char)

            if char in dimension_dict.keys():
                # For broadcasting cases we always want the largest dim size
                if dimension_dict[char] == 1:
                    dimension_dict[char] = dim
                elif dim not in (1, dimension_dict[char]):
                    raise ValueError("Size of label '%s' for operand %d (%d) "
                                     "does not match previous terms (%d)."
                                     % (char, tnum, dimension_dict[char], dim))
            else:
                dimension_dict[char] = dim

    # Convert broadcast inds to sets
    broadcast_indices = [set(x) for x in broadcast_indices]

    # Compute size of each input array plus the output array
    size_list = [_compute_size_by_dict(term, dimension_dict)
                 for term in input_list + [output_subscript]]
    max_size = max(size_list)

    if memory_limit is None:
        memory_arg = max_size
    else:
        memory_arg = memory_limit

    # Compute naive cost
    # This isn't quite right, need to look into exactly how einsum does this
    inner_product = (sum(len(x) for x in input_sets) - len(indices)) > 0
    naive_cost = _flop_count(
        indices, inner_product, len(input_list), dimension_dict
    )

    # Compute the path
    if explicit_einsum_path:
        path = path_type[1:]
    elif (
        (path_type is False)
        or (len(input_list) in [1, 2])
        or (indices == output_set)
    ):
        # Nothing to be optimized, leave it to einsum
        path = [tuple(range(len(input_list)))]
    elif path_type == "greedy":
        path = _greedy_path(
            input_sets, output_set, dimension_dict, memory_arg
        )
    elif path_type == "optimal":
        path = _optimal_path(
            input_sets, output_set, dimension_dict, memory_arg
        )
    else:
        # BUG FIX: the original passed two arguments to KeyError
        # (KeyError("... %s ...", path_type)), so the %s placeholder was
        # never interpolated and the exception carried a tuple.
        raise KeyError(f"Path name {path_type} not found")

    cost_list, scale_list, size_list, contraction_list = [], [], [], []

    # Build contraction tuple (positions, gemm, einsum_str, remaining)
    for cnum, contract_inds in enumerate(path):
        # Make sure we remove inds from right to left
        contract_inds = tuple(sorted(contract_inds, reverse=True))

        contract = _find_contraction(contract_inds, input_sets, output_set)
        out_inds, input_sets, idx_removed, idx_contract = contract

        cost = _flop_count(
            idx_contract, idx_removed, len(contract_inds), dimension_dict
        )
        cost_list.append(cost)
        scale_list.append(len(idx_contract))
        size_list.append(_compute_size_by_dict(out_inds, dimension_dict))

        bcast = set()
        tmp_inputs = []
        for x in contract_inds:
            tmp_inputs.append(input_list.pop(x))
            bcast |= broadcast_indices.pop(x)

        new_bcast_inds = bcast - idx_removed

        # If we're broadcasting, nix blas
        if not len(idx_removed & bcast):
            do_blas = _can_dot(tmp_inputs, out_inds, idx_removed)
        else:
            do_blas = False

        # Last contraction
        if (cnum - len(path)) == -1:
            idx_result = output_subscript
        else:
            sort_result = [(dimension_dict[ind], ind) for ind in out_inds]
            idx_result = "".join([x[1] for x in sorted(sort_result)])

        input_list.append(idx_result)
        broadcast_indices.append(new_bcast_inds)
        einsum_str = ",".join(tmp_inputs) + "->" + idx_result

        contraction = (
            contract_inds, idx_removed, einsum_str, input_list[:], do_blas
        )
        contraction_list.append(contraction)

    opt_cost = sum(cost_list) + 1

    if len(input_list) != 1:
        # Explicit "einsum_path" is usually trusted, but we detect this kind of
        # mistake in order to prevent from returning an intermediate value.
        raise RuntimeError(
            f"Invalid einsum_path is specified: {len(input_list) - 1} more "
            "operands has to be contracted.")

    if einsum_call_arg:
        return (operands, contraction_list)

    # Return the path along with a nice string representation
    overall_contraction = input_subscripts + "->" + output_subscript
    header = ("scaling", "current", "remaining")

    speedup = naive_cost / opt_cost
    max_i = max(size_list)

    path_print = f"  Complete contraction:  {overall_contraction}\n"
    path_print += f"         Naive scaling:  {len(indices)}\n"
    path_print += "     Optimized scaling:  %d\n" % max(scale_list)
    path_print += f"      Naive FLOP count:  {naive_cost:.3e}\n"
    path_print += f"  Optimized FLOP count:  {opt_cost:.3e}\n"
    path_print += f"   Theoretical speedup:  {speedup:3.3f}\n"
    path_print += f"  Largest intermediate:  {max_i:.3e} elements\n"
    path_print += "-" * 74 + "\n"
    path_print += "%6s %24s %40s\n" % header
    path_print += "-" * 74

    for n, contraction in enumerate(contraction_list):
        inds, idx_rm, einsum_str, remaining, blas = contraction
        remaining_str = ",".join(remaining) + "->" + output_subscript
        path_run = (scale_list[n], einsum_str, remaining_str)
        path_print += "\n%4d    %24s %40s" % path_run

    path = ['einsum_path'] + path
    return (path, path_print)
1047
+
1048
+
1049
+ def _einsum_dispatcher(*operands, out=None, optimize=None, **kwargs):
1050
+ # Arguably we dispatch on more arguments than we really should; see note in
1051
+ # _einsum_path_dispatcher for why.
1052
+ yield from operands
1053
+ yield out
1054
+
1055
+
1056
+ # Rewrite einsum to handle different cases
1057
+ @array_function_dispatch(_einsum_dispatcher, module='numpy')
1058
+ def einsum(*operands, out=None, optimize=False, **kwargs):
1059
+ """
1060
+ einsum(subscripts, *operands, out=None, dtype=None, order='K',
1061
+ casting='safe', optimize=False)
1062
+
1063
+ Evaluates the Einstein summation convention on the operands.
1064
+
1065
+ Using the Einstein summation convention, many common multi-dimensional,
1066
+ linear algebraic array operations can be represented in a simple fashion.
1067
+ In *implicit* mode `einsum` computes these values.
1068
+
1069
+ In *explicit* mode, `einsum` provides further flexibility to compute
1070
+ other array operations that might not be considered classical Einstein
1071
+ summation operations, by disabling, or forcing summation over specified
1072
+ subscript labels.
1073
+
1074
+ See the notes and examples for clarification.
1075
+
1076
+ Parameters
1077
+ ----------
1078
+ subscripts : str
1079
+ Specifies the subscripts for summation as comma separated list of
1080
+ subscript labels. An implicit (classical Einstein summation)
1081
+ calculation is performed unless the explicit indicator '->' is
1082
+ included as well as subscript labels of the precise output form.
1083
+ operands : list of array_like
1084
+ These are the arrays for the operation.
1085
+ out : ndarray, optional
1086
+ If provided, the calculation is done into this array.
1087
+ dtype : {data-type, None}, optional
1088
+ If provided, forces the calculation to use the data type specified.
1089
+ Note that you may have to also give a more liberal `casting`
1090
+ parameter to allow the conversions. Default is None.
1091
+ order : {'C', 'F', 'A', 'K'}, optional
1092
+ Controls the memory layout of the output. 'C' means it should
1093
+ be C contiguous. 'F' means it should be Fortran contiguous,
1094
+ 'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise.
1095
+ 'K' means it should be as close to the layout as the inputs as
1096
+ is possible, including arbitrarily permuted axes.
1097
+ Default is 'K'.
1098
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
1099
+ Controls what kind of data casting may occur. Setting this to
1100
+ 'unsafe' is not recommended, as it can adversely affect accumulations.
1101
+
1102
+ * 'no' means the data types should not be cast at all.
1103
+ * 'equiv' means only byte-order changes are allowed.
1104
+ * 'safe' means only casts which can preserve values are allowed.
1105
+ * 'same_kind' means only safe casts or casts within a kind,
1106
+ like float64 to float32, are allowed.
1107
+ * 'unsafe' means any data conversions may be done.
1108
+
1109
+ Default is 'safe'.
1110
+ optimize : {False, True, 'greedy', 'optimal'}, optional
1111
+ Controls if intermediate optimization should occur. No optimization
1112
+ will occur if False and True will default to the 'greedy' algorithm.
1113
+ Also accepts an explicit contraction list from the ``np.einsum_path``
1114
+ function. See ``np.einsum_path`` for more details. Defaults to False.
1115
+
1116
+ Returns
1117
+ -------
1118
+ output : ndarray
1119
+ The calculation based on the Einstein summation convention.
1120
+
1121
+ See Also
1122
+ --------
1123
+ einsum_path, dot, inner, outer, tensordot, linalg.multi_dot
1124
+ einsum:
1125
+ Similar verbose interface is provided by the
1126
+ `einops <https://github.com/arogozhnikov/einops>`_ package to cover
1127
+ additional operations: transpose, reshape/flatten, repeat/tile,
1128
+ squeeze/unsqueeze and reductions.
1129
+ The `opt_einsum <https://optimized-einsum.readthedocs.io/en/stable/>`_
1130
+ optimizes contraction order for einsum-like expressions
1131
+ in backend-agnostic manner.
1132
+
1133
+ Notes
1134
+ -----
1135
+ The Einstein summation convention can be used to compute
1136
+ many multi-dimensional, linear algebraic array operations. `einsum`
1137
+ provides a succinct way of representing these.
1138
+
1139
+ A non-exhaustive list of these operations,
1140
+ which can be computed by `einsum`, is shown below along with examples:
1141
+
1142
+ * Trace of an array, :py:func:`numpy.trace`.
1143
+ * Return a diagonal, :py:func:`numpy.diag`.
1144
+ * Array axis summations, :py:func:`numpy.sum`.
1145
+ * Transpositions and permutations, :py:func:`numpy.transpose`.
1146
+ * Matrix multiplication and dot product, :py:func:`numpy.matmul`
1147
+ :py:func:`numpy.dot`.
1148
+ * Vector inner and outer products, :py:func:`numpy.inner`
1149
+ :py:func:`numpy.outer`.
1150
+ * Broadcasting, element-wise and scalar multiplication,
1151
+ :py:func:`numpy.multiply`.
1152
+ * Tensor contractions, :py:func:`numpy.tensordot`.
1153
+ * Chained array operations, in efficient calculation order,
1154
+ :py:func:`numpy.einsum_path`.
1155
+
1156
+ The subscripts string is a comma-separated list of subscript labels,
1157
+ where each label refers to a dimension of the corresponding operand.
1158
+ Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)``
1159
+ is equivalent to :py:func:`np.inner(a,b) <numpy.inner>`. If a label
1160
+ appears only once, it is not summed, so ``np.einsum('i', a)``
1161
+ produces a view of ``a`` with no changes. A further example
1162
+ ``np.einsum('ij,jk', a, b)`` describes traditional matrix multiplication
1163
+ and is equivalent to :py:func:`np.matmul(a,b) <numpy.matmul>`.
1164
+ Repeated subscript labels in one operand take the diagonal.
1165
+ For example, ``np.einsum('ii', a)`` is equivalent to
1166
+ :py:func:`np.trace(a) <numpy.trace>`.
1167
+
1168
+ In *implicit mode*, the chosen subscripts are important
1169
+ since the axes of the output are reordered alphabetically. This
1170
+ means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
1171
+ ``np.einsum('ji', a)`` takes its transpose. Additionally,
1172
+ ``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while,
1173
+ ``np.einsum('ij,jh', a, b)`` returns the transpose of the
1174
+ multiplication since subscript 'h' precedes subscript 'i'.
1175
+
1176
+ In *explicit mode* the output can be directly controlled by
1177
+ specifying output subscript labels. This requires the
1178
+ identifier '->' as well as the list of output subscript labels.
1179
+ This feature increases the flexibility of the function since
1180
+ summing can be disabled or forced when required. The call
1181
+ ``np.einsum('i->', a)`` is like :py:func:`np.sum(a) <numpy.sum>`
1182
+ if ``a`` is a 1-D array, and ``np.einsum('ii->i', a)``
1183
+ is like :py:func:`np.diag(a) <numpy.diag>` if ``a`` is a square 2-D array.
1184
+ The difference is that `einsum` does not allow broadcasting by default.
1185
+ Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the
1186
+ order of the output subscript labels and therefore returns matrix
1187
+ multiplication, unlike the example above in implicit mode.
1188
+
1189
+ To enable and control broadcasting, use an ellipsis. Default
1190
+ NumPy-style broadcasting is done by adding an ellipsis
1191
+ to the left of each term, like ``np.einsum('...ii->...i', a)``.
1192
+ ``np.einsum('...i->...', a)`` is like
1193
+ :py:func:`np.sum(a, axis=-1) <numpy.sum>` for array ``a`` of any shape.
1194
+ To take the trace along the first and last axes,
1195
+ you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix
1196
+ product with the left-most indices instead of rightmost, one can do
1197
+ ``np.einsum('ij...,jk...->ik...', a, b)``.
1198
+
1199
+ When there is only one operand, no axes are summed, and no output
1200
+ parameter is provided, a view into the operand is returned instead
1201
+ of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)``
1202
+ produces a view (changed in version 1.10.0).
1203
+
1204
+ `einsum` also provides an alternative way to provide the subscripts and
1205
+ operands as ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``.
1206
+ If the output shape is not provided in this format `einsum` will be
1207
+ calculated in implicit mode, otherwise it will be performed explicitly.
1208
+ The examples below have corresponding `einsum` calls with the two
1209
+ parameter methods.
1210
+
1211
+ Views returned from einsum are now writeable whenever the input array
1212
+ is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now
1213
+ have the same effect as :py:func:`np.swapaxes(a, 0, 2) <numpy.swapaxes>`
1214
+ and ``np.einsum('ii->i', a)`` will return a writeable view of the diagonal
1215
+ of a 2D array.
1216
+
1217
+ Added the ``optimize`` argument which will optimize the contraction order
1218
+ of an einsum expression. For a contraction with three or more operands
1219
+ this can greatly increase the computational efficiency at the cost of
1220
+ a larger memory footprint during computation.
1221
+
1222
+ Typically a 'greedy' algorithm is applied which empirical tests have shown
1223
+ returns the optimal path in the majority of cases. In some cases 'optimal'
1224
+ will return the superlative path through a more expensive, exhaustive
1225
+ search. For iterative calculations it may be advisable to calculate
1226
+ the optimal path once and reuse that path by supplying it as an argument.
1227
+ An example is given below.
1228
+
1229
+ See :py:func:`numpy.einsum_path` for more details.
1230
+
1231
+ Examples
1232
+ --------
1233
+ >>> a = np.arange(25).reshape(5,5)
1234
+ >>> b = np.arange(5)
1235
+ >>> c = np.arange(6).reshape(2,3)
1236
+
1237
+ Trace of a matrix:
1238
+
1239
+ >>> np.einsum('ii', a)
1240
+ 60
1241
+ >>> np.einsum(a, [0,0])
1242
+ 60
1243
+ >>> np.trace(a)
1244
+ 60
1245
+
1246
+ Extract the diagonal (requires explicit form):
1247
+
1248
+ >>> np.einsum('ii->i', a)
1249
+ array([ 0, 6, 12, 18, 24])
1250
+ >>> np.einsum(a, [0,0], [0])
1251
+ array([ 0, 6, 12, 18, 24])
1252
+ >>> np.diag(a)
1253
+ array([ 0, 6, 12, 18, 24])
1254
+
1255
+ Sum over an axis (requires explicit form):
1256
+
1257
+ >>> np.einsum('ij->i', a)
1258
+ array([ 10, 35, 60, 85, 110])
1259
+ >>> np.einsum(a, [0,1], [0])
1260
+ array([ 10, 35, 60, 85, 110])
1261
+ >>> np.sum(a, axis=1)
1262
+ array([ 10, 35, 60, 85, 110])
1263
+
1264
+ For higher dimensional arrays summing a single axis can be done
1265
+ with ellipsis:
1266
+
1267
+ >>> np.einsum('...j->...', a)
1268
+ array([ 10, 35, 60, 85, 110])
1269
+ >>> np.einsum(a, [Ellipsis,1], [Ellipsis])
1270
+ array([ 10, 35, 60, 85, 110])
1271
+
1272
+ Compute a matrix transpose, or reorder any number of axes:
1273
+
1274
+ >>> np.einsum('ji', c)
1275
+ array([[0, 3],
1276
+ [1, 4],
1277
+ [2, 5]])
1278
+ >>> np.einsum('ij->ji', c)
1279
+ array([[0, 3],
1280
+ [1, 4],
1281
+ [2, 5]])
1282
+ >>> np.einsum(c, [1,0])
1283
+ array([[0, 3],
1284
+ [1, 4],
1285
+ [2, 5]])
1286
+ >>> np.transpose(c)
1287
+ array([[0, 3],
1288
+ [1, 4],
1289
+ [2, 5]])
1290
+
1291
+ Vector inner products:
1292
+
1293
+ >>> np.einsum('i,i', b, b)
1294
+ 30
1295
+ >>> np.einsum(b, [0], b, [0])
1296
+ 30
1297
+ >>> np.inner(b,b)
1298
+ 30
1299
+
1300
+ Matrix vector multiplication:
1301
+
1302
+ >>> np.einsum('ij,j', a, b)
1303
+ array([ 30, 80, 130, 180, 230])
1304
+ >>> np.einsum(a, [0,1], b, [1])
1305
+ array([ 30, 80, 130, 180, 230])
1306
+ >>> np.dot(a, b)
1307
+ array([ 30, 80, 130, 180, 230])
1308
+ >>> np.einsum('...j,j', a, b)
1309
+ array([ 30, 80, 130, 180, 230])
1310
+
1311
+ Broadcasting and scalar multiplication:
1312
+
1313
+ >>> np.einsum('..., ...', 3, c)
1314
+ array([[ 0, 3, 6],
1315
+ [ 9, 12, 15]])
1316
+ >>> np.einsum(',ij', 3, c)
1317
+ array([[ 0, 3, 6],
1318
+ [ 9, 12, 15]])
1319
+ >>> np.einsum(3, [Ellipsis], c, [Ellipsis])
1320
+ array([[ 0, 3, 6],
1321
+ [ 9, 12, 15]])
1322
+ >>> np.multiply(3, c)
1323
+ array([[ 0, 3, 6],
1324
+ [ 9, 12, 15]])
1325
+
1326
+ Vector outer product:
1327
+
1328
+ >>> np.einsum('i,j', np.arange(2)+1, b)
1329
+ array([[0, 1, 2, 3, 4],
1330
+ [0, 2, 4, 6, 8]])
1331
+ >>> np.einsum(np.arange(2)+1, [0], b, [1])
1332
+ array([[0, 1, 2, 3, 4],
1333
+ [0, 2, 4, 6, 8]])
1334
+ >>> np.outer(np.arange(2)+1, b)
1335
+ array([[0, 1, 2, 3, 4],
1336
+ [0, 2, 4, 6, 8]])
1337
+
1338
+ Tensor contraction:
1339
+
1340
+ >>> a = np.arange(60.).reshape(3,4,5)
1341
+ >>> b = np.arange(24.).reshape(4,3,2)
1342
+ >>> np.einsum('ijk,jil->kl', a, b)
1343
+ array([[4400., 4730.],
1344
+ [4532., 4874.],
1345
+ [4664., 5018.],
1346
+ [4796., 5162.],
1347
+ [4928., 5306.]])
1348
+ >>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3])
1349
+ array([[4400., 4730.],
1350
+ [4532., 4874.],
1351
+ [4664., 5018.],
1352
+ [4796., 5162.],
1353
+ [4928., 5306.]])
1354
+ >>> np.tensordot(a,b, axes=([1,0],[0,1]))
1355
+ array([[4400., 4730.],
1356
+ [4532., 4874.],
1357
+ [4664., 5018.],
1358
+ [4796., 5162.],
1359
+ [4928., 5306.]])
1360
+
1361
+ Writeable returned arrays (since version 1.10.0):
1362
+
1363
+ >>> a = np.zeros((3, 3))
1364
+ >>> np.einsum('ii->i', a)[:] = 1
1365
+ >>> a
1366
+ array([[1., 0., 0.],
1367
+ [0., 1., 0.],
1368
+ [0., 0., 1.]])
1369
+
1370
+ Example of ellipsis use:
1371
+
1372
+ >>> a = np.arange(6).reshape((3,2))
1373
+ >>> b = np.arange(12).reshape((4,3))
1374
+ >>> np.einsum('ki,jk->ij', a, b)
1375
+ array([[10, 28, 46, 64],
1376
+ [13, 40, 67, 94]])
1377
+ >>> np.einsum('ki,...k->i...', a, b)
1378
+ array([[10, 28, 46, 64],
1379
+ [13, 40, 67, 94]])
1380
+ >>> np.einsum('k...,jk', a, b)
1381
+ array([[10, 28, 46, 64],
1382
+ [13, 40, 67, 94]])
1383
+
1384
+ Chained array operations. For more complicated contractions, speed ups
1385
+ might be achieved by repeatedly computing a 'greedy' path or pre-computing
1386
+ the 'optimal' path and repeatedly applying it, using an `einsum_path`
1387
+ insertion (since version 1.12.0). Performance improvements can be
1388
+ particularly significant with larger arrays:
1389
+
1390
+ >>> a = np.ones(64).reshape(2,4,8)
1391
+
1392
+ Basic `einsum`: ~1520ms (benchmarked on 3.1GHz Intel i5.)
1393
+
1394
+ >>> for iteration in range(500):
1395
+ ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a)
1396
+
1397
+ Sub-optimal `einsum` (due to repeated path calculation time): ~330ms
1398
+
1399
+ >>> for iteration in range(500):
1400
+ ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a,
1401
+ ... optimize='optimal')
1402
+
1403
+ Greedy `einsum` (faster optimal path approximation): ~160ms
1404
+
1405
+ >>> for iteration in range(500):
1406
+ ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='greedy')
1407
+
1408
+ Optimal `einsum` (best usage pattern in some use cases): ~110ms
1409
+
1410
+ >>> path = np.einsum_path('ijk,ilm,njm,nlk,abc->',a,a,a,a,a,
1411
+ ... optimize='optimal')[0]
1412
+ >>> for iteration in range(500):
1413
+ ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize=path)
1414
+
1415
+ """
1416
+ # Special handling if out is specified
1417
+ specified_out = out is not None
1418
+
1419
+ # If no optimization, run pure einsum
1420
+ if optimize is False:
1421
+ if specified_out:
1422
+ kwargs['out'] = out
1423
+ return c_einsum(*operands, **kwargs)
1424
+
1425
+ # Check the kwargs to avoid a more cryptic error later, without having to
1426
+ # repeat default values here
1427
+ valid_einsum_kwargs = ['dtype', 'order', 'casting']
1428
+ unknown_kwargs = [k for (k, v) in kwargs.items() if
1429
+ k not in valid_einsum_kwargs]
1430
+ if len(unknown_kwargs):
1431
+ raise TypeError(f"Did not understand the following kwargs: {unknown_kwargs}")
1432
+
1433
+ # Build the contraction list and operand
1434
+ operands, contraction_list = einsum_path(*operands, optimize=optimize,
1435
+ einsum_call=True)
1436
+
1437
+ # Handle order kwarg for output array, c_einsum allows mixed case
1438
+ output_order = kwargs.pop('order', 'K')
1439
+ if output_order.upper() == 'A':
1440
+ if all(arr.flags.f_contiguous for arr in operands):
1441
+ output_order = 'F'
1442
+ else:
1443
+ output_order = 'C'
1444
+
1445
+ # Start contraction loop
1446
+ for num, contraction in enumerate(contraction_list):
1447
+ inds, idx_rm, einsum_str, remaining, blas = contraction
1448
+ tmp_operands = [operands.pop(x) for x in inds]
1449
+
1450
+ # Do we need to deal with the output?
1451
+ handle_out = specified_out and ((num + 1) == len(contraction_list))
1452
+
1453
+ # Call tensordot if still possible
1454
+ if blas:
1455
+ # Checks have already been handled
1456
+ input_str, results_index = einsum_str.split('->')
1457
+ input_left, input_right = input_str.split(',')
1458
+
1459
+ tensor_result = input_left + input_right
1460
+ for s in idx_rm:
1461
+ tensor_result = tensor_result.replace(s, "")
1462
+
1463
+ # Find indices to contract over
1464
+ left_pos, right_pos = [], []
1465
+ for s in sorted(idx_rm):
1466
+ left_pos.append(input_left.find(s))
1467
+ right_pos.append(input_right.find(s))
1468
+
1469
+ # Contract!
1470
+ new_view = tensordot(
1471
+ *tmp_operands, axes=(tuple(left_pos), tuple(right_pos))
1472
+ )
1473
+
1474
+ # Build a new view if needed
1475
+ if (tensor_result != results_index) or handle_out:
1476
+ if handle_out:
1477
+ kwargs["out"] = out
1478
+ new_view = c_einsum(
1479
+ tensor_result + '->' + results_index, new_view, **kwargs
1480
+ )
1481
+
1482
+ # Call einsum
1483
+ else:
1484
+ # If out was specified
1485
+ if handle_out:
1486
+ kwargs["out"] = out
1487
+
1488
+ # Do the contraction
1489
+ new_view = c_einsum(einsum_str, *tmp_operands, **kwargs)
1490
+
1491
+ # Append new items and dereference what we can
1492
+ operands.append(new_view)
1493
+ del tmp_operands, new_view
1494
+
1495
+ if specified_out:
1496
+ return out
1497
+ else:
1498
+ return asanyarray(operands[0], order=output_order)