Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- janus/lib/python3.10/site-packages/numpy/__pycache__/_array_api_info.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/numpy/__pycache__/_configtool.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/numpy/__pycache__/_globals.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/numpy/__pycache__/_pytesttester.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/numpy/__pycache__/ctypeslib.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/numpy/__pycache__/dtypes.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/numpy/__pycache__/matlib.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/numpy/__pycache__/version.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/numpy/_core/_add_newdocs_scalars.py +389 -0
- janus/lib/python3.10/site-packages/numpy/_core/_asarray.py +135 -0
- janus/lib/python3.10/site-packages/numpy/_core/_internal.py +963 -0
- janus/lib/python3.10/site-packages/numpy/_core/_machar.py +356 -0
- janus/lib/python3.10/site-packages/numpy/_core/_rational_tests.cpython-310-x86_64-linux-gnu.so +0 -0
- janus/lib/python3.10/site-packages/numpy/_core/_string_helpers.py +100 -0
- janus/lib/python3.10/site-packages/numpy/_core/_ufunc_config.pyi +39 -0
- janus/lib/python3.10/site-packages/numpy/_core/memmap.py +361 -0
- janus/lib/python3.10/site-packages/numpy/_core/umath.py +40 -0
- janus/lib/python3.10/site-packages/numpy/lib/__pycache__/__init__.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/numpy/lib/__pycache__/_array_utils_impl.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/numpy/lib/__pycache__/_arraypad_impl.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/numpy/lib/__pycache__/_arraysetops_impl.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/numpy/lib/__pycache__/_arrayterator_impl.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/numpy/lib/__pycache__/_datasource.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/numpy/lib/__pycache__/_histograms_impl.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/numpy/lib/__pycache__/_index_tricks_impl.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/numpy/lib/__pycache__/_iotools.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/numpy/lib/__pycache__/_nanfunctions_impl.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/numpy/lib/__pycache__/_npyio_impl.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/numpy/lib/__pycache__/_polynomial_impl.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/numpy/lib/__pycache__/_scimath_impl.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/numpy/lib/__pycache__/_shape_base_impl.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/numpy/lib/__pycache__/_twodim_base_impl.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/numpy/lib/__pycache__/_type_check_impl.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/numpy/lib/__pycache__/_ufunclike_impl.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/numpy/lib/__pycache__/_user_array_impl.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/numpy/lib/__pycache__/_utils_impl.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/numpy/lib/__pycache__/_version.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/numpy/lib/__pycache__/array_utils.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/numpy/lib/__pycache__/format.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/numpy/lib/__pycache__/introspect.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/numpy/lib/__pycache__/mixins.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/numpy/lib/__pycache__/npyio.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/numpy/lib/__pycache__/recfunctions.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/numpy/lib/__pycache__/scimath.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/numpy/lib/__pycache__/stride_tricks.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/numpy/lib/__pycache__/user_array.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/numpy/lib/tests/__pycache__/__init__.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/numpy/lib/tests/__pycache__/test__iotools.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/numpy/lib/tests/__pycache__/test_arraysetops.cpython-310.pyc +0 -0
- janus/lib/python3.10/site-packages/numpy/lib/tests/__pycache__/test_arrayterator.cpython-310.pyc +0 -0
janus/lib/python3.10/site-packages/numpy/__pycache__/_array_api_info.cpython-310.pyc
ADDED
|
Binary file (9.19 kB). View file
|
|
|
janus/lib/python3.10/site-packages/numpy/__pycache__/_configtool.cpython-310.pyc
ADDED
|
Binary file (1.13 kB). View file
|
|
|
janus/lib/python3.10/site-packages/numpy/__pycache__/_globals.cpython-310.pyc
ADDED
|
Binary file (3.47 kB). View file
|
|
|
janus/lib/python3.10/site-packages/numpy/__pycache__/_pytesttester.cpython-310.pyc
ADDED
|
Binary file (5.69 kB). View file
|
|
|
janus/lib/python3.10/site-packages/numpy/__pycache__/ctypeslib.cpython-310.pyc
ADDED
|
Binary file (16.1 kB). View file
|
|
|
janus/lib/python3.10/site-packages/numpy/__pycache__/dtypes.cpython-310.pyc
ADDED
|
Binary file (1.28 kB). View file
|
|
|
janus/lib/python3.10/site-packages/numpy/__pycache__/matlib.cpython-310.pyc
ADDED
|
Binary file (10.7 kB). View file
|
|
|
janus/lib/python3.10/site-packages/numpy/__pycache__/version.cpython-310.pyc
ADDED
|
Binary file (439 Bytes). View file
|
|
|
janus/lib/python3.10/site-packages/numpy/_core/_add_newdocs_scalars.py
ADDED
|
@@ -0,0 +1,389 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
This file is separate from ``_add_newdocs.py`` so that it can be mocked out by
|
| 3 |
+
our sphinx ``conf.py`` during doc builds, where we want to avoid showing
|
| 4 |
+
platform-dependent information.
|
| 5 |
+
"""
|
| 6 |
+
import sys
|
| 7 |
+
import os
|
| 8 |
+
from numpy._core import dtype
|
| 9 |
+
from numpy._core import numerictypes as _numerictypes
|
| 10 |
+
from numpy._core.function_base import add_newdoc
|
| 11 |
+
|
| 12 |
+
##############################################################################
|
| 13 |
+
#
|
| 14 |
+
# Documentation for concrete scalar classes
|
| 15 |
+
#
|
| 16 |
+
##############################################################################
|
| 17 |
+
|
| 18 |
+
def numeric_type_aliases(aliases):
|
| 19 |
+
def type_aliases_gen():
|
| 20 |
+
for alias, doc in aliases:
|
| 21 |
+
try:
|
| 22 |
+
alias_type = getattr(_numerictypes, alias)
|
| 23 |
+
except AttributeError:
|
| 24 |
+
# The set of aliases that actually exist varies between platforms
|
| 25 |
+
pass
|
| 26 |
+
else:
|
| 27 |
+
yield (alias_type, alias, doc)
|
| 28 |
+
return list(type_aliases_gen())
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
possible_aliases = numeric_type_aliases([
|
| 32 |
+
('int8', '8-bit signed integer (``-128`` to ``127``)'),
|
| 33 |
+
('int16', '16-bit signed integer (``-32_768`` to ``32_767``)'),
|
| 34 |
+
('int32', '32-bit signed integer (``-2_147_483_648`` to ``2_147_483_647``)'),
|
| 35 |
+
('int64', '64-bit signed integer (``-9_223_372_036_854_775_808`` to ``9_223_372_036_854_775_807``)'),
|
| 36 |
+
('intp', 'Signed integer large enough to fit pointer, compatible with C ``intptr_t``'),
|
| 37 |
+
('uint8', '8-bit unsigned integer (``0`` to ``255``)'),
|
| 38 |
+
('uint16', '16-bit unsigned integer (``0`` to ``65_535``)'),
|
| 39 |
+
('uint32', '32-bit unsigned integer (``0`` to ``4_294_967_295``)'),
|
| 40 |
+
('uint64', '64-bit unsigned integer (``0`` to ``18_446_744_073_709_551_615``)'),
|
| 41 |
+
('uintp', 'Unsigned integer large enough to fit pointer, compatible with C ``uintptr_t``'),
|
| 42 |
+
('float16', '16-bit-precision floating-point number type: sign bit, 5 bits exponent, 10 bits mantissa'),
|
| 43 |
+
('float32', '32-bit-precision floating-point number type: sign bit, 8 bits exponent, 23 bits mantissa'),
|
| 44 |
+
('float64', '64-bit precision floating-point number type: sign bit, 11 bits exponent, 52 bits mantissa'),
|
| 45 |
+
('float96', '96-bit extended-precision floating-point number type'),
|
| 46 |
+
('float128', '128-bit extended-precision floating-point number type'),
|
| 47 |
+
('complex64', 'Complex number type composed of 2 32-bit-precision floating-point numbers'),
|
| 48 |
+
('complex128', 'Complex number type composed of 2 64-bit-precision floating-point numbers'),
|
| 49 |
+
('complex192', 'Complex number type composed of 2 96-bit extended-precision floating-point numbers'),
|
| 50 |
+
('complex256', 'Complex number type composed of 2 128-bit extended-precision floating-point numbers'),
|
| 51 |
+
])
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def _get_platform_and_machine():
|
| 55 |
+
try:
|
| 56 |
+
system, _, _, _, machine = os.uname()
|
| 57 |
+
except AttributeError:
|
| 58 |
+
system = sys.platform
|
| 59 |
+
if system == 'win32':
|
| 60 |
+
machine = os.environ.get('PROCESSOR_ARCHITEW6432', '') \
|
| 61 |
+
or os.environ.get('PROCESSOR_ARCHITECTURE', '')
|
| 62 |
+
else:
|
| 63 |
+
machine = 'unknown'
|
| 64 |
+
return system, machine
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
_system, _machine = _get_platform_and_machine()
|
| 68 |
+
_doc_alias_string = f":Alias on this platform ({_system} {_machine}):"
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def add_newdoc_for_scalar_type(obj, fixed_aliases, doc):
|
| 72 |
+
# note: `:field: value` is rST syntax which renders as field lists.
|
| 73 |
+
o = getattr(_numerictypes, obj)
|
| 74 |
+
|
| 75 |
+
character_code = dtype(o).char
|
| 76 |
+
canonical_name_doc = "" if obj == o.__name__ else \
|
| 77 |
+
f":Canonical name: `numpy.{obj}`\n "
|
| 78 |
+
if fixed_aliases:
|
| 79 |
+
alias_doc = ''.join(f":Alias: `numpy.{alias}`\n "
|
| 80 |
+
for alias in fixed_aliases)
|
| 81 |
+
else:
|
| 82 |
+
alias_doc = ''
|
| 83 |
+
alias_doc += ''.join(f"{_doc_alias_string} `numpy.{alias}`: {doc}.\n "
|
| 84 |
+
for (alias_type, alias, doc) in possible_aliases if alias_type is o)
|
| 85 |
+
|
| 86 |
+
docstring = f"""
|
| 87 |
+
{doc.strip()}
|
| 88 |
+
|
| 89 |
+
:Character code: ``'{character_code}'``
|
| 90 |
+
{canonical_name_doc}{alias_doc}
|
| 91 |
+
"""
|
| 92 |
+
|
| 93 |
+
add_newdoc('numpy._core.numerictypes', obj, docstring)
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
_bool_docstring = (
|
| 97 |
+
"""
|
| 98 |
+
Boolean type (True or False), stored as a byte.
|
| 99 |
+
|
| 100 |
+
.. warning::
|
| 101 |
+
|
| 102 |
+
The :class:`bool` type is not a subclass of the :class:`int_` type
|
| 103 |
+
(the :class:`bool` is not even a number type). This is different
|
| 104 |
+
than Python's default implementation of :class:`bool` as a
|
| 105 |
+
sub-class of :class:`int`.
|
| 106 |
+
"""
|
| 107 |
+
)
|
| 108 |
+
|
| 109 |
+
add_newdoc_for_scalar_type('bool', [], _bool_docstring)
|
| 110 |
+
|
| 111 |
+
add_newdoc_for_scalar_type('bool_', [], _bool_docstring)
|
| 112 |
+
|
| 113 |
+
add_newdoc_for_scalar_type('byte', [],
|
| 114 |
+
"""
|
| 115 |
+
Signed integer type, compatible with C ``char``.
|
| 116 |
+
""")
|
| 117 |
+
|
| 118 |
+
add_newdoc_for_scalar_type('short', [],
|
| 119 |
+
"""
|
| 120 |
+
Signed integer type, compatible with C ``short``.
|
| 121 |
+
""")
|
| 122 |
+
|
| 123 |
+
add_newdoc_for_scalar_type('intc', [],
|
| 124 |
+
"""
|
| 125 |
+
Signed integer type, compatible with C ``int``.
|
| 126 |
+
""")
|
| 127 |
+
|
| 128 |
+
# TODO: These docs probably need an if to highlight the default rather than
|
| 129 |
+
# the C-types (and be correct).
|
| 130 |
+
add_newdoc_for_scalar_type('int_', [],
|
| 131 |
+
"""
|
| 132 |
+
Default signed integer type, 64bit on 64bit systems and 32bit on 32bit
|
| 133 |
+
systems.
|
| 134 |
+
""")
|
| 135 |
+
|
| 136 |
+
add_newdoc_for_scalar_type('longlong', [],
|
| 137 |
+
"""
|
| 138 |
+
Signed integer type, compatible with C ``long long``.
|
| 139 |
+
""")
|
| 140 |
+
|
| 141 |
+
add_newdoc_for_scalar_type('ubyte', [],
|
| 142 |
+
"""
|
| 143 |
+
Unsigned integer type, compatible with C ``unsigned char``.
|
| 144 |
+
""")
|
| 145 |
+
|
| 146 |
+
add_newdoc_for_scalar_type('ushort', [],
|
| 147 |
+
"""
|
| 148 |
+
Unsigned integer type, compatible with C ``unsigned short``.
|
| 149 |
+
""")
|
| 150 |
+
|
| 151 |
+
add_newdoc_for_scalar_type('uintc', [],
|
| 152 |
+
"""
|
| 153 |
+
Unsigned integer type, compatible with C ``unsigned int``.
|
| 154 |
+
""")
|
| 155 |
+
|
| 156 |
+
add_newdoc_for_scalar_type('uint', [],
|
| 157 |
+
"""
|
| 158 |
+
Unsigned signed integer type, 64bit on 64bit systems and 32bit on 32bit
|
| 159 |
+
systems.
|
| 160 |
+
""")
|
| 161 |
+
|
| 162 |
+
add_newdoc_for_scalar_type('ulonglong', [],
|
| 163 |
+
"""
|
| 164 |
+
Signed integer type, compatible with C ``unsigned long long``.
|
| 165 |
+
""")
|
| 166 |
+
|
| 167 |
+
add_newdoc_for_scalar_type('half', [],
|
| 168 |
+
"""
|
| 169 |
+
Half-precision floating-point number type.
|
| 170 |
+
""")
|
| 171 |
+
|
| 172 |
+
add_newdoc_for_scalar_type('single', [],
|
| 173 |
+
"""
|
| 174 |
+
Single-precision floating-point number type, compatible with C ``float``.
|
| 175 |
+
""")
|
| 176 |
+
|
| 177 |
+
add_newdoc_for_scalar_type('double', [],
|
| 178 |
+
"""
|
| 179 |
+
Double-precision floating-point number type, compatible with Python
|
| 180 |
+
:class:`float` and C ``double``.
|
| 181 |
+
""")
|
| 182 |
+
|
| 183 |
+
add_newdoc_for_scalar_type('longdouble', [],
|
| 184 |
+
"""
|
| 185 |
+
Extended-precision floating-point number type, compatible with C
|
| 186 |
+
``long double`` but not necessarily with IEEE 754 quadruple-precision.
|
| 187 |
+
""")
|
| 188 |
+
|
| 189 |
+
add_newdoc_for_scalar_type('csingle', [],
|
| 190 |
+
"""
|
| 191 |
+
Complex number type composed of two single-precision floating-point
|
| 192 |
+
numbers.
|
| 193 |
+
""")
|
| 194 |
+
|
| 195 |
+
add_newdoc_for_scalar_type('cdouble', [],
|
| 196 |
+
"""
|
| 197 |
+
Complex number type composed of two double-precision floating-point
|
| 198 |
+
numbers, compatible with Python :class:`complex`.
|
| 199 |
+
""")
|
| 200 |
+
|
| 201 |
+
add_newdoc_for_scalar_type('clongdouble', [],
|
| 202 |
+
"""
|
| 203 |
+
Complex number type composed of two extended-precision floating-point
|
| 204 |
+
numbers.
|
| 205 |
+
""")
|
| 206 |
+
|
| 207 |
+
add_newdoc_for_scalar_type('object_', [],
|
| 208 |
+
"""
|
| 209 |
+
Any Python object.
|
| 210 |
+
""")
|
| 211 |
+
|
| 212 |
+
add_newdoc_for_scalar_type('str_', [],
|
| 213 |
+
r"""
|
| 214 |
+
A unicode string.
|
| 215 |
+
|
| 216 |
+
This type strips trailing null codepoints.
|
| 217 |
+
|
| 218 |
+
>>> s = np.str_("abc\x00")
|
| 219 |
+
>>> s
|
| 220 |
+
'abc'
|
| 221 |
+
|
| 222 |
+
Unlike the builtin :class:`str`, this supports the
|
| 223 |
+
:ref:`python:bufferobjects`, exposing its contents as UCS4:
|
| 224 |
+
|
| 225 |
+
>>> m = memoryview(np.str_("abc"))
|
| 226 |
+
>>> m.format
|
| 227 |
+
'3w'
|
| 228 |
+
>>> m.tobytes()
|
| 229 |
+
b'a\x00\x00\x00b\x00\x00\x00c\x00\x00\x00'
|
| 230 |
+
""")
|
| 231 |
+
|
| 232 |
+
add_newdoc_for_scalar_type('bytes_', [],
|
| 233 |
+
r"""
|
| 234 |
+
A byte string.
|
| 235 |
+
|
| 236 |
+
When used in arrays, this type strips trailing null bytes.
|
| 237 |
+
""")
|
| 238 |
+
|
| 239 |
+
add_newdoc_for_scalar_type('void', [],
|
| 240 |
+
r"""
|
| 241 |
+
np.void(length_or_data, /, dtype=None)
|
| 242 |
+
|
| 243 |
+
Create a new structured or unstructured void scalar.
|
| 244 |
+
|
| 245 |
+
Parameters
|
| 246 |
+
----------
|
| 247 |
+
length_or_data : int, array-like, bytes-like, object
|
| 248 |
+
One of multiple meanings (see notes). The length or
|
| 249 |
+
bytes data of an unstructured void. Or alternatively,
|
| 250 |
+
the data to be stored in the new scalar when `dtype`
|
| 251 |
+
is provided.
|
| 252 |
+
This can be an array-like, in which case an array may
|
| 253 |
+
be returned.
|
| 254 |
+
dtype : dtype, optional
|
| 255 |
+
If provided the dtype of the new scalar. This dtype must
|
| 256 |
+
be "void" dtype (i.e. a structured or unstructured void,
|
| 257 |
+
see also :ref:`defining-structured-types`).
|
| 258 |
+
|
| 259 |
+
.. versionadded:: 1.24
|
| 260 |
+
|
| 261 |
+
Notes
|
| 262 |
+
-----
|
| 263 |
+
For historical reasons and because void scalars can represent both
|
| 264 |
+
arbitrary byte data and structured dtypes, the void constructor
|
| 265 |
+
has three calling conventions:
|
| 266 |
+
|
| 267 |
+
1. ``np.void(5)`` creates a ``dtype="V5"`` scalar filled with five
|
| 268 |
+
``\0`` bytes. The 5 can be a Python or NumPy integer.
|
| 269 |
+
2. ``np.void(b"bytes-like")`` creates a void scalar from the byte string.
|
| 270 |
+
The dtype itemsize will match the byte string length, here ``"V10"``.
|
| 271 |
+
3. When a ``dtype=`` is passed the call is roughly the same as an
|
| 272 |
+
array creation. However, a void scalar rather than array is returned.
|
| 273 |
+
|
| 274 |
+
Please see the examples which show all three different conventions.
|
| 275 |
+
|
| 276 |
+
Examples
|
| 277 |
+
--------
|
| 278 |
+
>>> np.void(5)
|
| 279 |
+
np.void(b'\x00\x00\x00\x00\x00')
|
| 280 |
+
>>> np.void(b'abcd')
|
| 281 |
+
np.void(b'\x61\x62\x63\x64')
|
| 282 |
+
>>> np.void((3.2, b'eggs'), dtype="d,S5")
|
| 283 |
+
np.void((3.2, b'eggs'), dtype=[('f0', '<f8'), ('f1', 'S5')])
|
| 284 |
+
>>> np.void(3, dtype=[('x', np.int8), ('y', np.int8)])
|
| 285 |
+
np.void((3, 3), dtype=[('x', 'i1'), ('y', 'i1')])
|
| 286 |
+
|
| 287 |
+
""")
|
| 288 |
+
|
| 289 |
+
add_newdoc_for_scalar_type('datetime64', [],
|
| 290 |
+
"""
|
| 291 |
+
If created from a 64-bit integer, it represents an offset from
|
| 292 |
+
``1970-01-01T00:00:00``.
|
| 293 |
+
If created from string, the string can be in ISO 8601 date
|
| 294 |
+
or datetime format.
|
| 295 |
+
|
| 296 |
+
When parsing a string to create a datetime object, if the string contains
|
| 297 |
+
a trailing timezone (A 'Z' or a timezone offset), the timezone will be
|
| 298 |
+
dropped and a User Warning is given.
|
| 299 |
+
|
| 300 |
+
Datetime64 objects should be considered to be UTC and therefore have an
|
| 301 |
+
offset of +0000.
|
| 302 |
+
|
| 303 |
+
>>> np.datetime64(10, 'Y')
|
| 304 |
+
np.datetime64('1980')
|
| 305 |
+
>>> np.datetime64('1980', 'Y')
|
| 306 |
+
np.datetime64('1980')
|
| 307 |
+
>>> np.datetime64(10, 'D')
|
| 308 |
+
np.datetime64('1970-01-11')
|
| 309 |
+
|
| 310 |
+
See :ref:`arrays.datetime` for more information.
|
| 311 |
+
""")
|
| 312 |
+
|
| 313 |
+
add_newdoc_for_scalar_type('timedelta64', [],
|
| 314 |
+
"""
|
| 315 |
+
A timedelta stored as a 64-bit integer.
|
| 316 |
+
|
| 317 |
+
See :ref:`arrays.datetime` for more information.
|
| 318 |
+
""")
|
| 319 |
+
|
| 320 |
+
add_newdoc('numpy._core.numerictypes', "integer", ('is_integer',
|
| 321 |
+
"""
|
| 322 |
+
integer.is_integer() -> bool
|
| 323 |
+
|
| 324 |
+
Return ``True`` if the number is finite with integral value.
|
| 325 |
+
|
| 326 |
+
.. versionadded:: 1.22
|
| 327 |
+
|
| 328 |
+
Examples
|
| 329 |
+
--------
|
| 330 |
+
>>> import numpy as np
|
| 331 |
+
>>> np.int64(-2).is_integer()
|
| 332 |
+
True
|
| 333 |
+
>>> np.uint32(5).is_integer()
|
| 334 |
+
True
|
| 335 |
+
"""))
|
| 336 |
+
|
| 337 |
+
# TODO: work out how to put this on the base class, np.floating
|
| 338 |
+
for float_name in ('half', 'single', 'double', 'longdouble'):
|
| 339 |
+
add_newdoc('numpy._core.numerictypes', float_name, ('as_integer_ratio',
|
| 340 |
+
"""
|
| 341 |
+
{ftype}.as_integer_ratio() -> (int, int)
|
| 342 |
+
|
| 343 |
+
Return a pair of integers, whose ratio is exactly equal to the original
|
| 344 |
+
floating point number, and with a positive denominator.
|
| 345 |
+
Raise `OverflowError` on infinities and a `ValueError` on NaNs.
|
| 346 |
+
|
| 347 |
+
>>> np.{ftype}(10.0).as_integer_ratio()
|
| 348 |
+
(10, 1)
|
| 349 |
+
>>> np.{ftype}(0.0).as_integer_ratio()
|
| 350 |
+
(0, 1)
|
| 351 |
+
>>> np.{ftype}(-.25).as_integer_ratio()
|
| 352 |
+
(-1, 4)
|
| 353 |
+
""".format(ftype=float_name)))
|
| 354 |
+
|
| 355 |
+
add_newdoc('numpy._core.numerictypes', float_name, ('is_integer',
|
| 356 |
+
f"""
|
| 357 |
+
{float_name}.is_integer() -> bool
|
| 358 |
+
|
| 359 |
+
Return ``True`` if the floating point number is finite with integral
|
| 360 |
+
value, and ``False`` otherwise.
|
| 361 |
+
|
| 362 |
+
.. versionadded:: 1.22
|
| 363 |
+
|
| 364 |
+
Examples
|
| 365 |
+
--------
|
| 366 |
+
>>> np.{float_name}(-2.0).is_integer()
|
| 367 |
+
True
|
| 368 |
+
>>> np.{float_name}(3.2).is_integer()
|
| 369 |
+
False
|
| 370 |
+
"""))
|
| 371 |
+
|
| 372 |
+
for int_name in ('int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32',
|
| 373 |
+
'int64', 'uint64', 'int64', 'uint64', 'int64', 'uint64'):
|
| 374 |
+
# Add negative examples for signed cases by checking typecode
|
| 375 |
+
add_newdoc('numpy._core.numerictypes', int_name, ('bit_count',
|
| 376 |
+
f"""
|
| 377 |
+
{int_name}.bit_count() -> int
|
| 378 |
+
|
| 379 |
+
Computes the number of 1-bits in the absolute value of the input.
|
| 380 |
+
Analogous to the builtin `int.bit_count` or ``popcount`` in C++.
|
| 381 |
+
|
| 382 |
+
Examples
|
| 383 |
+
--------
|
| 384 |
+
>>> np.{int_name}(127).bit_count()
|
| 385 |
+
7""" +
|
| 386 |
+
(f"""
|
| 387 |
+
>>> np.{int_name}(-127).bit_count()
|
| 388 |
+
7
|
| 389 |
+
""" if dtype(int_name).char.islower() else "")))
|
janus/lib/python3.10/site-packages/numpy/_core/_asarray.py
ADDED
|
@@ -0,0 +1,135 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Functions in the ``as*array`` family that promote array-likes into arrays.
|
| 3 |
+
|
| 4 |
+
`require` fits this category despite its name not matching this pattern.
|
| 5 |
+
"""
|
| 6 |
+
from .overrides import (
|
| 7 |
+
array_function_dispatch,
|
| 8 |
+
finalize_array_function_like,
|
| 9 |
+
set_module,
|
| 10 |
+
)
|
| 11 |
+
from .multiarray import array, asanyarray
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
__all__ = ["require"]
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
POSSIBLE_FLAGS = {
|
| 18 |
+
'C': 'C', 'C_CONTIGUOUS': 'C', 'CONTIGUOUS': 'C',
|
| 19 |
+
'F': 'F', 'F_CONTIGUOUS': 'F', 'FORTRAN': 'F',
|
| 20 |
+
'A': 'A', 'ALIGNED': 'A',
|
| 21 |
+
'W': 'W', 'WRITEABLE': 'W',
|
| 22 |
+
'O': 'O', 'OWNDATA': 'O',
|
| 23 |
+
'E': 'E', 'ENSUREARRAY': 'E'
|
| 24 |
+
}
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
@finalize_array_function_like
|
| 28 |
+
@set_module('numpy')
|
| 29 |
+
def require(a, dtype=None, requirements=None, *, like=None):
|
| 30 |
+
"""
|
| 31 |
+
Return an ndarray of the provided type that satisfies requirements.
|
| 32 |
+
|
| 33 |
+
This function is useful to be sure that an array with the correct flags
|
| 34 |
+
is returned for passing to compiled code (perhaps through ctypes).
|
| 35 |
+
|
| 36 |
+
Parameters
|
| 37 |
+
----------
|
| 38 |
+
a : array_like
|
| 39 |
+
The object to be converted to a type-and-requirement-satisfying array.
|
| 40 |
+
dtype : data-type
|
| 41 |
+
The required data-type. If None preserve the current dtype. If your
|
| 42 |
+
application requires the data to be in native byteorder, include
|
| 43 |
+
a byteorder specification as a part of the dtype specification.
|
| 44 |
+
requirements : str or sequence of str
|
| 45 |
+
The requirements list can be any of the following
|
| 46 |
+
|
| 47 |
+
* 'F_CONTIGUOUS' ('F') - ensure a Fortran-contiguous array
|
| 48 |
+
* 'C_CONTIGUOUS' ('C') - ensure a C-contiguous array
|
| 49 |
+
* 'ALIGNED' ('A') - ensure a data-type aligned array
|
| 50 |
+
* 'WRITEABLE' ('W') - ensure a writable array
|
| 51 |
+
* 'OWNDATA' ('O') - ensure an array that owns its own data
|
| 52 |
+
* 'ENSUREARRAY', ('E') - ensure a base array, instead of a subclass
|
| 53 |
+
${ARRAY_FUNCTION_LIKE}
|
| 54 |
+
|
| 55 |
+
.. versionadded:: 1.20.0
|
| 56 |
+
|
| 57 |
+
Returns
|
| 58 |
+
-------
|
| 59 |
+
out : ndarray
|
| 60 |
+
Array with specified requirements and type if given.
|
| 61 |
+
|
| 62 |
+
See Also
|
| 63 |
+
--------
|
| 64 |
+
asarray : Convert input to an ndarray.
|
| 65 |
+
asanyarray : Convert to an ndarray, but pass through ndarray subclasses.
|
| 66 |
+
ascontiguousarray : Convert input to a contiguous array.
|
| 67 |
+
asfortranarray : Convert input to an ndarray with column-major
|
| 68 |
+
memory order.
|
| 69 |
+
ndarray.flags : Information about the memory layout of the array.
|
| 70 |
+
|
| 71 |
+
Notes
|
| 72 |
+
-----
|
| 73 |
+
The returned array will be guaranteed to have the listed requirements
|
| 74 |
+
by making a copy if needed.
|
| 75 |
+
|
| 76 |
+
Examples
|
| 77 |
+
--------
|
| 78 |
+
>>> import numpy as np
|
| 79 |
+
>>> x = np.arange(6).reshape(2,3)
|
| 80 |
+
>>> x.flags
|
| 81 |
+
C_CONTIGUOUS : True
|
| 82 |
+
F_CONTIGUOUS : False
|
| 83 |
+
OWNDATA : False
|
| 84 |
+
WRITEABLE : True
|
| 85 |
+
ALIGNED : True
|
| 86 |
+
WRITEBACKIFCOPY : False
|
| 87 |
+
|
| 88 |
+
>>> y = np.require(x, dtype=np.float32, requirements=['A', 'O', 'W', 'F'])
|
| 89 |
+
>>> y.flags
|
| 90 |
+
C_CONTIGUOUS : False
|
| 91 |
+
F_CONTIGUOUS : True
|
| 92 |
+
OWNDATA : True
|
| 93 |
+
WRITEABLE : True
|
| 94 |
+
ALIGNED : True
|
| 95 |
+
WRITEBACKIFCOPY : False
|
| 96 |
+
|
| 97 |
+
"""
|
| 98 |
+
if like is not None:
|
| 99 |
+
return _require_with_like(
|
| 100 |
+
like,
|
| 101 |
+
a,
|
| 102 |
+
dtype=dtype,
|
| 103 |
+
requirements=requirements,
|
| 104 |
+
)
|
| 105 |
+
|
| 106 |
+
if not requirements:
|
| 107 |
+
return asanyarray(a, dtype=dtype)
|
| 108 |
+
|
| 109 |
+
requirements = {POSSIBLE_FLAGS[x.upper()] for x in requirements}
|
| 110 |
+
|
| 111 |
+
if 'E' in requirements:
|
| 112 |
+
requirements.remove('E')
|
| 113 |
+
subok = False
|
| 114 |
+
else:
|
| 115 |
+
subok = True
|
| 116 |
+
|
| 117 |
+
order = 'A'
|
| 118 |
+
if requirements >= {'C', 'F'}:
|
| 119 |
+
raise ValueError('Cannot specify both "C" and "F" order')
|
| 120 |
+
elif 'F' in requirements:
|
| 121 |
+
order = 'F'
|
| 122 |
+
requirements.remove('F')
|
| 123 |
+
elif 'C' in requirements:
|
| 124 |
+
order = 'C'
|
| 125 |
+
requirements.remove('C')
|
| 126 |
+
|
| 127 |
+
arr = array(a, dtype=dtype, order=order, copy=None, subok=subok)
|
| 128 |
+
|
| 129 |
+
for prop in requirements:
|
| 130 |
+
if not arr.flags[prop]:
|
| 131 |
+
return arr.copy(order)
|
| 132 |
+
return arr
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
_require_with_like = array_function_dispatch()(require)
|
janus/lib/python3.10/site-packages/numpy/_core/_internal.py
ADDED
|
@@ -0,0 +1,963 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
A place for internal code
|
| 3 |
+
|
| 4 |
+
Some things are more easily handled Python.
|
| 5 |
+
|
| 6 |
+
"""
|
| 7 |
+
import ast
|
| 8 |
+
import math
|
| 9 |
+
import re
|
| 10 |
+
import sys
|
| 11 |
+
import warnings
|
| 12 |
+
|
| 13 |
+
from ..exceptions import DTypePromotionError
|
| 14 |
+
from .multiarray import dtype, array, ndarray, promote_types, StringDType
|
| 15 |
+
from numpy import _NoValue
|
| 16 |
+
try:
|
| 17 |
+
import ctypes
|
| 18 |
+
except ImportError:
|
| 19 |
+
ctypes = None
|
| 20 |
+
|
| 21 |
+
IS_PYPY = sys.implementation.name == 'pypy'
|
| 22 |
+
|
| 23 |
+
if sys.byteorder == 'little':
|
| 24 |
+
_nbo = '<'
|
| 25 |
+
else:
|
| 26 |
+
_nbo = '>'
|
| 27 |
+
|
| 28 |
+
def _makenames_list(adict, align):
|
| 29 |
+
allfields = []
|
| 30 |
+
|
| 31 |
+
for fname, obj in adict.items():
|
| 32 |
+
n = len(obj)
|
| 33 |
+
if not isinstance(obj, tuple) or n not in (2, 3):
|
| 34 |
+
raise ValueError("entry not a 2- or 3- tuple")
|
| 35 |
+
if n > 2 and obj[2] == fname:
|
| 36 |
+
continue
|
| 37 |
+
num = int(obj[1])
|
| 38 |
+
if num < 0:
|
| 39 |
+
raise ValueError("invalid offset.")
|
| 40 |
+
format = dtype(obj[0], align=align)
|
| 41 |
+
if n > 2:
|
| 42 |
+
title = obj[2]
|
| 43 |
+
else:
|
| 44 |
+
title = None
|
| 45 |
+
allfields.append((fname, format, num, title))
|
| 46 |
+
# sort by offsets
|
| 47 |
+
allfields.sort(key=lambda x: x[2])
|
| 48 |
+
names = [x[0] for x in allfields]
|
| 49 |
+
formats = [x[1] for x in allfields]
|
| 50 |
+
offsets = [x[2] for x in allfields]
|
| 51 |
+
titles = [x[3] for x in allfields]
|
| 52 |
+
|
| 53 |
+
return names, formats, offsets, titles
|
| 54 |
+
|
| 55 |
+
# Called in PyArray_DescrConverter function when
|
| 56 |
+
# a dictionary without "names" and "formats"
|
| 57 |
+
# fields is used as a data-type descriptor.
|
| 58 |
+
def _usefields(adict, align):
|
| 59 |
+
try:
|
| 60 |
+
names = adict[-1]
|
| 61 |
+
except KeyError:
|
| 62 |
+
names = None
|
| 63 |
+
if names is None:
|
| 64 |
+
names, formats, offsets, titles = _makenames_list(adict, align)
|
| 65 |
+
else:
|
| 66 |
+
formats = []
|
| 67 |
+
offsets = []
|
| 68 |
+
titles = []
|
| 69 |
+
for name in names:
|
| 70 |
+
res = adict[name]
|
| 71 |
+
formats.append(res[0])
|
| 72 |
+
offsets.append(res[1])
|
| 73 |
+
if len(res) > 2:
|
| 74 |
+
titles.append(res[2])
|
| 75 |
+
else:
|
| 76 |
+
titles.append(None)
|
| 77 |
+
|
| 78 |
+
return dtype({"names": names,
|
| 79 |
+
"formats": formats,
|
| 80 |
+
"offsets": offsets,
|
| 81 |
+
"titles": titles}, align)
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
# construct an array_protocol descriptor list
|
| 85 |
+
# from the fields attribute of a descriptor
|
| 86 |
+
# This calls itself recursively but should eventually hit
|
| 87 |
+
# a descriptor that has no fields and then return
|
| 88 |
+
# a simple typestring
|
| 89 |
+
|
| 90 |
+
def _array_descr(descriptor):
    """Recursively build the ``__array_interface__`` 'descr' list for a dtype.

    Scalar dtypes reduce to their typestring (optionally paired with
    metadata); sub-array dtypes recurse into their base; structured dtypes
    become a list of ``(name, descr[, shape])`` tuples with explicit
    ``('', '|V<n>')`` entries standing in for padding bytes.

    Raises
    ------
    ValueError
        If fields overlap or are out of order (no valid descr exists).
    """
    fields = descriptor.fields
    if fields is None:
        subdtype = descriptor.subdtype
        if subdtype is None:
            if descriptor.metadata is None:
                return descriptor.str
            else:
                # Attach a copy of the metadata dict only if it is non-empty.
                new = descriptor.metadata.copy()
                if new:
                    return (descriptor.str, new)
                else:
                    return descriptor.str
        else:
            # Sub-array dtype: (base descr, shape).
            return (_array_descr(subdtype[0]), subdtype[1])

    names = descriptor.names
    # fields[x] is (dtype, offset[, title]); appending the name gives
    # (dtype, offset, name) or (dtype, offset, title, name).
    ordered_fields = [fields[x] + (x,) for x in names]
    result = []
    offset = 0
    for field in ordered_fields:
        if field[1] > offset:
            # Gap before this field: emit explicit void padding.
            num = field[1] - offset
            result.append(('', f'|V{num}'))
            offset += num
        elif field[1] < offset:
            raise ValueError(
                "dtype.descr is not defined for types with overlapping or "
                "out-of-order fields")
        if len(field) > 3:
            # Field carries a title: descr names become (title, name) pairs.
            name = (field[2], field[3])
        else:
            name = field[2]
        if field[0].subdtype:
            tup = (name, _array_descr(field[0].subdtype[0]),
                   field[0].subdtype[1])
        else:
            tup = (name, _array_descr(field[0]))
        offset += field[0].itemsize
        result.append(tup)

    # Trailing padding up to the struct's full itemsize.
    if descriptor.itemsize > offset:
        num = descriptor.itemsize - offset
        result.append(('', f'|V{num}'))

    return result
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
# format_re was originally from numarray by J. Todd Miller
|
| 139 |
+
|
| 140 |
+
# format_re matches one field of a comma-separated dtype string: an
# optional byte order, an optional (possibly parenthesized) repeat count,
# an optional byte order again, then the type code (with an optional
# bracketed unit, e.g. 'M8[ns]').
format_re = re.compile(r'(?P<order1>[<>|=]?)'
                       r'(?P<repeats> *[(]?[ ,0-9]*[)]? *)'
                       r'(?P<order2>[<>|=]?)'
                       r'(?P<dtype>[A-Za-z0-9.?]*(?:\[[a-zA-Z0-9,.]+\])?)')
# Separator between fields of the comma string.
sep_re = re.compile(r'\s*,\s*')
# Trailing whitespace terminating the string.
space_re = re.compile(r'\s+$')

# astr is a string (perhaps comma separated)

# '=' means native order: normalize it to the concrete '<' or '>'.
_convorder = {'=': _nbo}
|
| 150 |
+
|
| 151 |
+
def _commastring(astr):
    """Parse a comma-separated dtype format string into dtype specifiers.

    Each field is an optionally byte-ordered, optionally repeated type
    code (e.g. ``"2i4, >f8"``).

    Parameters
    ----------
    astr : str
        The comma-separated format string.

    Returns
    -------
    str, tuple, or list
        A single ``dtype_str`` or ``(dtype_str, shape)`` item when the
        string holds exactly one field without a trailing separator,
        otherwise a list of such items.

    Raises
    ------
    ValueError
        If a field cannot be parsed, or the two byte-order positions
        disagree.
    """
    startindex = 0
    result = []
    # A trailing separator forces the list form even for one field.
    islist = False
    while startindex < len(astr):
        mo = format_re.match(astr, pos=startindex)
        try:
            (order1, repeats, order2, dtype) = mo.groups()
        except (TypeError, AttributeError):
            # mo is None when nothing matched at this position.
            raise ValueError(
                f'format number {len(result)+1} of "{astr}" is not recognized'
                ) from None
        startindex = mo.end()
        # Separator or ending padding
        if startindex < len(astr):
            if space_re.match(astr, pos=startindex):
                startindex = len(astr)
            else:
                mo = sep_re.match(astr, pos=startindex)
                if not mo:
                    # Same message style as above (was %-formatting).
                    raise ValueError(
                        f'format number {len(result)+1} of "{astr}" '
                        'is not recognized')
                startindex = mo.end()
                islist = True

        # The byte order may precede either the repeat count or the type
        # code; both positions are accepted but must agree.
        if order2 == '':
            order = order1
        elif order1 == '':
            order = order2
        else:
            order1 = _convorder.get(order1, order1)
            order2 = _convorder.get(order2, order2)
            if (order1 != order2):
                raise ValueError(
                    f'inconsistent byte-order specification {order1} '
                    f'and {order2}')
            order = order1

        # Native or order-irrelevant markers are dropped from the spec.
        if order in ('|', '=', _nbo):
            order = ''
        dtype = order + dtype
        if repeats == '':
            newitem = dtype
        else:
            if (repeats[0] == "(" and repeats[-1] == ")"
                    and repeats[1:-1].strip() != ""
                    and "," not in repeats):
                warnings.warn(
                    'Passing in a parenthesized single number for repeats '
                    'is deprecated; pass either a single number or indicate '
                    'a tuple with a comma, like "(2,)".', DeprecationWarning,
                    stacklevel=2)
            newitem = (dtype, ast.literal_eval(repeats))

        result.append(newitem)

    return result if islist else result[0]
|
| 209 |
+
|
| 210 |
+
class dummy_ctype:
    """Minimal stand-in for a ctypes integer type when ctypes is missing.

    Mimics just enough of the ctypes type protocol for `_getintp_ctype`:
    ``cls * n`` returns the "array type" (itself) and calling it builds an
    instance of the wrapped class from the argument tuple.
    """

    def __init__(self, cls):
        self._cls = cls

    def __mul__(self, other):
        # (type * ndim) in ctypes produces an array type; we just reuse self.
        return self

    def __call__(self, *other):
        return self._cls(other)

    def __eq__(self, other):
        # Comparing against a non-dummy_ctype must not raise AttributeError;
        # defer to the other operand instead.
        if not isinstance(other, dummy_ctype):
            return NotImplemented
        return self._cls == other._cls

    def __ne__(self, other):
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    def __hash__(self):
        # Defining __eq__ alone would make instances unhashable; keep the
        # hash consistent with equality by hashing the wrapped class.
        return hash(self._cls)
|
| 226 |
+
|
| 227 |
+
def _getintp_ctype():
    """Return the ctypes type corresponding to numpy's intp (``dtype('n')``).

    The answer is computed once and memoized on the function object
    (``_getintp_ctype.cache``).  When the ctypes module is unavailable,
    a `dummy_ctype` wrapper around ``np.intp`` is returned instead.
    """
    val = _getintp_ctype.cache
    if val is not None:
        return val
    if ctypes is None:
        import numpy as np
        val = dummy_ctype(np.intp)
    else:
        char = dtype('n').char
        if char == 'i':
            val = ctypes.c_int
        elif char == 'l':
            val = ctypes.c_long
        elif char == 'q':
            val = ctypes.c_longlong
        else:
            # Unrecognized char: fall back to c_long.
            val = ctypes.c_long
    _getintp_ctype.cache = val
    return val


# Memoization slot for _getintp_ctype.
_getintp_ctype.cache = None
|
| 249 |
+
|
| 250 |
+
# Used for .ctypes attribute of ndarray
|
| 251 |
+
|
| 252 |
+
class _missing_ctypes:
|
| 253 |
+
def cast(self, num, obj):
|
| 254 |
+
return num.value
|
| 255 |
+
|
| 256 |
+
class c_void_p:
|
| 257 |
+
def __init__(self, ptr):
|
| 258 |
+
self.value = ptr
|
| 259 |
+
|
| 260 |
+
|
| 261 |
+
class _ctypes:
    """Backing object for the ``ndarray.ctypes`` attribute.

    Wraps an array (and its data pointer, supplied by the C side) and
    exposes ctypes-friendly views of the data pointer, shape, and
    strides.  Falls back to `_missing_ctypes` when the ctypes module is
    unavailable.
    """

    def __init__(self, array, ptr=None):
        self._arr = array

        if ctypes:
            self._ctypes = ctypes
            self._data = self._ctypes.c_void_p(ptr)
        else:
            # fake a pointer-like object that holds onto the reference
            self._ctypes = _missing_ctypes()
            self._data = self._ctypes.c_void_p(ptr)
            self._data._objects = array

        # Zero-dimensional arrays have no shape/strides to expose.
        if self._arr.ndim == 0:
            self._zerod = True
        else:
            self._zerod = False

    def data_as(self, obj):
        """
        Return the data pointer cast to a particular c-types object.
        For example, calling ``self._as_parameter_`` is equivalent to
        ``self.data_as(ctypes.c_void_p)``. Perhaps you want to use
        the data as a pointer to a ctypes array of floating-point data:
        ``self.data_as(ctypes.POINTER(ctypes.c_double))``.

        The returned pointer will keep a reference to the array.
        """
        # _ctypes.cast function causes a circular reference of self._data in
        # self._data._objects. Attributes of self._data cannot be released
        # until gc.collect is called. Make a copy of the pointer first then
        # let it hold the array reference. This is a workaround to circumvent
        # the CPython bug https://bugs.python.org/issue12836.
        ptr = self._ctypes.cast(self._data, obj)
        ptr._arr = self._arr
        return ptr

    def shape_as(self, obj):
        """
        Return the shape tuple as an array of some other c-types
        type. For example: ``self.shape_as(ctypes.c_short)``.
        """
        if self._zerod:
            return None
        return (obj*self._arr.ndim)(*self._arr.shape)

    def strides_as(self, obj):
        """
        Return the strides tuple as an array of some other
        c-types type. For example: ``self.strides_as(ctypes.c_longlong)``.
        """
        if self._zerod:
            return None
        return (obj*self._arr.ndim)(*self._arr.strides)

    @property
    def data(self):
        """
        A pointer to the memory area of the array as a Python integer.
        This memory area may contain data that is not aligned, or not in
        correct byte-order. The memory area may not even be writeable.
        The array flags and data-type of this array should be respected
        when passing this attribute to arbitrary C-code to avoid trouble
        that can include Python crashing. User Beware! The value of this
        attribute is exactly the same as:
        ``self._array_interface_['data'][0]``.

        Note that unlike ``data_as``, a reference won't be kept to the array:
        code like ``ctypes.c_void_p((a + b).ctypes.data)`` will result in a
        pointer to a deallocated array, and should be spelt
        ``(a + b).ctypes.data_as(ctypes.c_void_p)``
        """
        return self._data.value

    @property
    def shape(self):
        """
        (c_intp*self.ndim): A ctypes array of length self.ndim where
        the basetype is the C-integer corresponding to ``dtype('p')`` on this
        platform (see `~numpy.ctypeslib.c_intp`). This base-type could be
        `ctypes.c_int`, `ctypes.c_long`, or `ctypes.c_longlong` depending on
        the platform. The ctypes array contains the shape of
        the underlying array.
        """
        return self.shape_as(_getintp_ctype())

    @property
    def strides(self):
        """
        (c_intp*self.ndim): A ctypes array of length self.ndim where
        the basetype is the same as for the shape attribute. This ctypes
        array contains the strides information from the underlying array.
        This strides information is important for showing how many bytes
        must be jumped to get to the next element in the array.
        """
        return self.strides_as(_getintp_ctype())

    @property
    def _as_parameter_(self):
        """
        Overrides the ctypes semi-magic method

        Enables `c_func(some_array.ctypes)`
        """
        return self.data_as(ctypes.c_void_p)

    # Deprecated getter methods below, kept for backward compatibility.
    # Numpy 1.21.0, 2021-05-18

    def get_data(self):
        """Deprecated getter for the `_ctypes.data` property.

        .. deprecated:: 1.21
        """
        warnings.warn('"get_data" is deprecated. Use "data" instead',
                      DeprecationWarning, stacklevel=2)
        return self.data

    def get_shape(self):
        """Deprecated getter for the `_ctypes.shape` property.

        .. deprecated:: 1.21
        """
        warnings.warn('"get_shape" is deprecated. Use "shape" instead',
                      DeprecationWarning, stacklevel=2)
        return self.shape

    def get_strides(self):
        """Deprecated getter for the `_ctypes.strides` property.

        .. deprecated:: 1.21
        """
        warnings.warn('"get_strides" is deprecated. Use "strides" instead',
                      DeprecationWarning, stacklevel=2)
        return self.strides

    def get_as_parameter(self):
        """Deprecated getter for the `_ctypes._as_parameter_` property.

        .. deprecated:: 1.21
        """
        warnings.warn(
            '"get_as_parameter" is deprecated. Use "_as_parameter_" instead',
            DeprecationWarning, stacklevel=2,
        )
        return self._as_parameter_
|
| 406 |
+
|
| 407 |
+
|
| 408 |
+
def _newnames(datatype, order):
|
| 409 |
+
"""
|
| 410 |
+
Given a datatype and an order object, return a new names tuple, with the
|
| 411 |
+
order indicated
|
| 412 |
+
"""
|
| 413 |
+
oldnames = datatype.names
|
| 414 |
+
nameslist = list(oldnames)
|
| 415 |
+
if isinstance(order, str):
|
| 416 |
+
order = [order]
|
| 417 |
+
seen = set()
|
| 418 |
+
if isinstance(order, (list, tuple)):
|
| 419 |
+
for name in order:
|
| 420 |
+
try:
|
| 421 |
+
nameslist.remove(name)
|
| 422 |
+
except ValueError:
|
| 423 |
+
if name in seen:
|
| 424 |
+
raise ValueError(f"duplicate field name: {name}") from None
|
| 425 |
+
else:
|
| 426 |
+
raise ValueError(f"unknown field name: {name}") from None
|
| 427 |
+
seen.add(name)
|
| 428 |
+
return tuple(list(order) + nameslist)
|
| 429 |
+
raise ValueError(f"unsupported order value: {order}")
|
| 430 |
+
|
| 431 |
+
def _copy_fields(ary):
|
| 432 |
+
"""Return copy of structured array with padding between fields removed.
|
| 433 |
+
|
| 434 |
+
Parameters
|
| 435 |
+
----------
|
| 436 |
+
ary : ndarray
|
| 437 |
+
Structured array from which to remove padding bytes
|
| 438 |
+
|
| 439 |
+
Returns
|
| 440 |
+
-------
|
| 441 |
+
ary_copy : ndarray
|
| 442 |
+
Copy of ary with padding bytes removed
|
| 443 |
+
"""
|
| 444 |
+
dt = ary.dtype
|
| 445 |
+
copy_dtype = {'names': dt.names,
|
| 446 |
+
'formats': [dt.fields[name][0] for name in dt.names]}
|
| 447 |
+
return array(ary, dtype=copy_dtype, copy=True)
|
| 448 |
+
|
| 449 |
+
def _promote_fields(dt1, dt2):
    """ Perform type promotion for two structured dtypes.

    Parameters
    ----------
    dt1 : structured dtype
        First dtype.
    dt2 : structured dtype
        Second dtype.

    Returns
    -------
    out : dtype
        The promoted dtype

    Raises
    ------
    DTypePromotionError
        If the field names differ, or the titles of matching fields differ.

    Notes
    -----
    If one of the inputs is aligned, the result will be. The titles of
    both descriptors must match (point to the same field).
    """
    # Both must be structured and have the same names in the same order
    if (dt1.names is None or dt2.names is None) or dt1.names != dt2.names:
        raise DTypePromotionError(
            f"field names `{dt1.names}` and `{dt2.names}` mismatch.")

    # if both are identical, we can (maybe!) just return the same dtype.
    identical = dt1 is dt2
    new_fields = []
    for name in dt1.names:
        # fields[name] is (dtype, offset[, title]).
        field1 = dt1.fields[name]
        field2 = dt2.fields[name]
        new_descr = promote_types(field1[0], field2[0])
        identical = identical and new_descr is field1[0]

        # Check that the titles match (if given):
        if field1[2:] != field2[2:]:
            raise DTypePromotionError(
                f"field titles of field '{name}' mismatch")
        if len(field1) == 2:
            new_fields.append((name, new_descr))
        else:
            # Preserve the title by using a (title, name) field key.
            new_fields.append(((field1[2], name), new_descr))

    res = dtype(new_fields, align=dt1.isalignedstruct or dt2.isalignedstruct)

    # Might as well preserve identity (and metadata) if the dtype is identical
    # and the itemsize, offsets are also unmodified. This could probably be
    # sped up, but also probably just be removed entirely.
    if identical and res.itemsize == dt1.itemsize:
        for name in dt1.names:
            if dt1.fields[name][1] != res.fields[name][1]:
                return res  # the dtype changed.
        return dt1

    return res
|
| 504 |
+
|
| 505 |
+
|
| 506 |
+
def _getfield_is_safe(oldtype, newtype, offset):
|
| 507 |
+
""" Checks safety of getfield for object arrays.
|
| 508 |
+
|
| 509 |
+
As in _view_is_safe, we need to check that memory containing objects is not
|
| 510 |
+
reinterpreted as a non-object datatype and vice versa.
|
| 511 |
+
|
| 512 |
+
Parameters
|
| 513 |
+
----------
|
| 514 |
+
oldtype : data-type
|
| 515 |
+
Data type of the original ndarray.
|
| 516 |
+
newtype : data-type
|
| 517 |
+
Data type of the field being accessed by ndarray.getfield
|
| 518 |
+
offset : int
|
| 519 |
+
Offset of the field being accessed by ndarray.getfield
|
| 520 |
+
|
| 521 |
+
Raises
|
| 522 |
+
------
|
| 523 |
+
TypeError
|
| 524 |
+
If the field access is invalid
|
| 525 |
+
|
| 526 |
+
"""
|
| 527 |
+
if newtype.hasobject or oldtype.hasobject:
|
| 528 |
+
if offset == 0 and newtype == oldtype:
|
| 529 |
+
return
|
| 530 |
+
if oldtype.names is not None:
|
| 531 |
+
for name in oldtype.names:
|
| 532 |
+
if (oldtype.fields[name][1] == offset and
|
| 533 |
+
oldtype.fields[name][0] == newtype):
|
| 534 |
+
return
|
| 535 |
+
raise TypeError("Cannot get/set field of an object array")
|
| 536 |
+
return
|
| 537 |
+
|
| 538 |
+
def _view_is_safe(oldtype, newtype):
|
| 539 |
+
""" Checks safety of a view involving object arrays, for example when
|
| 540 |
+
doing::
|
| 541 |
+
|
| 542 |
+
np.zeros(10, dtype=oldtype).view(newtype)
|
| 543 |
+
|
| 544 |
+
Parameters
|
| 545 |
+
----------
|
| 546 |
+
oldtype : data-type
|
| 547 |
+
Data type of original ndarray
|
| 548 |
+
newtype : data-type
|
| 549 |
+
Data type of the view
|
| 550 |
+
|
| 551 |
+
Raises
|
| 552 |
+
------
|
| 553 |
+
TypeError
|
| 554 |
+
If the new type is incompatible with the old type.
|
| 555 |
+
|
| 556 |
+
"""
|
| 557 |
+
|
| 558 |
+
# if the types are equivalent, there is no problem.
|
| 559 |
+
# for example: dtype((np.record, 'i4,i4')) == dtype((np.void, 'i4,i4'))
|
| 560 |
+
if oldtype == newtype:
|
| 561 |
+
return
|
| 562 |
+
|
| 563 |
+
if newtype.hasobject or oldtype.hasobject:
|
| 564 |
+
raise TypeError("Cannot change data-type for array of references.")
|
| 565 |
+
return
|
| 566 |
+
|
| 567 |
+
|
| 568 |
+
# Given a string containing a PEP 3118 format specifier,
|
| 569 |
+
# construct a NumPy dtype
|
| 570 |
+
|
| 571 |
+
# PEP 3118 format character -> NumPy type character, for '@'/'^' (native
# size and alignment) format strings.
_pep3118_native_map = {
    '?': '?',
    'c': 'S1',
    'b': 'b',
    'B': 'B',
    'h': 'h',
    'H': 'H',
    'i': 'i',
    'I': 'I',
    'l': 'l',
    'L': 'L',
    'q': 'q',
    'Q': 'Q',
    'e': 'e',
    'f': 'f',
    'd': 'd',
    'g': 'g',
    'Zf': 'F',
    'Zd': 'D',
    'Zg': 'G',
    's': 'S',
    'w': 'U',
    'O': 'O',
    'x': 'V',  # padding
}
_pep3118_native_typechars = ''.join(_pep3118_native_map.keys())

# PEP 3118 format character -> NumPy type string, for '<'/'>'/'='/'!' 
# (standard, fixed-size) format strings.  Note 'g'/'Zg' (long double) have
# no standard-size equivalent and are deliberately absent.
_pep3118_standard_map = {
    '?': '?',
    'c': 'S1',
    'b': 'b',
    'B': 'B',
    'h': 'i2',
    'H': 'u2',
    'i': 'i4',
    'I': 'u4',
    'l': 'i4',
    'L': 'u4',
    'q': 'i8',
    'Q': 'u8',
    'e': 'f2',
    'f': 'f',
    'd': 'd',
    'Zf': 'F',
    'Zd': 'D',
    's': 'S',
    'w': 'U',
    'O': 'O',
    'x': 'V',  # padding
}
_pep3118_standard_typechars = ''.join(_pep3118_standard_map.keys())

# PEP 3118 format characters with no NumPy representation at all; parsing
# one of these raises NotImplementedError with the description below.
_pep3118_unsupported_map = {
    'u': 'UCS-2 strings',
    '&': 'pointers',
    't': 'bitfields',
    'X': 'function pointers',
}
|
| 629 |
+
|
| 630 |
+
class _Stream:
|
| 631 |
+
def __init__(self, s):
|
| 632 |
+
self.s = s
|
| 633 |
+
self.byteorder = '@'
|
| 634 |
+
|
| 635 |
+
def advance(self, n):
|
| 636 |
+
res = self.s[:n]
|
| 637 |
+
self.s = self.s[n:]
|
| 638 |
+
return res
|
| 639 |
+
|
| 640 |
+
def consume(self, c):
|
| 641 |
+
if self.s[:len(c)] == c:
|
| 642 |
+
self.advance(len(c))
|
| 643 |
+
return True
|
| 644 |
+
return False
|
| 645 |
+
|
| 646 |
+
def consume_until(self, c):
|
| 647 |
+
if callable(c):
|
| 648 |
+
i = 0
|
| 649 |
+
while i < len(self.s) and not c(self.s[i]):
|
| 650 |
+
i = i + 1
|
| 651 |
+
return self.advance(i)
|
| 652 |
+
else:
|
| 653 |
+
i = self.s.index(c)
|
| 654 |
+
res = self.advance(i)
|
| 655 |
+
self.advance(len(c))
|
| 656 |
+
return res
|
| 657 |
+
|
| 658 |
+
@property
|
| 659 |
+
def next(self):
|
| 660 |
+
return self.s[0]
|
| 661 |
+
|
| 662 |
+
def __bool__(self):
|
| 663 |
+
return bool(self.s)
|
| 664 |
+
|
| 665 |
+
|
| 666 |
+
def _dtype_from_pep3118(spec):
    """Construct a NumPy dtype from the PEP 3118 format string *spec*."""
    result, _align = __dtype_from_pep3118(_Stream(spec), is_subdtype=False)
    return result
|
| 670 |
+
|
| 671 |
+
def __dtype_from_pep3118(stream, is_subdtype):
    """Parse fields from *stream* until exhaustion or a closing ``'}'``.

    Recursive worker for `_dtype_from_pep3118`; ``T{...}`` sub-structures
    re-enter with ``is_subdtype=True``.  Returns a ``(dtype,
    common_alignment)`` pair, where the alignment is the LCM of the
    alignments of the fields seen (1 for unaligned parses).
    """
    field_spec = dict(
        names=[],
        formats=[],
        offsets=[],
        itemsize=0
    )
    offset = 0
    common_alignment = 1
    is_padding = False

    # Parse spec
    while stream:
        value = None

        # End of structure, bail out to upper level
        if stream.consume('}'):
            break

        # Sub-arrays (1)
        shape = None
        if stream.consume('('):
            shape = stream.consume_until(')')
            shape = tuple(map(int, shape.split(',')))

        # Byte order
        if stream.next in ('@', '=', '<', '>', '^', '!'):
            byteorder = stream.advance(1)
            if byteorder == '!':
                # '!' is network order, i.e. big-endian.
                byteorder = '>'
            stream.byteorder = byteorder

        # Byte order characters also control native vs. standard type sizes
        if stream.byteorder in ('@', '^'):
            type_map = _pep3118_native_map
            type_map_chars = _pep3118_native_typechars
        else:
            type_map = _pep3118_standard_map
            type_map_chars = _pep3118_standard_typechars

        # Item sizes
        itemsize_str = stream.consume_until(lambda c: not c.isdigit())
        if itemsize_str:
            itemsize = int(itemsize_str)
        else:
            itemsize = 1

        # Data types
        is_padding = False

        if stream.consume('T{'):
            # Nested structure: recurse; the matching '}' ends the recursion.
            value, align = __dtype_from_pep3118(
                stream, is_subdtype=True)
        elif stream.next in type_map_chars:
            if stream.next == 'Z':
                # 'Z?' is a two-character complex type code.
                typechar = stream.advance(2)
            else:
                typechar = stream.advance(1)

            is_padding = (typechar == 'x')
            dtypechar = type_map[typechar]
            if dtypechar in 'USV':
                # For strings/void the count is a length, not a repeat.
                dtypechar += '%d' % itemsize
                itemsize = 1
            numpy_byteorder = {'@': '=', '^': '='}.get(
                stream.byteorder, stream.byteorder)
            value = dtype(numpy_byteorder + dtypechar)
            align = value.alignment
        elif stream.next in _pep3118_unsupported_map:
            desc = _pep3118_unsupported_map[stream.next]
            raise NotImplementedError(
                "Unrepresentable PEP 3118 data type {!r} ({})"
                .format(stream.next, desc))
        else:
            raise ValueError(
                "Unknown PEP 3118 data type specifier %r" % stream.s
            )

        #
        # Native alignment may require padding
        #
        # Here we assume that the presence of a '@' character implicitly
        # implies that the start of the array is *already* aligned.
        #
        extra_offset = 0
        if stream.byteorder == '@':
            start_padding = (-offset) % align
            intra_padding = (-value.itemsize) % align

            offset += start_padding

            if intra_padding != 0:
                if itemsize > 1 or (shape is not None and _prod(shape) > 1):
                    # Inject internal padding to the end of the sub-item
                    value = _add_trailing_padding(value, intra_padding)
                else:
                    # We can postpone the injection of internal padding,
                    # as the item appears at most once
                    extra_offset += intra_padding

            # Update common alignment
            common_alignment = _lcm(align, common_alignment)

        # Convert itemsize to sub-array
        if itemsize != 1:
            value = dtype((value, (itemsize,)))

        # Sub-arrays (2)
        if shape is not None:
            value = dtype((value, shape))

        # Field name
        if stream.consume(':'):
            name = stream.consume_until(':')
        else:
            name = None

        # Unnamed padding contributes only to the offset, not to the fields.
        if not (is_padding and name is None):
            if name is not None and name in field_spec['names']:
                raise RuntimeError(
                    f"Duplicate field name '{name}' in PEP3118 format"
                )
            field_spec['names'].append(name)
            field_spec['formats'].append(value)
            field_spec['offsets'].append(offset)

        offset += value.itemsize
        offset += extra_offset

    field_spec['itemsize'] = offset

    # extra final padding for aligned types
    if stream.byteorder == '@':
        field_spec['itemsize'] += (-offset) % common_alignment

    # Check if this was a simple 1-item type, and unwrap it
    if (field_spec['names'] == [None]
            and field_spec['offsets'][0] == 0
            and field_spec['itemsize'] == field_spec['formats'][0].itemsize
            and not is_subdtype):
        ret = field_spec['formats'][0]
    else:
        _fix_names(field_spec)
        ret = dtype(field_spec)

    # Finished
    return ret, common_alignment
|
| 818 |
+
|
| 819 |
+
def _fix_names(field_spec):
|
| 820 |
+
""" Replace names which are None with the next unused f%d name """
|
| 821 |
+
names = field_spec['names']
|
| 822 |
+
for i, name in enumerate(names):
|
| 823 |
+
if name is not None:
|
| 824 |
+
continue
|
| 825 |
+
|
| 826 |
+
j = 0
|
| 827 |
+
while True:
|
| 828 |
+
name = f'f{j}'
|
| 829 |
+
if name not in names:
|
| 830 |
+
break
|
| 831 |
+
j = j + 1
|
| 832 |
+
names[i] = name
|
| 833 |
+
|
| 834 |
+
def _add_trailing_padding(value, padding):
|
| 835 |
+
"""Inject the specified number of padding bytes at the end of a dtype"""
|
| 836 |
+
if value.fields is None:
|
| 837 |
+
field_spec = dict(
|
| 838 |
+
names=['f0'],
|
| 839 |
+
formats=[value],
|
| 840 |
+
offsets=[0],
|
| 841 |
+
itemsize=value.itemsize
|
| 842 |
+
)
|
| 843 |
+
else:
|
| 844 |
+
fields = value.fields
|
| 845 |
+
names = value.names
|
| 846 |
+
field_spec = dict(
|
| 847 |
+
names=names,
|
| 848 |
+
formats=[fields[name][0] for name in names],
|
| 849 |
+
offsets=[fields[name][1] for name in names],
|
| 850 |
+
itemsize=value.itemsize
|
| 851 |
+
)
|
| 852 |
+
|
| 853 |
+
field_spec['itemsize'] += padding
|
| 854 |
+
return dtype(field_spec)
|
| 855 |
+
|
| 856 |
+
def _prod(a):
|
| 857 |
+
p = 1
|
| 858 |
+
for x in a:
|
| 859 |
+
p *= x
|
| 860 |
+
return p
|
| 861 |
+
|
| 862 |
+
def _gcd(a, b):
|
| 863 |
+
"""Calculate the greatest common divisor of a and b"""
|
| 864 |
+
if not (math.isfinite(a) and math.isfinite(b)):
|
| 865 |
+
raise ValueError('Can only find greatest common divisor of '
|
| 866 |
+
f'finite arguments, found "{a}" and "{b}"')
|
| 867 |
+
while b:
|
| 868 |
+
a, b = b, a % b
|
| 869 |
+
return a
|
| 870 |
+
|
| 871 |
+
def _lcm(a, b):
    """Return the least common multiple of ``a`` and ``b``."""
    # Divide before multiplying to keep the intermediate value small.
    divisor = _gcd(a, b)
    return (a // divisor) * b
|
| 873 |
+
|
| 874 |
+
def array_ufunc_errmsg_formatter(dummy, ufunc, method, *inputs, **kwargs):
    """ Format the error message for when __array_ufunc__ gives up. """
    # Render the call as it was made: positional reprs, then kw=value pairs.
    pieces = [repr(arg) for arg in inputs]
    pieces += [f'{key}={val!r}' for key, val in kwargs.items()]
    args_string = ', '.join(pieces)
    # The offending operand types include any explicit ``out`` arrays.
    all_args = inputs + kwargs.get('out', ())
    types_string = ', '.join(repr(type(arg).__name__) for arg in all_args)
    return ('operand type(s) all returned NotImplemented from '
            f'__array_ufunc__({ufunc!r}, {method!r}, {args_string}): '
            f'{types_string}')
|
| 884 |
+
|
| 885 |
+
|
| 886 |
+
def array_function_errmsg_formatter(public_api, types):
    """ Format the error message for when __array_ufunc__ gives up. """
    qualified = f'{public_api.__module__}.{public_api.__name__}'
    return (f"no implementation found for '{qualified}' on types that "
            f"implement __array_function__: {list(types)}")
|
| 891 |
+
|
| 892 |
+
|
| 893 |
+
def _ufunc_doc_signature_formatter(ufunc):
|
| 894 |
+
"""
|
| 895 |
+
Builds a signature string which resembles PEP 457
|
| 896 |
+
|
| 897 |
+
This is used to construct the first line of the docstring
|
| 898 |
+
"""
|
| 899 |
+
|
| 900 |
+
# input arguments are simple
|
| 901 |
+
if ufunc.nin == 1:
|
| 902 |
+
in_args = 'x'
|
| 903 |
+
else:
|
| 904 |
+
in_args = ', '.join(f'x{i+1}' for i in range(ufunc.nin))
|
| 905 |
+
|
| 906 |
+
# output arguments are both keyword or positional
|
| 907 |
+
if ufunc.nout == 0:
|
| 908 |
+
out_args = ', /, out=()'
|
| 909 |
+
elif ufunc.nout == 1:
|
| 910 |
+
out_args = ', /, out=None'
|
| 911 |
+
else:
|
| 912 |
+
out_args = '[, {positional}], / [, out={default}]'.format(
|
| 913 |
+
positional=', '.join(
|
| 914 |
+
'out{}'.format(i+1) for i in range(ufunc.nout)),
|
| 915 |
+
default=repr((None,)*ufunc.nout)
|
| 916 |
+
)
|
| 917 |
+
|
| 918 |
+
# keyword only args depend on whether this is a gufunc
|
| 919 |
+
kwargs = (
|
| 920 |
+
", casting='same_kind'"
|
| 921 |
+
", order='K'"
|
| 922 |
+
", dtype=None"
|
| 923 |
+
", subok=True"
|
| 924 |
+
)
|
| 925 |
+
|
| 926 |
+
# NOTE: gufuncs may or may not support the `axis` parameter
|
| 927 |
+
if ufunc.signature is None:
|
| 928 |
+
kwargs = f", where=True{kwargs}[, signature]"
|
| 929 |
+
else:
|
| 930 |
+
kwargs += "[, signature, axes, axis]"
|
| 931 |
+
|
| 932 |
+
# join all the parts together
|
| 933 |
+
return '{name}({in_args}{out_args}, *{kwargs})'.format(
|
| 934 |
+
name=ufunc.__name__,
|
| 935 |
+
in_args=in_args,
|
| 936 |
+
out_args=out_args,
|
| 937 |
+
kwargs=kwargs
|
| 938 |
+
)
|
| 939 |
+
|
| 940 |
+
|
| 941 |
+
def npy_ctypes_check(cls):
    # Determine whether ``cls`` comes from ctypes, in order to work around
    # a bug in the buffer protocol for those objects, bpo-10746.
    try:
        # ctypes classes are new-style, so they have an __mro__. This
        # probably fails for ctypes classes with multiple inheritance.
        if IS_PYPY:
            # (..., _ctypes.basics._CData, Bufferable, object)
            base = cls.__mro__[-3]
        else:
            # # (..., _ctypes._CData, object)
            base = cls.__mro__[-2]
        # right now, they're part of the _ctypes module
        return '_ctypes' in base.__module__
    except Exception:
        # Anything odd (no __mro__, short MRO, ...) means "not ctypes".
        return False
|
| 957 |
+
|
| 958 |
+
# used to handle the _NoValue default argument for na_object
|
| 959 |
+
# in the C implementation of the __reduce__ method for stringdtype
|
| 960 |
+
def _convert_to_stringdtype_kwargs(coerce, na_object=_NoValue):
    """Rebuild a StringDType, forwarding ``na_object`` only when given.

    ``_NoValue`` marks "argument not supplied", so the resulting dtype
    matches one constructed without an ``na_object`` at all.
    """
    if na_object is not _NoValue:
        return StringDType(coerce=coerce, na_object=na_object)
    return StringDType(coerce=coerce)
|
janus/lib/python3.10/site-packages/numpy/_core/_machar.py
ADDED
|
@@ -0,0 +1,356 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Machine arithmetic - determine the parameters of the
|
| 3 |
+
floating-point arithmetic system
|
| 4 |
+
|
| 5 |
+
Author: Pearu Peterson, September 2003
|
| 6 |
+
|
| 7 |
+
"""
|
| 8 |
+
__all__ = ['MachAr']
|
| 9 |
+
|
| 10 |
+
from .fromnumeric import any
|
| 11 |
+
from ._ufunc_config import errstate
|
| 12 |
+
from .._utils import set_module
|
| 13 |
+
|
| 14 |
+
# Need to speed this up...especially for longdouble
|
| 15 |
+
|
| 16 |
+
# Deprecated 2021-10-20, NumPy 1.22
|
| 17 |
+
class MachAr:
    """
    Diagnosing machine parameters.

    Attributes
    ----------
    ibeta : int
        Radix in which numbers are represented.
    it : int
        Number of base-`ibeta` digits in the floating point mantissa M.
    machep : int
        Exponent of the smallest (most negative) power of `ibeta` that,
        added to 1.0, gives something different from 1.0
    eps : float
        Floating-point number ``beta**machep`` (floating point precision)
    negep : int
        Exponent of the smallest power of `ibeta` that, subtracted
        from 1.0, gives something different from 1.0.
    epsneg : float
        Floating-point number ``beta**negep``.
    iexp : int
        Number of bits in the exponent (including its sign and bias).
    minexp : int
        Smallest (most negative) power of `ibeta` consistent with there
        being no leading zeros in the mantissa.
    xmin : float
        Floating-point number ``beta**minexp`` (the smallest [in
        magnitude] positive floating point number with full precision).
    maxexp : int
        Smallest (positive) power of `ibeta` that causes overflow.
    xmax : float
        ``(1-epsneg) * beta**maxexp`` (the largest [in magnitude]
        usable floating value).
    irnd : int
        In ``range(6)``, information on what kind of rounding is done
        in addition, and on how underflow is handled.
    ngrd : int
        Number of 'guard digits' used when truncating the product
        of two mantissas to fit the representation.
    epsilon : float
        Same as `eps`.
    tiny : float
        An alias for `smallest_normal`, kept for backwards compatibility.
    huge : float
        Same as `xmax`.
    precision : float
        ``- int(-log10(eps))``
    resolution : float
        ``- 10**(-precision)``
    smallest_normal : float
        The smallest positive floating point number with 1 as leading bit in
        the mantissa following IEEE-754. Same as `xmin`.
    smallest_subnormal : float
        The smallest positive floating point number with 0 as leading bit in
        the mantissa following IEEE-754.

    Parameters
    ----------
    float_conv : function, optional
        Function that converts an integer or integer array to a float
        or float array. Default is `float`.
    int_conv : function, optional
        Function that converts a float or float array to an integer or
        integer array. Default is `int`.
    float_to_float : function, optional
        Function that converts a float array to float. Default is `float`.
        Note that this does not seem to do anything useful in the current
        implementation.
    float_to_str : function, optional
        Function that converts a single float to a string. Default is
        ``lambda v:'%24.16e' %v``.
    title : str, optional
        Title that is printed in the string representation of `MachAr`.

    See Also
    --------
    finfo : Machine limits for floating point types.
    iinfo : Machine limits for integer types.

    References
    ----------
    .. [1] Press, Teukolsky, Vetterling and Flannery,
           "Numerical Recipes in C++," 2nd ed,
           Cambridge University Press, 2002, p. 31.

    """

    def __init__(self, float_conv=float,int_conv=int,
                 float_to_float=float,
                 float_to_str=lambda v:'%24.16e' % v,
                 title='Python floating point number'):
        """

        float_conv - convert integer to float (array)
        int_conv - convert float (array) to integer
        float_to_float - convert float array to float
        float_to_str - convert array float to str
        title - description of used floating point numbers

        """
        # We ignore all errors here because we are purposely triggering
        # underflow to detect the properties of the running arch.
        with errstate(under='ignore'):
            self._do_init(float_conv, int_conv, float_to_float, float_to_str, title)

    def _do_init(self, float_conv, int_conv, float_to_float, float_to_str, title):
        # NOTE: ``any`` below is numpy's reducing any() (imported at module
        # top from .fromnumeric), so every probe also works when float_conv
        # returns an array of candidate values rather than a scalar.
        # Each probing loop is capped at max_iterN iterations; the loop
        # ``else`` clauses fire on non-convergence.
        max_iterN = 10000
        msg = "Did not converge after %d tries with %s"
        one = float_conv(1)
        two = one + one
        zero = one - one

        # Do we really need to do this? Aren't they 2 and 2.0?
        # Determine ibeta and beta
        # Double ``a`` until adding one no longer round-trips exactly,
        # i.e. until a + 1 - a != 1 due to limited mantissa length.
        a = one
        for _ in range(max_iterN):
            a = a + a
            temp = a + one
            temp1 = temp - a
            if any(temp1 - one != zero):
                break
        else:
            raise RuntimeError(msg % (_, one.dtype))
        b = one
        for _ in range(max_iterN):
            b = b + b
            temp = a + b
            itemp = int_conv(temp-a)
            if any(itemp != 0):
                break
        else:
            raise RuntimeError(msg % (_, one.dtype))
        ibeta = itemp
        beta = float_conv(ibeta)

        # Determine it and irnd
        # ``it`` counts base-beta mantissa digits; found by multiplying by
        # beta until 1.0 can no longer be added exactly.
        it = -1
        b = one
        for _ in range(max_iterN):
            it = it + 1
            b = b * beta
            temp = b + one
            temp1 = temp - b
            if any(temp1 - one != zero):
                break
        else:
            raise RuntimeError(msg % (_, one.dtype))

        betah = beta / two
        a = one
        for _ in range(max_iterN):
            a = a + a
            temp = a + one
            temp1 = temp - a
            if any(temp1 - one != zero):
                break
        else:
            raise RuntimeError(msg % (_, one.dtype))
        # Probe with half the radix to classify the rounding mode (irnd).
        temp = a + betah
        irnd = 0
        if any(temp-a != zero):
            irnd = 1
        tempa = a + beta
        temp = tempa + betah
        if irnd == 0 and any(temp-tempa != zero):
            irnd = 2

        # Determine negep and epsneg
        negep = it + 3
        betain = one / beta
        a = one
        for i in range(negep):
            a = a * betain
        b = a
        for _ in range(max_iterN):
            temp = one - a
            if any(temp-one != zero):
                break
            a = a * beta
            negep = negep - 1
            # Prevent infinite loop on PPC with gcc 4.0:
            if negep < 0:
                raise RuntimeError("could not determine machine tolerance "
                                   "for 'negep', locals() -> %s" % (locals()))
        else:
            raise RuntimeError(msg % (_, one.dtype))
        negep = -negep
        epsneg = a

        # Determine machep and eps
        machep = - it - 3
        a = b

        for _ in range(max_iterN):
            temp = one + a
            if any(temp-one != zero):
                break
            a = a * beta
            machep = machep + 1
        else:
            raise RuntimeError(msg % (_, one.dtype))
        eps = a

        # Determine ngrd
        ngrd = 0
        temp = one + eps
        if irnd == 0 and any(temp*one - one != zero):
            ngrd = 1

        # Determine iexp
        i = 0
        k = 1
        z = betain
        t = one + eps
        nxres = 0
        # Repeatedly square ``z`` toward zero until it underflows or stops
        # shrinking; the iteration count bounds the exponent-field width.
        for _ in range(max_iterN):
            y = z
            z = y*y
            a = z*one  # Check here for underflow
            temp = z*t
            if any(a+a == zero) or any(abs(z) >= y):
                break
            temp1 = temp * betain
            if any(temp1*beta == z):
                break
            i = i + 1
            k = k + k
        else:
            raise RuntimeError(msg % (_, one.dtype))
        if ibeta != 10:
            iexp = i + 1
            mx = k + k
        else:
            # Decimal machines need the exponent width counted digit by digit.
            iexp = 2
            iz = ibeta
            while k >= iz:
                iz = iz * ibeta
                iexp = iexp + 1
            mx = iz + iz - 1

        # Determine minexp and xmin
        # Walk ``y`` down by factors of beta until the next step underflows;
        # the last representable value is xmin.
        for _ in range(max_iterN):
            xmin = y
            y = y * betain
            a = y * one
            temp = y * t
            if any((a + a) != zero) and any(abs(y) < xmin):
                k = k + 1
                temp1 = temp * betain
                if any(temp1*beta == y) and any(temp != y):
                    nxres = 3
                    xmin = y
                    break
            else:
                break
        else:
            raise RuntimeError(msg % (_, one.dtype))
        minexp = -k

        # Determine maxexp, xmax
        if mx <= k + k - 3 and ibeta != 10:
            mx = mx + mx
            iexp = iexp + 1
        maxexp = mx + minexp
        irnd = irnd + nxres
        # Adjust for the different conventions machines use at the top of
        # the exponent range (see ref. [1] in the class docstring).
        if irnd >= 2:
            maxexp = maxexp - 2
        i = maxexp + minexp
        if ibeta == 2 and not i:
            maxexp = maxexp - 1
        if i > 20:
            maxexp = maxexp - 1
        if any(a != y):
            maxexp = maxexp - 2
        xmax = one - epsneg
        if any(xmax*one != xmax):
            xmax = one - beta*epsneg
        xmax = xmax / (xmin*beta*beta*beta)
        i = maxexp + minexp + 3
        for j in range(i):
            if ibeta == 2:
                xmax = xmax + xmax
            else:
                xmax = xmax * beta

        smallest_subnormal = abs(xmin / beta ** (it))

        self.ibeta = ibeta
        self.it = it
        self.negep = negep
        self.epsneg = float_to_float(epsneg)
        self._str_epsneg = float_to_str(epsneg)
        self.machep = machep
        self.eps = float_to_float(eps)
        self._str_eps = float_to_str(eps)
        self.ngrd = ngrd
        self.iexp = iexp
        self.minexp = minexp
        self.xmin = float_to_float(xmin)
        self._str_xmin = float_to_str(xmin)
        self.maxexp = maxexp
        self.xmax = float_to_float(xmax)
        self._str_xmax = float_to_str(xmax)
        self.irnd = irnd

        self.title = title
        # Commonly used parameters
        self.epsilon = self.eps
        self.tiny = self.xmin
        self.huge = self.xmax
        self.smallest_normal = self.xmin
        self._str_smallest_normal = float_to_str(self.xmin)
        self.smallest_subnormal = float_to_float(smallest_subnormal)
        self._str_smallest_subnormal = float_to_str(smallest_subnormal)

        # math is imported lazily, only when a MachAr is actually constructed.
        import math
        self.precision = int(-math.log10(float_to_float(self.eps)))
        ten = two + two + two + two + two
        resolution = ten ** (-self.precision)
        self.resolution = float_to_float(resolution)
        self._str_resolution = float_to_str(resolution)

    def __str__(self):
        # Human-readable dump of all diagnosed parameters; the %-template
        # pulls values straight out of the instance __dict__.
        fmt = (
            'Machine parameters for %(title)s\n'
            '---------------------------------------------------------------------\n'
            'ibeta=%(ibeta)s it=%(it)s iexp=%(iexp)s ngrd=%(ngrd)s irnd=%(irnd)s\n'
            'machep=%(machep)s eps=%(_str_eps)s (beta**machep == epsilon)\n'
            'negep =%(negep)s epsneg=%(_str_epsneg)s (beta**epsneg)\n'
            'minexp=%(minexp)s xmin=%(_str_xmin)s (beta**minexp == tiny)\n'
            'maxexp=%(maxexp)s xmax=%(_str_xmax)s ((1-epsneg)*beta**maxexp == huge)\n'
            'smallest_normal=%(smallest_normal)s '
            'smallest_subnormal=%(smallest_subnormal)s\n'
            '---------------------------------------------------------------------\n'
            )
        return fmt % self.__dict__
|
| 353 |
+
|
| 354 |
+
|
| 355 |
+
if __name__ == '__main__':
    # Ad-hoc smoke test: diagnose and print the parameters of Python floats.
    print(MachAr())
|
janus/lib/python3.10/site-packages/numpy/_core/_rational_tests.cpython-310-x86_64-linux-gnu.so
ADDED
|
Binary file (59.8 kB). View file
|
|
|
janus/lib/python3.10/site-packages/numpy/_core/_string_helpers.py
ADDED
|
@@ -0,0 +1,100 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
String-handling utilities to avoid locale-dependence.
|
| 3 |
+
|
| 4 |
+
Used primarily to generate type name aliases.
|
| 5 |
+
"""
|
| 6 |
+
# "import string" is costly to import!
|
| 7 |
+
# Construct the translation tables directly
|
| 8 |
+
# "A" = chr(65), "a" = chr(97)
|
| 9 |
+
_all_chars = tuple(map(chr, range(256)))
|
| 10 |
+
_ascii_upper = _all_chars[65:65+26]
|
| 11 |
+
_ascii_lower = _all_chars[97:97+26]
|
| 12 |
+
LOWER_TABLE = _all_chars[:65] + _ascii_lower + _all_chars[65+26:]
|
| 13 |
+
UPPER_TABLE = _all_chars[:97] + _ascii_upper + _all_chars[97+26:]
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def english_lower(s):
    """ Apply English case rules to convert ASCII strings to all lower case.

    This is an internal utility function to replace calls to str.lower() such
    that we can avoid changing behavior with changing locales. In particular,
    Turkish has distinct dotted and dotless variants of the Latin letter "I" in
    both lowercase and uppercase. Thus, "I".lower() != "i" in a "tr" locale.

    Parameters
    ----------
    s : str

    Returns
    -------
    lowered : str

    Examples
    --------
    >>> from numpy._core.numerictypes import english_lower
    >>> english_lower('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_')
    'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz0123456789_'
    >>> english_lower('')
    ''
    """
    # Locale-independent: only ASCII A-Z are remapped by LOWER_TABLE.
    return s.translate(LOWER_TABLE)
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
def english_upper(s):
    """ Apply English case rules to convert ASCII strings to all upper case.

    This is an internal utility function to replace calls to str.upper() such
    that we can avoid changing behavior with changing locales. In particular,
    Turkish has distinct dotted and dotless variants of the Latin letter "I" in
    both lowercase and uppercase. Thus, "i".upper() != "I" in a "tr" locale.

    Parameters
    ----------
    s : str

    Returns
    -------
    uppered : str

    Examples
    --------
    >>> from numpy._core.numerictypes import english_upper
    >>> english_upper('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_')
    'ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_'
    >>> english_upper('')
    ''
    """
    # Locale-independent: only ASCII a-z are remapped by UPPER_TABLE.
    return s.translate(UPPER_TABLE)
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def english_capitalize(s):
    """ Apply English case rules to convert the first character of an ASCII
    string to upper case.

    This is an internal utility function to replace calls to str.capitalize()
    such that we can avoid changing behavior with changing locales.

    Parameters
    ----------
    s : str

    Returns
    -------
    capitalized : str

    Examples
    --------
    >>> from numpy._core.numerictypes import english_capitalize
    >>> english_capitalize('int8')
    'Int8'
    >>> english_capitalize('Int8')
    'Int8'
    >>> english_capitalize('')
    ''
    """
    # Guard clause: the empty string has no first character to upper-case.
    if not s:
        return s
    return english_upper(s[0]) + s[1:]
|
janus/lib/python3.10/site-packages/numpy/_core/_ufunc_config.pyi
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from _typeshed import SupportsWrite
from collections.abc import Callable
from typing import Any, Literal, TypeAlias, TypedDict, type_check_only

from numpy import errstate as errstate

# Type stubs for numpy's floating-point error-handling configuration API.

# The policies a floating-point error condition can be handled with.
_ErrKind: TypeAlias = Literal["ignore", "warn", "raise", "call", "print", "log"]
# Callback accepted by seterrcall; presumably called as (err_type, flag) —
# verify against the runtime implementation.
_ErrFunc: TypeAlias = Callable[[str, int], Any]
# seterrcall also accepts any object with a ``write`` method (log target).
_ErrCall: TypeAlias = _ErrFunc | SupportsWrite[str]

@type_check_only
class _ErrDict(TypedDict):
    # Complete per-condition policy mapping, as returned by geterr()/seterr().
    divide: _ErrKind
    over: _ErrKind
    under: _ErrKind
    invalid: _ErrKind

@type_check_only
class _ErrDictOptional(TypedDict, total=False):
    # Partial overrides accepted by seterr(); ``all`` applies to every
    # condition, and None leaves a condition unchanged.
    all: None | _ErrKind
    divide: None | _ErrKind
    over: None | _ErrKind
    under: None | _ErrKind
    invalid: None | _ErrKind

def seterr(
    all: None | _ErrKind = ...,
    divide: None | _ErrKind = ...,
    over: None | _ErrKind = ...,
    under: None | _ErrKind = ...,
    invalid: None | _ErrKind = ...,
) -> _ErrDict: ...
def geterr() -> _ErrDict: ...
def setbufsize(size: int) -> int: ...
def getbufsize() -> int: ...
def seterrcall(func: _ErrCall | None) -> _ErrCall | None: ...
def geterrcall() -> _ErrCall | None: ...

# See `numpy/__init__.pyi` for the `errstate` class and `no_nep5_warnings`
|
janus/lib/python3.10/site-packages/numpy/_core/memmap.py
ADDED
|
@@ -0,0 +1,361 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from contextlib import nullcontext
import operator
import numpy as np
from .._utils import set_module
from .numeric import uint8, ndarray, dtype

__all__ = ['memmap']

# Alias for the ``dtype`` constructor (historical name — kept so existing
# references keep working).
dtypedescr = dtype
# Short-form file modes accepted by memmap; per writeable_filemodes below,
# only 'r+' and 'w+' permit writing through to the file.
valid_filemodes = ["r", "c", "r+", "w+"]
writeable_filemodes = ["r+", "w+"]

# Long-form spellings accepted for ``mode``, mapped to the short forms in
# ``valid_filemodes``.
mode_equivalents = {
    "readonly":"r",
    "copyonwrite":"c",
    "readwrite":"r+",
    "write":"w+"
    }
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
@set_module('numpy')
class memmap(ndarray):
    """Create a memory-map to an array stored in a *binary* file on disk.

    Memory-mapped files are used for accessing small segments of large files
    on disk, without reading the entire file into memory. NumPy's
    memmap's are array-like objects. This differs from Python's ``mmap``
    module, which uses file-like objects.

    This subclass of ndarray has some unpleasant interactions with
    some operations, because it doesn't quite fit properly as a subclass.
    An alternative to using this subclass is to create the ``mmap``
    object yourself, then create an ndarray with ndarray.__new__ directly,
    passing the object created in its 'buffer=' parameter.

    This class may at some point be turned into a factory function
    which returns a view into an mmap buffer.

    Flush the memmap instance to write the changes to the file. Currently there
    is no API to close the underlying ``mmap``. It is tricky to ensure the
    resource is actually closed, since it may be shared between different
    memmap instances.


    Parameters
    ----------
    filename : str, file-like object, or pathlib.Path instance
        The file name or file object to be used as the array data buffer.
    dtype : data-type, optional
        The data-type used to interpret the file contents.
        Default is `uint8`.
    mode : {'r+', 'r', 'w+', 'c'}, optional
        The file is opened in this mode:

        +------+-------------------------------------------------------------+
        | 'r'  | Open existing file for reading only.                        |
        +------+-------------------------------------------------------------+
        | 'r+' | Open existing file for reading and writing.                 |
        +------+-------------------------------------------------------------+
        | 'w+' | Create or overwrite existing file for reading and writing.  |
        |      | If ``mode == 'w+'`` then `shape` must also be specified.    |
        +------+-------------------------------------------------------------+
        | 'c'  | Copy-on-write: assignments affect data in memory, but       |
        |      | changes are not saved to disk.  The file on disk is         |
        |      | read-only.                                                  |
        +------+-------------------------------------------------------------+

        Default is 'r+'.
    offset : int, optional
        In the file, array data starts at this offset. Since `offset` is
        measured in bytes, it should normally be a multiple of the byte-size
        of `dtype`. When ``mode != 'r'``, even positive offsets beyond end of
        file are valid; The file will be extended to accommodate the
        additional data. By default, ``memmap`` will start at the beginning of
        the file, even if ``filename`` is a file pointer ``fp`` and
        ``fp.tell() != 0``.
    shape : int or sequence of ints, optional
        The desired shape of the array. If ``mode == 'r'`` and the number
        of remaining bytes after `offset` is not a multiple of the byte-size
        of `dtype`, you must specify `shape`. By default, the returned array
        will be 1-D with the number of elements determined by file size
        and data-type.

        .. versionchanged:: 2.0
            The shape parameter can now be any integer sequence type, previously
            types were limited to tuple and int.

    order : {'C', 'F'}, optional
        Specify the order of the ndarray memory layout:
        :term:`row-major`, C-style or :term:`column-major`,
        Fortran-style.  This only has an effect if the shape is
        greater than 1-D.  The default order is 'C'.

    Attributes
    ----------
    filename : str or pathlib.Path instance
        Path to the mapped file.
    offset : int
        Offset position in the file.
    mode : str
        File mode.

    Methods
    -------
    flush
        Flush any changes in memory to file on disk.
        When you delete a memmap object, flush is called first to write
        changes to disk.


    See also
    --------
    lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file.

    Notes
    -----
    The memmap object can be used anywhere an ndarray is accepted.
    Given a memmap ``fp``, ``isinstance(fp, numpy.ndarray)`` returns
    ``True``.

    Memory-mapped files cannot be larger than 2GB on 32-bit systems.

    When a memmap causes a file to be created or extended beyond its
    current size in the filesystem, the contents of the new part are
    unspecified. On systems with POSIX filesystem semantics, the extended
    part will be filled with zero bytes.

    Examples
    --------
    >>> import numpy as np
    >>> data = np.arange(12, dtype='float32')
    >>> data.resize((3,4))

    This example uses a temporary file so that doctest doesn't write
    files to your directory. You would use a 'normal' filename.

    >>> from tempfile import mkdtemp
    >>> import os.path as path
    >>> filename = path.join(mkdtemp(), 'newfile.dat')

    Create a memmap with dtype and shape that matches our data:

    >>> fp = np.memmap(filename, dtype='float32', mode='w+', shape=(3,4))
    >>> fp
    memmap([[0., 0., 0., 0.],
            [0., 0., 0., 0.],
            [0., 0., 0., 0.]], dtype=float32)

    Write data to memmap array:

    >>> fp[:] = data[:]
    >>> fp
    memmap([[  0.,   1.,   2.,   3.],
            [  4.,   5.,   6.,   7.],
            [  8.,   9.,  10.,  11.]], dtype=float32)

    >>> fp.filename == path.abspath(filename)
    True

    Flushes memory changes to disk in order to read them back

    >>> fp.flush()

    Load the memmap and verify data was stored:

    >>> newfp = np.memmap(filename, dtype='float32', mode='r', shape=(3,4))
    >>> newfp
    memmap([[  0.,   1.,   2.,   3.],
            [  4.,   5.,   6.,   7.],
            [  8.,   9.,  10.,  11.]], dtype=float32)

    Read-only memmap:

    >>> fpr = np.memmap(filename, dtype='float32', mode='r', shape=(3,4))
    >>> fpr.flags.writeable
    False

    Copy-on-write memmap:

    >>> fpc = np.memmap(filename, dtype='float32', mode='c', shape=(3,4))
    >>> fpc.flags.writeable
    True

    It's possible to assign to copy-on-write array, but values are only
    written into the memory copy of the array, and not written to disk:

    >>> fpc
    memmap([[  0.,   1.,   2.,   3.],
            [  4.,   5.,   6.,   7.],
            [  8.,   9.,  10.,  11.]], dtype=float32)
    >>> fpc[0,:] = 0
    >>> fpc
    memmap([[  0.,   0.,   0.,   0.],
            [  4.,   5.,   6.,   7.],
            [  8.,   9.,  10.,  11.]], dtype=float32)

    File on disk is unchanged:

    >>> fpr
    memmap([[  0.,   1.,   2.,   3.],
            [  4.,   5.,   6.,   7.],
            [  8.,   9.,  10.,  11.]], dtype=float32)

    Offset into a memmap:

    >>> fpo = np.memmap(filename, dtype='float32', mode='r', offset=16)
    >>> fpo
    memmap([  4.,   5.,   6.,   7.,   8.,   9.,  10.,  11.], dtype=float32)

    """

    # Low priority so that ufuncs involving a memmap and another ndarray
    # subclass prefer the other subclass's __array_wrap__.
    __array_priority__ = -100.0

    def __new__(subtype, filename, dtype=uint8, mode='r+', offset=0,
                shape=None, order='C'):
        # Import here to minimize 'import numpy' overhead
        import mmap
        import os.path
        # Translate long-form mode spellings ("readonly", ...) to the short
        # forms; a mode that is neither long-form nor in valid_filemodes is
        # rejected here.
        try:
            mode = mode_equivalents[mode]
        except KeyError as e:
            if mode not in valid_filemodes:
                raise ValueError(
                    "mode must be one of {!r} (got {!r})"
                    .format(valid_filemodes + list(mode_equivalents.keys()), mode)
                ) from None

        if mode == 'w+' and shape is None:
            raise ValueError("shape must be given if mode == 'w+'")

        # A file-like object (has .read) is used as-is and NOT closed by us;
        # a path is opened here and closed when the 'with' block exits.
        if hasattr(filename, 'read'):
            f_ctx = nullcontext(filename)
        else:
            # Copy-on-write only needs the on-disk file for reading.
            f_ctx = open(
                os.fspath(filename),
                ('r' if mode == 'c' else mode)+'b'
            )

        with f_ctx as fid:
            # Seek to end to learn the current file length.
            fid.seek(0, 2)
            flen = fid.tell()
            descr = dtypedescr(dtype)
            _dbytes = descr.itemsize

            if shape is None:
                # Infer a 1-D shape from the bytes remaining after `offset`.
                bytes = flen - offset
                if bytes % _dbytes:
                    raise ValueError("Size of available data is not a "
                                     "multiple of the data-type size.")
                size = bytes // _dbytes
                shape = (size,)
            else:
                # Accept any integer or integer sequence; a lone integer is
                # wrapped into a 1-tuple via operator.index.
                if type(shape) not in (tuple, list):
                    try:
                        shape = [operator.index(shape)]
                    except TypeError:
                        pass
                shape = tuple(shape)
                size = np.intp(1)  # avoid default choice of np.int_, which might overflow
                for k in shape:
                    size *= k

            # Total bytes the mapping must cover, counted from file start.
            bytes = int(offset + size*_dbytes)

            if mode in ('w+', 'r+'):
                # gh-27723
                # if bytes == 0, we write out 1 byte to allow empty memmap.
                bytes = max(bytes, 1)
                if flen < bytes:
                    # Extend the file to the required length by writing a
                    # single byte at the last position.
                    fid.seek(bytes - 1, 0)
                    fid.write(b'\0')
                    fid.flush()

            # Map numpy modes onto mmap access flags.
            if mode == 'c':
                acc = mmap.ACCESS_COPY
            elif mode == 'r':
                acc = mmap.ACCESS_READ
            else:
                acc = mmap.ACCESS_WRITE

            # mmap requires `offset` to be a multiple of ALLOCATIONGRANULARITY;
            # round down to an aligned `start` and account for the slack via
            # `array_offset` inside the mapped buffer.
            start = offset - offset % mmap.ALLOCATIONGRANULARITY
            bytes -= start
            # bytes == 0 is problematic as in mmap length=0 maps the full file.
            # See PR gh-27723 for a more detailed explanation.
            if bytes == 0 and start > 0:
                bytes += mmap.ALLOCATIONGRANULARITY
                start -= mmap.ALLOCATIONGRANULARITY
            array_offset = offset - start
            mm = mmap.mmap(fid.fileno(), bytes, access=acc, offset=start)

            # Build the ndarray view over the mmap buffer and record the
            # bookkeeping attributes documented on the class.
            self = ndarray.__new__(subtype, shape, dtype=descr, buffer=mm,
                                   offset=array_offset, order=order)
            self._mmap = mm
            self.offset = offset
            self.mode = mode

            if isinstance(filename, os.PathLike):
                # special case - if we were constructed with a pathlib.path,
                # then filename is a path object, not a string
                self.filename = filename.resolve()
            elif hasattr(fid, "name") and isinstance(fid.name, str):
                # py3 returns int for TemporaryFile().name
                self.filename = os.path.abspath(fid.name)
            # same as memmap copies (e.g. memmap + 1)
            else:
                self.filename = None

        return self

    def __array_finalize__(self, obj):
        # Propagate the mmap bookkeeping only to views that actually share
        # memory with a parent memmap; plain copies lose the association.
        if hasattr(obj, '_mmap') and np.may_share_memory(self, obj):
            self._mmap = obj._mmap
            self.filename = obj.filename
            self.offset = obj.offset
            self.mode = obj.mode
        else:
            self._mmap = None
            self.filename = None
            self.offset = None
            self.mode = None

    def flush(self):
        """
        Write any changes in the array to the file on disk.

        For further information, see `memmap`.

        Parameters
        ----------
        None

        See Also
        --------
        memmap

        """
        # Delegate to the underlying mmap object (the array's base buffer),
        # when present; views without a flushable base are a silent no-op.
        if self.base is not None and hasattr(self.base, 'flush'):
            self.base.flush()

    def __array_wrap__(self, arr, context=None, return_scalar=False):
        arr = super().__array_wrap__(arr, context)

        # Return a memmap if a memmap was given as the output of the
        # ufunc. Leave the arr class unchanged if self is not a memmap
        # to keep original memmap subclasses behavior
        if self is arr or type(self) is not memmap:
            return arr

        # Return scalar instead of 0d memmap, e.g. for np.sum with
        # axis=None (note that subclasses will not reach here)
        if return_scalar:
            return arr[()]

        # Return ndarray otherwise
        return arr.view(np.ndarray)

    def __getitem__(self, index):
        res = super().__getitem__(index)
        # A memmap result that lost its mmap (e.g. fancy indexing made a
        # copy) is demoted to a plain ndarray.
        if type(res) is memmap and res._mmap is None:
            return res.view(type=ndarray)
        return res
|
janus/lib/python3.10/site-packages/numpy/_core/umath.py
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Create the numpy._core.umath namespace for backward compatibility. In v1.16
|
| 3 |
+
the multiarray and umath c-extension modules were merged into a single
|
| 4 |
+
_multiarray_umath extension module. So we replicate the old namespace
|
| 5 |
+
by importing from the extension module.
|
| 6 |
+
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
import numpy
|
| 10 |
+
from . import _multiarray_umath
|
| 11 |
+
from ._multiarray_umath import * # noqa: F403
|
| 12 |
+
# These imports are needed for backward compatibility,
|
| 13 |
+
# do not change them. issue gh-11862
|
| 14 |
+
# _ones_like is semi-public, on purpose not added to __all__
|
| 15 |
+
from ._multiarray_umath import (
|
| 16 |
+
_UFUNC_API, _add_newdoc_ufunc, _ones_like, _get_extobj_dict, _make_extobj,
|
| 17 |
+
_extobj_contextvar)
|
| 18 |
+
# These imports are needed for the strip & replace implementations
|
| 19 |
+
from ._multiarray_umath import (
|
| 20 |
+
_replace, _strip_whitespace, _lstrip_whitespace, _rstrip_whitespace,
|
| 21 |
+
_strip_chars, _lstrip_chars, _rstrip_chars, _expandtabs_length,
|
| 22 |
+
_expandtabs, _center, _ljust, _rjust, _zfill, _partition, _partition_index,
|
| 23 |
+
_rpartition, _rpartition_index)
|
| 24 |
+
|
| 25 |
+
__all__ = [
|
| 26 |
+
'absolute', 'add',
|
| 27 |
+
'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh',
|
| 28 |
+
'bitwise_and', 'bitwise_or', 'bitwise_xor', 'cbrt', 'ceil', 'conj',
|
| 29 |
+
'conjugate', 'copysign', 'cos', 'cosh', 'bitwise_count', 'deg2rad',
|
| 30 |
+
'degrees', 'divide', 'divmod', 'e', 'equal', 'euler_gamma', 'exp', 'exp2',
|
| 31 |
+
'expm1', 'fabs', 'floor', 'floor_divide', 'float_power', 'fmax', 'fmin',
|
| 32 |
+
'fmod', 'frexp', 'frompyfunc', 'gcd', 'greater', 'greater_equal',
|
| 33 |
+
'heaviside', 'hypot', 'invert', 'isfinite', 'isinf', 'isnan', 'isnat',
|
| 34 |
+
'lcm', 'ldexp', 'left_shift', 'less', 'less_equal', 'log', 'log10',
|
| 35 |
+
'log1p', 'log2', 'logaddexp', 'logaddexp2', 'logical_and', 'logical_not',
|
| 36 |
+
'logical_or', 'logical_xor', 'matvec', 'maximum', 'minimum', 'mod', 'modf',
|
| 37 |
+
'multiply', 'negative', 'nextafter', 'not_equal', 'pi', 'positive',
|
| 38 |
+
'power', 'rad2deg', 'radians', 'reciprocal', 'remainder', 'right_shift',
|
| 39 |
+
'rint', 'sign', 'signbit', 'sin', 'sinh', 'spacing', 'sqrt', 'square',
|
| 40 |
+
'subtract', 'tan', 'tanh', 'true_divide', 'trunc', 'vecdot', 'vecmat']
|
janus/lib/python3.10/site-packages/numpy/lib/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (2.9 kB). View file
|
|
|
janus/lib/python3.10/site-packages/numpy/lib/__pycache__/_array_utils_impl.cpython-310.pyc
ADDED
|
Binary file (1.77 kB). View file
|
|
|
janus/lib/python3.10/site-packages/numpy/lib/__pycache__/_arraypad_impl.cpython-310.pyc
ADDED
|
Binary file (22.4 kB). View file
|
|
|
janus/lib/python3.10/site-packages/numpy/lib/__pycache__/_arraysetops_impl.cpython-310.pyc
ADDED
|
Binary file (33.4 kB). View file
|
|
|
janus/lib/python3.10/site-packages/numpy/lib/__pycache__/_arrayterator_impl.cpython-310.pyc
ADDED
|
Binary file (7.05 kB). View file
|
|
|
janus/lib/python3.10/site-packages/numpy/lib/__pycache__/_datasource.cpython-310.pyc
ADDED
|
Binary file (20.7 kB). View file
|
|
|
janus/lib/python3.10/site-packages/numpy/lib/__pycache__/_histograms_impl.cpython-310.pyc
ADDED
|
Binary file (31.4 kB). View file
|
|
|
janus/lib/python3.10/site-packages/numpy/lib/__pycache__/_index_tricks_impl.cpython-310.pyc
ADDED
|
Binary file (29.8 kB). View file
|
|
|
janus/lib/python3.10/site-packages/numpy/lib/__pycache__/_iotools.cpython-310.pyc
ADDED
|
Binary file (26.2 kB). View file
|
|
|
janus/lib/python3.10/site-packages/numpy/lib/__pycache__/_nanfunctions_impl.cpython-310.pyc
ADDED
|
Binary file (63.4 kB). View file
|
|
|
janus/lib/python3.10/site-packages/numpy/lib/__pycache__/_npyio_impl.cpython-310.pyc
ADDED
|
Binary file (76.2 kB). View file
|
|
|
janus/lib/python3.10/site-packages/numpy/lib/__pycache__/_polynomial_impl.cpython-310.pyc
ADDED
|
Binary file (41.6 kB). View file
|
|
|
janus/lib/python3.10/site-packages/numpy/lib/__pycache__/_scimath_impl.cpython-310.pyc
ADDED
|
Binary file (16.1 kB). View file
|
|
|
janus/lib/python3.10/site-packages/numpy/lib/__pycache__/_shape_base_impl.cpython-310.pyc
ADDED
|
Binary file (36.3 kB). View file
|
|
|
janus/lib/python3.10/site-packages/numpy/lib/__pycache__/_twodim_base_impl.cpython-310.pyc
ADDED
|
Binary file (34 kB). View file
|
|
|
janus/lib/python3.10/site-packages/numpy/lib/__pycache__/_type_check_impl.cpython-310.pyc
ADDED
|
Binary file (18.8 kB). View file
|
|
|
janus/lib/python3.10/site-packages/numpy/lib/__pycache__/_ufunclike_impl.cpython-310.pyc
ADDED
|
Binary file (6.24 kB). View file
|
|
|
janus/lib/python3.10/site-packages/numpy/lib/__pycache__/_user_array_impl.cpython-310.pyc
ADDED
|
Binary file (10.8 kB). View file
|
|
|
janus/lib/python3.10/site-packages/numpy/lib/__pycache__/_utils_impl.cpython-310.pyc
ADDED
|
Binary file (18.6 kB). View file
|
|
|
janus/lib/python3.10/site-packages/numpy/lib/__pycache__/_version.cpython-310.pyc
ADDED
|
Binary file (4.81 kB). View file
|
|
|
janus/lib/python3.10/site-packages/numpy/lib/__pycache__/array_utils.cpython-310.pyc
ADDED
|
Binary file (314 Bytes). View file
|
|
|
janus/lib/python3.10/site-packages/numpy/lib/__pycache__/format.cpython-310.pyc
ADDED
|
Binary file (27.4 kB). View file
|
|
|
janus/lib/python3.10/site-packages/numpy/lib/__pycache__/introspect.cpython-310.pyc
ADDED
|
Binary file (2.81 kB). View file
|
|
|
janus/lib/python3.10/site-packages/numpy/lib/__pycache__/mixins.cpython-310.pyc
ADDED
|
Binary file (7.29 kB). View file
|
|
|
janus/lib/python3.10/site-packages/numpy/lib/__pycache__/npyio.cpython-310.pyc
ADDED
|
Binary file (239 Bytes). View file
|
|
|
janus/lib/python3.10/site-packages/numpy/lib/__pycache__/recfunctions.cpython-310.pyc
ADDED
|
Binary file (48.6 kB). View file
|
|
|
janus/lib/python3.10/site-packages/numpy/lib/__pycache__/scimath.cpython-310.pyc
ADDED
|
Binary file (365 Bytes). View file
|
|
|
janus/lib/python3.10/site-packages/numpy/lib/__pycache__/stride_tricks.cpython-310.pyc
ADDED
|
Binary file (267 Bytes). View file
|
|
|
janus/lib/python3.10/site-packages/numpy/lib/__pycache__/user_array.cpython-310.pyc
ADDED
|
Binary file (230 Bytes). View file
|
|
|
janus/lib/python3.10/site-packages/numpy/lib/tests/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (166 Bytes). View file
|
|
|
janus/lib/python3.10/site-packages/numpy/lib/tests/__pycache__/test__iotools.cpython-310.pyc
ADDED
|
Binary file (11.3 kB). View file
|
|
|
janus/lib/python3.10/site-packages/numpy/lib/tests/__pycache__/test_arraysetops.cpython-310.pyc
ADDED
|
Binary file (27.4 kB). View file
|
|
|
janus/lib/python3.10/site-packages/numpy/lib/tests/__pycache__/test_arrayterator.cpython-310.pyc
ADDED
|
Binary file (1.63 kB). View file
|
|
|