Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +1 -0
- mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/__pycache__/test__exceptions.cpython-310.pyc +0 -0
- mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/__pycache__/test_array_interface.cpython-310.pyc +0 -0
- mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/__pycache__/test_arrayobject.cpython-310.pyc +0 -0
- mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/__pycache__/test_defchararray.cpython-310.pyc +0 -0
- mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/__pycache__/test_extint128.cpython-310.pyc +0 -0
- mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/__pycache__/test_machar.cpython-310.pyc +0 -0
- mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/__pycache__/test_multithreading.cpython-310.pyc +0 -0
- mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/__pycache__/test_scalar_methods.cpython-310.pyc +0 -0
- mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/__pycache__/test_scalarprint.cpython-310.pyc +0 -0
- mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/__pycache__/test_umath_accuracy.cpython-310.pyc +0 -0
- mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/_natype.py +198 -0
- mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/examples/cython/checks.pyx +274 -0
- mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/examples/cython/meson.build +43 -0
- mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/examples/cython/setup.py +37 -0
- mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/examples/limited_api/__pycache__/setup.cpython-310.pyc +0 -0
- mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/examples/limited_api/limited_api1.c +17 -0
- mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/examples/limited_api/limited_api2.pyx +11 -0
- mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/examples/limited_api/limited_api_latest.c +19 -0
- mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/examples/limited_api/meson.build +59 -0
- mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/examples/limited_api/setup.py +22 -0
- mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_abc.py +54 -0
- mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_api.py +616 -0
- mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_array_interface.py +219 -0
- mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_arraymethod.py +86 -0
- mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_arrayobject.py +75 -0
- mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_casting_floatingpoint_errors.py +154 -0
- mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_cpu_dispatcher.py +45 -0
- mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_cpu_features.py +416 -0
- mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_cython.py +303 -0
- mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_defchararray.py +822 -0
- mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_dtype.py +1963 -0
- mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_einsum.py +1229 -0
- mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_getlimits.py +203 -0
- mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_indexing.py +1444 -0
- mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_mem_overlap.py +933 -0
- mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_mem_policy.py +449 -0
- mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_memmap.py +230 -0
- mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_numerictypes.py +620 -0
- mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_overrides.py +797 -0
- mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_records.py +540 -0
- mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_scalar_ctors.py +204 -0
- mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_scalarbuffer.py +153 -0
- mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_scalarprint.py +382 -0
- mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_simd_module.py +101 -0
- mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_stringdtype.py +1813 -0
- mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_strings.py +1287 -0
- mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_ufunc.py +0 -0
- mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_umath.py +0 -0
- mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_umath_accuracy.py +121 -0
.gitattributes
CHANGED
|
@@ -553,3 +553,4 @@ parrot/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libcupti.so.12 filter=
|
|
| 553 |
moondream/lib/python3.10/site-packages/altair/vegalite/v5/__pycache__/api.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 554 |
mantis_evalkit/bin/python3.10 filter=lfs diff=lfs merge=lfs -text
|
| 555 |
openflamingo/lib/python3.10/site-packages/torch/lib/libnvrtc-672ee683.so.11.2 filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 553 |
moondream/lib/python3.10/site-packages/altair/vegalite/v5/__pycache__/api.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 554 |
mantis_evalkit/bin/python3.10 filter=lfs diff=lfs merge=lfs -text
|
| 555 |
openflamingo/lib/python3.10/site-packages/torch/lib/libnvrtc-672ee683.so.11.2 filter=lfs diff=lfs merge=lfs -text
|
| 556 |
+
mantis_evalkit/lib/python3.10/site-packages/numpy/lib/tests/__pycache__/test_function_base.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/__pycache__/test__exceptions.cpython-310.pyc
ADDED
|
Binary file (3.45 kB). View file
|
|
|
mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/__pycache__/test_array_interface.cpython-310.pyc
ADDED
|
Binary file (6.1 kB). View file
|
|
|
mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/__pycache__/test_arrayobject.cpython-310.pyc
ADDED
|
Binary file (2.31 kB). View file
|
|
|
mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/__pycache__/test_defchararray.cpython-310.pyc
ADDED
|
Binary file (32.2 kB). View file
|
|
|
mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/__pycache__/test_extint128.cpython-310.pyc
ADDED
|
Binary file (6.6 kB). View file
|
|
|
mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/__pycache__/test_machar.cpython-310.pyc
ADDED
|
Binary file (1.41 kB). View file
|
|
|
mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/__pycache__/test_multithreading.cpython-310.pyc
ADDED
|
Binary file (3.73 kB). View file
|
|
|
mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/__pycache__/test_scalar_methods.cpython-310.pyc
ADDED
|
Binary file (9.17 kB). View file
|
|
|
mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/__pycache__/test_scalarprint.cpython-310.pyc
ADDED
|
Binary file (11.6 kB). View file
|
|
|
mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/__pycache__/test_umath_accuracy.cpython-310.pyc
ADDED
|
Binary file (4.94 kB). View file
|
|
|
mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/_natype.py
ADDED
|
@@ -0,0 +1,198 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Vendored implementation of pandas.NA, adapted from pandas/_libs/missing.pyx
|
| 2 |
+
#
|
| 3 |
+
# This is vendored to avoid adding pandas as a test dependency.
|
| 4 |
+
|
| 5 |
+
__all__ = ["pd_NA"]
|
| 6 |
+
|
| 7 |
+
import numbers
|
| 8 |
+
|
| 9 |
+
import numpy as np
|
| 10 |
+
|
| 11 |
+
def _create_binary_propagating_op(name, is_divmod=False):
|
| 12 |
+
is_cmp = name.strip("_") in ["eq", "ne", "le", "lt", "ge", "gt"]
|
| 13 |
+
|
| 14 |
+
def method(self, other):
|
| 15 |
+
if (
|
| 16 |
+
other is pd_NA
|
| 17 |
+
or isinstance(other, (str, bytes))
|
| 18 |
+
or isinstance(other, (numbers.Number, np.bool))
|
| 19 |
+
or isinstance(other, np.ndarray)
|
| 20 |
+
and not other.shape
|
| 21 |
+
):
|
| 22 |
+
# Need the other.shape clause to handle NumPy scalars,
|
| 23 |
+
# since we do a setitem on `out` below, which
|
| 24 |
+
# won't work for NumPy scalars.
|
| 25 |
+
if is_divmod:
|
| 26 |
+
return pd_NA, pd_NA
|
| 27 |
+
else:
|
| 28 |
+
return pd_NA
|
| 29 |
+
|
| 30 |
+
elif isinstance(other, np.ndarray):
|
| 31 |
+
out = np.empty(other.shape, dtype=object)
|
| 32 |
+
out[:] = pd_NA
|
| 33 |
+
|
| 34 |
+
if is_divmod:
|
| 35 |
+
return out, out.copy()
|
| 36 |
+
else:
|
| 37 |
+
return out
|
| 38 |
+
|
| 39 |
+
elif is_cmp and isinstance(other, (np.datetime64, np.timedelta64)):
|
| 40 |
+
return pd_NA
|
| 41 |
+
|
| 42 |
+
elif isinstance(other, np.datetime64):
|
| 43 |
+
if name in ["__sub__", "__rsub__"]:
|
| 44 |
+
return pd_NA
|
| 45 |
+
|
| 46 |
+
elif isinstance(other, np.timedelta64):
|
| 47 |
+
if name in ["__sub__", "__rsub__", "__add__", "__radd__"]:
|
| 48 |
+
return pd_NA
|
| 49 |
+
|
| 50 |
+
return NotImplemented
|
| 51 |
+
|
| 52 |
+
method.__name__ = name
|
| 53 |
+
return method
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def _create_unary_propagating_op(name: str):
|
| 57 |
+
def method(self):
|
| 58 |
+
return pd_NA
|
| 59 |
+
|
| 60 |
+
method.__name__ = name
|
| 61 |
+
return method
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
class NAType:
|
| 65 |
+
def __repr__(self) -> str:
|
| 66 |
+
return "<NA>"
|
| 67 |
+
|
| 68 |
+
def __format__(self, format_spec) -> str:
|
| 69 |
+
try:
|
| 70 |
+
return self.__repr__().__format__(format_spec)
|
| 71 |
+
except ValueError:
|
| 72 |
+
return self.__repr__()
|
| 73 |
+
|
| 74 |
+
def __bool__(self):
|
| 75 |
+
raise TypeError("boolean value of NA is ambiguous")
|
| 76 |
+
|
| 77 |
+
def __hash__(self):
|
| 78 |
+
exponent = 31 if is_32bit else 61
|
| 79 |
+
return 2**exponent - 1
|
| 80 |
+
|
| 81 |
+
def __reduce__(self):
|
| 82 |
+
return "pd_NA"
|
| 83 |
+
|
| 84 |
+
# Binary arithmetic and comparison ops -> propagate
|
| 85 |
+
|
| 86 |
+
__add__ = _create_binary_propagating_op("__add__")
|
| 87 |
+
__radd__ = _create_binary_propagating_op("__radd__")
|
| 88 |
+
__sub__ = _create_binary_propagating_op("__sub__")
|
| 89 |
+
__rsub__ = _create_binary_propagating_op("__rsub__")
|
| 90 |
+
__mul__ = _create_binary_propagating_op("__mul__")
|
| 91 |
+
__rmul__ = _create_binary_propagating_op("__rmul__")
|
| 92 |
+
__matmul__ = _create_binary_propagating_op("__matmul__")
|
| 93 |
+
__rmatmul__ = _create_binary_propagating_op("__rmatmul__")
|
| 94 |
+
__truediv__ = _create_binary_propagating_op("__truediv__")
|
| 95 |
+
__rtruediv__ = _create_binary_propagating_op("__rtruediv__")
|
| 96 |
+
__floordiv__ = _create_binary_propagating_op("__floordiv__")
|
| 97 |
+
__rfloordiv__ = _create_binary_propagating_op("__rfloordiv__")
|
| 98 |
+
__mod__ = _create_binary_propagating_op("__mod__")
|
| 99 |
+
__rmod__ = _create_binary_propagating_op("__rmod__")
|
| 100 |
+
__divmod__ = _create_binary_propagating_op("__divmod__", is_divmod=True)
|
| 101 |
+
__rdivmod__ = _create_binary_propagating_op("__rdivmod__", is_divmod=True)
|
| 102 |
+
# __lshift__ and __rshift__ are not implemented
|
| 103 |
+
|
| 104 |
+
__eq__ = _create_binary_propagating_op("__eq__")
|
| 105 |
+
__ne__ = _create_binary_propagating_op("__ne__")
|
| 106 |
+
__le__ = _create_binary_propagating_op("__le__")
|
| 107 |
+
__lt__ = _create_binary_propagating_op("__lt__")
|
| 108 |
+
__gt__ = _create_binary_propagating_op("__gt__")
|
| 109 |
+
__ge__ = _create_binary_propagating_op("__ge__")
|
| 110 |
+
|
| 111 |
+
# Unary ops
|
| 112 |
+
|
| 113 |
+
__neg__ = _create_unary_propagating_op("__neg__")
|
| 114 |
+
__pos__ = _create_unary_propagating_op("__pos__")
|
| 115 |
+
__abs__ = _create_unary_propagating_op("__abs__")
|
| 116 |
+
__invert__ = _create_unary_propagating_op("__invert__")
|
| 117 |
+
|
| 118 |
+
# pow has special
|
| 119 |
+
def __pow__(self, other):
|
| 120 |
+
if other is pd_NA:
|
| 121 |
+
return pd_NA
|
| 122 |
+
elif isinstance(other, (numbers.Number, np.bool)):
|
| 123 |
+
if other == 0:
|
| 124 |
+
# returning positive is correct for +/- 0.
|
| 125 |
+
return type(other)(1)
|
| 126 |
+
else:
|
| 127 |
+
return pd_NA
|
| 128 |
+
elif util.is_array(other):
|
| 129 |
+
return np.where(other == 0, other.dtype.type(1), pd_NA)
|
| 130 |
+
|
| 131 |
+
return NotImplemented
|
| 132 |
+
|
| 133 |
+
def __rpow__(self, other):
|
| 134 |
+
if other is pd_NA:
|
| 135 |
+
return pd_NA
|
| 136 |
+
elif isinstance(other, (numbers.Number, np.bool)):
|
| 137 |
+
if other == 1:
|
| 138 |
+
return other
|
| 139 |
+
else:
|
| 140 |
+
return pd_NA
|
| 141 |
+
elif util.is_array(other):
|
| 142 |
+
return np.where(other == 1, other, pd_NA)
|
| 143 |
+
return NotImplemented
|
| 144 |
+
|
| 145 |
+
# Logical ops using Kleene logic
|
| 146 |
+
|
| 147 |
+
def __and__(self, other):
|
| 148 |
+
if other is False:
|
| 149 |
+
return False
|
| 150 |
+
elif other is True or other is pd_NA:
|
| 151 |
+
return pd_NA
|
| 152 |
+
return NotImplemented
|
| 153 |
+
|
| 154 |
+
__rand__ = __and__
|
| 155 |
+
|
| 156 |
+
def __or__(self, other):
|
| 157 |
+
if other is True:
|
| 158 |
+
return True
|
| 159 |
+
elif other is False or other is pd_NA:
|
| 160 |
+
return pd_NA
|
| 161 |
+
return NotImplemented
|
| 162 |
+
|
| 163 |
+
__ror__ = __or__
|
| 164 |
+
|
| 165 |
+
def __xor__(self, other):
|
| 166 |
+
if other is False or other is True or other is pd_NA:
|
| 167 |
+
return pd_NA
|
| 168 |
+
return NotImplemented
|
| 169 |
+
|
| 170 |
+
__rxor__ = __xor__
|
| 171 |
+
|
| 172 |
+
__array_priority__ = 1000
|
| 173 |
+
_HANDLED_TYPES = (np.ndarray, numbers.Number, str, np.bool)
|
| 174 |
+
|
| 175 |
+
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
|
| 176 |
+
types = self._HANDLED_TYPES + (NAType,)
|
| 177 |
+
for x in inputs:
|
| 178 |
+
if not isinstance(x, types):
|
| 179 |
+
return NotImplemented
|
| 180 |
+
|
| 181 |
+
if method != "__call__":
|
| 182 |
+
raise ValueError(f"ufunc method '{method}' not supported for NA")
|
| 183 |
+
result = maybe_dispatch_ufunc_to_dunder_op(
|
| 184 |
+
self, ufunc, method, *inputs, **kwargs
|
| 185 |
+
)
|
| 186 |
+
if result is NotImplemented:
|
| 187 |
+
# For a NumPy ufunc that's not a binop, like np.logaddexp
|
| 188 |
+
index = [i for i, x in enumerate(inputs) if x is pd_NA][0]
|
| 189 |
+
result = np.broadcast_arrays(*inputs)[index]
|
| 190 |
+
if result.ndim == 0:
|
| 191 |
+
result = result.item()
|
| 192 |
+
if ufunc.nout > 1:
|
| 193 |
+
result = (pd_NA,) * ufunc.nout
|
| 194 |
+
|
| 195 |
+
return result
|
| 196 |
+
|
| 197 |
+
|
| 198 |
+
pd_NA = NAType()
|
mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/examples/cython/checks.pyx
ADDED
|
@@ -0,0 +1,274 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#cython: language_level=3
|
| 2 |
+
|
| 3 |
+
"""
|
| 4 |
+
Functions in this module give python-space wrappers for cython functions
|
| 5 |
+
exposed in numpy/__init__.pxd, so they can be tested in test_cython.py
|
| 6 |
+
"""
|
| 7 |
+
cimport numpy as cnp
|
| 8 |
+
cnp.import_array()
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def is_td64(obj):
|
| 12 |
+
return cnp.is_timedelta64_object(obj)
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
def is_dt64(obj):
|
| 16 |
+
return cnp.is_datetime64_object(obj)
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
def get_dt64_value(obj):
|
| 20 |
+
return cnp.get_datetime64_value(obj)
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def get_td64_value(obj):
|
| 24 |
+
return cnp.get_timedelta64_value(obj)
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def get_dt64_unit(obj):
|
| 28 |
+
return cnp.get_datetime64_unit(obj)
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def is_integer(obj):
|
| 32 |
+
return isinstance(obj, (cnp.integer, int))
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def get_datetime_iso_8601_strlen():
|
| 36 |
+
return cnp.get_datetime_iso_8601_strlen(0, cnp.NPY_FR_ns)
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def convert_datetime64_to_datetimestruct():
|
| 40 |
+
cdef:
|
| 41 |
+
cnp.npy_datetimestruct dts
|
| 42 |
+
cnp.PyArray_DatetimeMetaData meta
|
| 43 |
+
cnp.int64_t value = 1647374515260292
|
| 44 |
+
# i.e. (time.time() * 10**6) at 2022-03-15 20:01:55.260292 UTC
|
| 45 |
+
|
| 46 |
+
meta.base = cnp.NPY_FR_us
|
| 47 |
+
meta.num = 1
|
| 48 |
+
cnp.convert_datetime64_to_datetimestruct(&meta, value, &dts)
|
| 49 |
+
return dts
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def make_iso_8601_datetime(dt: "datetime"):
|
| 53 |
+
cdef:
|
| 54 |
+
cnp.npy_datetimestruct dts
|
| 55 |
+
char result[36] # 36 corresponds to NPY_FR_s passed below
|
| 56 |
+
int local = 0
|
| 57 |
+
int utc = 0
|
| 58 |
+
int tzoffset = 0
|
| 59 |
+
|
| 60 |
+
dts.year = dt.year
|
| 61 |
+
dts.month = dt.month
|
| 62 |
+
dts.day = dt.day
|
| 63 |
+
dts.hour = dt.hour
|
| 64 |
+
dts.min = dt.minute
|
| 65 |
+
dts.sec = dt.second
|
| 66 |
+
dts.us = dt.microsecond
|
| 67 |
+
dts.ps = dts.as = 0
|
| 68 |
+
|
| 69 |
+
cnp.make_iso_8601_datetime(
|
| 70 |
+
&dts,
|
| 71 |
+
result,
|
| 72 |
+
sizeof(result),
|
| 73 |
+
local,
|
| 74 |
+
utc,
|
| 75 |
+
cnp.NPY_FR_s,
|
| 76 |
+
tzoffset,
|
| 77 |
+
cnp.NPY_NO_CASTING,
|
| 78 |
+
)
|
| 79 |
+
return result
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
cdef cnp.broadcast multiiter_from_broadcast_obj(object bcast):
|
| 83 |
+
cdef dict iter_map = {
|
| 84 |
+
1: cnp.PyArray_MultiIterNew1,
|
| 85 |
+
2: cnp.PyArray_MultiIterNew2,
|
| 86 |
+
3: cnp.PyArray_MultiIterNew3,
|
| 87 |
+
4: cnp.PyArray_MultiIterNew4,
|
| 88 |
+
5: cnp.PyArray_MultiIterNew5,
|
| 89 |
+
}
|
| 90 |
+
arrays = [x.base for x in bcast.iters]
|
| 91 |
+
cdef cnp.broadcast result = iter_map[len(arrays)](*arrays)
|
| 92 |
+
return result
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
def get_multiiter_size(bcast: "broadcast"):
|
| 96 |
+
cdef cnp.broadcast multi = multiiter_from_broadcast_obj(bcast)
|
| 97 |
+
return multi.size
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
def get_multiiter_number_of_dims(bcast: "broadcast"):
|
| 101 |
+
cdef cnp.broadcast multi = multiiter_from_broadcast_obj(bcast)
|
| 102 |
+
return multi.nd
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
def get_multiiter_current_index(bcast: "broadcast"):
|
| 106 |
+
cdef cnp.broadcast multi = multiiter_from_broadcast_obj(bcast)
|
| 107 |
+
return multi.index
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
def get_multiiter_num_of_iterators(bcast: "broadcast"):
|
| 111 |
+
cdef cnp.broadcast multi = multiiter_from_broadcast_obj(bcast)
|
| 112 |
+
return multi.numiter
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
def get_multiiter_shape(bcast: "broadcast"):
|
| 116 |
+
cdef cnp.broadcast multi = multiiter_from_broadcast_obj(bcast)
|
| 117 |
+
return tuple([multi.dimensions[i] for i in range(bcast.nd)])
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
def get_multiiter_iters(bcast: "broadcast"):
|
| 121 |
+
cdef cnp.broadcast multi = multiiter_from_broadcast_obj(bcast)
|
| 122 |
+
return tuple([<cnp.flatiter>multi.iters[i] for i in range(bcast.numiter)])
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
def get_default_integer():
|
| 126 |
+
if cnp.NPY_DEFAULT_INT == cnp.NPY_LONG:
|
| 127 |
+
return cnp.dtype("long")
|
| 128 |
+
if cnp.NPY_DEFAULT_INT == cnp.NPY_INTP:
|
| 129 |
+
return cnp.dtype("intp")
|
| 130 |
+
return None
|
| 131 |
+
|
| 132 |
+
def get_ravel_axis():
|
| 133 |
+
return cnp.NPY_RAVEL_AXIS
|
| 134 |
+
|
| 135 |
+
|
| 136 |
+
def conv_intp(cnp.intp_t val):
|
| 137 |
+
return val
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
def get_dtype_flags(cnp.dtype dtype):
|
| 141 |
+
return dtype.flags
|
| 142 |
+
|
| 143 |
+
|
| 144 |
+
cdef cnp.NpyIter* npyiter_from_nditer_obj(object it):
|
| 145 |
+
"""A function to create a NpyIter struct from a nditer object.
|
| 146 |
+
|
| 147 |
+
This function is only meant for testing purposes and only extracts the
|
| 148 |
+
necessary info from nditer to test the functionality of NpyIter methods
|
| 149 |
+
"""
|
| 150 |
+
cdef:
|
| 151 |
+
cnp.NpyIter* cit
|
| 152 |
+
cnp.PyArray_Descr* op_dtypes[3]
|
| 153 |
+
cnp.npy_uint32 op_flags[3]
|
| 154 |
+
cnp.PyArrayObject* ops[3]
|
| 155 |
+
cnp.npy_uint32 flags = 0
|
| 156 |
+
|
| 157 |
+
if it.has_index:
|
| 158 |
+
flags |= cnp.NPY_ITER_C_INDEX
|
| 159 |
+
if it.has_delayed_bufalloc:
|
| 160 |
+
flags |= cnp.NPY_ITER_BUFFERED | cnp.NPY_ITER_DELAY_BUFALLOC
|
| 161 |
+
if it.has_multi_index:
|
| 162 |
+
flags |= cnp.NPY_ITER_MULTI_INDEX
|
| 163 |
+
|
| 164 |
+
# one of READWRITE, READONLY and WRTIEONLY at the minimum must be specified for op_flags
|
| 165 |
+
for i in range(it.nop):
|
| 166 |
+
op_flags[i] = cnp.NPY_ITER_READONLY
|
| 167 |
+
|
| 168 |
+
for i in range(it.nop):
|
| 169 |
+
op_dtypes[i] = cnp.PyArray_DESCR(it.operands[i])
|
| 170 |
+
ops[i] = <cnp.PyArrayObject*>it.operands[i]
|
| 171 |
+
|
| 172 |
+
cit = cnp.NpyIter_MultiNew(it.nop, &ops[0], flags, cnp.NPY_KEEPORDER,
|
| 173 |
+
cnp.NPY_NO_CASTING, &op_flags[0],
|
| 174 |
+
<cnp.PyArray_Descr**>NULL)
|
| 175 |
+
return cit
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
def get_npyiter_size(it: "nditer"):
|
| 179 |
+
cdef cnp.NpyIter* cit = npyiter_from_nditer_obj(it)
|
| 180 |
+
result = cnp.NpyIter_GetIterSize(cit)
|
| 181 |
+
cnp.NpyIter_Deallocate(cit)
|
| 182 |
+
return result
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
def get_npyiter_ndim(it: "nditer"):
|
| 186 |
+
cdef cnp.NpyIter* cit = npyiter_from_nditer_obj(it)
|
| 187 |
+
result = cnp.NpyIter_GetNDim(cit)
|
| 188 |
+
cnp.NpyIter_Deallocate(cit)
|
| 189 |
+
return result
|
| 190 |
+
|
| 191 |
+
|
| 192 |
+
def get_npyiter_nop(it: "nditer"):
|
| 193 |
+
cdef cnp.NpyIter* cit = npyiter_from_nditer_obj(it)
|
| 194 |
+
result = cnp.NpyIter_GetNOp(cit)
|
| 195 |
+
cnp.NpyIter_Deallocate(cit)
|
| 196 |
+
return result
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
def get_npyiter_operands(it: "nditer"):
|
| 200 |
+
cdef cnp.NpyIter* cit = npyiter_from_nditer_obj(it)
|
| 201 |
+
try:
|
| 202 |
+
arr = cnp.NpyIter_GetOperandArray(cit)
|
| 203 |
+
return tuple([<cnp.ndarray>arr[i] for i in range(it.nop)])
|
| 204 |
+
finally:
|
| 205 |
+
cnp.NpyIter_Deallocate(cit)
|
| 206 |
+
|
| 207 |
+
|
| 208 |
+
def get_npyiter_itviews(it: "nditer"):
|
| 209 |
+
cdef cnp.NpyIter* cit = npyiter_from_nditer_obj(it)
|
| 210 |
+
result = tuple([cnp.NpyIter_GetIterView(cit, i) for i in range(it.nop)])
|
| 211 |
+
cnp.NpyIter_Deallocate(cit)
|
| 212 |
+
return result
|
| 213 |
+
|
| 214 |
+
|
| 215 |
+
def get_npyiter_dtypes(it: "nditer"):
|
| 216 |
+
cdef cnp.NpyIter* cit = npyiter_from_nditer_obj(it)
|
| 217 |
+
try:
|
| 218 |
+
arr = cnp.NpyIter_GetDescrArray(cit)
|
| 219 |
+
return tuple([<cnp.dtype>arr[i] for i in range(it.nop)])
|
| 220 |
+
finally:
|
| 221 |
+
cnp.NpyIter_Deallocate(cit)
|
| 222 |
+
|
| 223 |
+
|
| 224 |
+
def npyiter_has_delayed_bufalloc(it: "nditer"):
|
| 225 |
+
cdef cnp.NpyIter* cit = npyiter_from_nditer_obj(it)
|
| 226 |
+
result = cnp.NpyIter_HasDelayedBufAlloc(cit)
|
| 227 |
+
cnp.NpyIter_Deallocate(cit)
|
| 228 |
+
return result
|
| 229 |
+
|
| 230 |
+
|
| 231 |
+
def npyiter_has_index(it: "nditer"):
|
| 232 |
+
cdef cnp.NpyIter* cit = npyiter_from_nditer_obj(it)
|
| 233 |
+
result = cnp.NpyIter_HasIndex(cit)
|
| 234 |
+
cnp.NpyIter_Deallocate(cit)
|
| 235 |
+
return result
|
| 236 |
+
|
| 237 |
+
|
| 238 |
+
def npyiter_has_multi_index(it: "nditer"):
|
| 239 |
+
cdef cnp.NpyIter* cit = npyiter_from_nditer_obj(it)
|
| 240 |
+
result = cnp.NpyIter_HasMultiIndex(cit)
|
| 241 |
+
cnp.NpyIter_Deallocate(cit)
|
| 242 |
+
return result
|
| 243 |
+
|
| 244 |
+
|
| 245 |
+
def npyiter_has_finished(it: "nditer"):
|
| 246 |
+
cdef cnp.NpyIter* cit
|
| 247 |
+
try:
|
| 248 |
+
cit = npyiter_from_nditer_obj(it)
|
| 249 |
+
cnp.NpyIter_GotoIterIndex(cit, it.index)
|
| 250 |
+
return not (cnp.NpyIter_GetIterIndex(cit) < cnp.NpyIter_GetIterSize(cit))
|
| 251 |
+
finally:
|
| 252 |
+
cnp.NpyIter_Deallocate(cit)
|
| 253 |
+
|
| 254 |
+
def compile_fillwithbyte():
|
| 255 |
+
# Regression test for gh-25878, mostly checks it compiles.
|
| 256 |
+
cdef cnp.npy_intp dims[2]
|
| 257 |
+
dims = (1, 2)
|
| 258 |
+
pos = cnp.PyArray_ZEROS(2, dims, cnp.NPY_UINT8, 0)
|
| 259 |
+
cnp.PyArray_FILLWBYTE(pos, 1)
|
| 260 |
+
return pos
|
| 261 |
+
|
| 262 |
+
def inc2_cfloat_struct(cnp.ndarray[cnp.cfloat_t] arr):
|
| 263 |
+
# This works since we compile in C mode, it will fail in cpp mode
|
| 264 |
+
arr[1].real += 1
|
| 265 |
+
arr[1].imag += 1
|
| 266 |
+
# This works in both modes
|
| 267 |
+
arr[1].real = arr[1].real + 1
|
| 268 |
+
arr[1].imag = arr[1].imag + 1
|
| 269 |
+
|
| 270 |
+
|
| 271 |
+
def check_npy_uintp_type_enum():
|
| 272 |
+
# Regression test for gh-27890: cnp.NPY_UINTP was not defined.
|
| 273 |
+
# Cython would fail to compile this before gh-27890 was fixed.
|
| 274 |
+
return cnp.NPY_UINTP > 0
|
mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/examples/cython/meson.build
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
project('checks', 'c', 'cython')
|
| 2 |
+
|
| 3 |
+
py = import('python').find_installation(pure: false)
|
| 4 |
+
|
| 5 |
+
cc = meson.get_compiler('c')
|
| 6 |
+
cy = meson.get_compiler('cython')
|
| 7 |
+
|
| 8 |
+
# Keep synced with pyproject.toml
|
| 9 |
+
if not cy.version().version_compare('>=3.0.6')
|
| 10 |
+
error('tests requires Cython >= 3.0.6')
|
| 11 |
+
endif
|
| 12 |
+
|
| 13 |
+
cython_args = []
|
| 14 |
+
if cy.version().version_compare('>=3.1.0')
|
| 15 |
+
cython_args += ['-Xfreethreading_compatible=True']
|
| 16 |
+
endif
|
| 17 |
+
|
| 18 |
+
npy_include_path = run_command(py, [
|
| 19 |
+
'-c',
|
| 20 |
+
'import os; os.chdir(".."); import numpy; print(os.path.abspath(numpy.get_include()))'
|
| 21 |
+
], check: true).stdout().strip()
|
| 22 |
+
|
| 23 |
+
npy_path = run_command(py, [
|
| 24 |
+
'-c',
|
| 25 |
+
'import os; os.chdir(".."); import numpy; print(os.path.dirname(numpy.__file__).removesuffix("numpy"))'
|
| 26 |
+
], check: true).stdout().strip()
|
| 27 |
+
|
| 28 |
+
# TODO: This is a hack due to gh-25135, where cython may not find the right
|
| 29 |
+
# __init__.pyd file.
|
| 30 |
+
add_project_arguments('-I', npy_path, language : 'cython')
|
| 31 |
+
|
| 32 |
+
py.extension_module(
|
| 33 |
+
'checks',
|
| 34 |
+
'checks.pyx',
|
| 35 |
+
install: false,
|
| 36 |
+
c_args: [
|
| 37 |
+
'-DNPY_NO_DEPRECATED_API=0', # Cython still uses old NumPy C API
|
| 38 |
+
# Require 1.25+ to test datetime additions
|
| 39 |
+
'-DNPY_TARGET_VERSION=NPY_2_0_API_VERSION',
|
| 40 |
+
],
|
| 41 |
+
include_directories: [npy_include_path],
|
| 42 |
+
cython_args: cython_args,
|
| 43 |
+
)
|
mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/examples/cython/setup.py
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Provide python-space access to the functions exposed in numpy/__init__.pxd
|
| 3 |
+
for testing.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import Cython
|
| 7 |
+
import numpy as np
|
| 8 |
+
from numpy._utils import _pep440
|
| 9 |
+
from distutils.core import setup
|
| 10 |
+
from Cython.Build import cythonize
|
| 11 |
+
from setuptools.extension import Extension
|
| 12 |
+
import os
|
| 13 |
+
|
| 14 |
+
macros = [
|
| 15 |
+
("NPY_NO_DEPRECATED_API", 0),
|
| 16 |
+
# Require 1.25+ to test datetime additions
|
| 17 |
+
("NPY_TARGET_VERSION", "NPY_2_0_API_VERSION"),
|
| 18 |
+
]
|
| 19 |
+
|
| 20 |
+
checks = Extension(
|
| 21 |
+
"checks",
|
| 22 |
+
sources=[os.path.join('.', "checks.pyx")],
|
| 23 |
+
include_dirs=[np.get_include()],
|
| 24 |
+
define_macros=macros,
|
| 25 |
+
)
|
| 26 |
+
|
| 27 |
+
extensions = [checks]
|
| 28 |
+
|
| 29 |
+
compiler_directives = {}
|
| 30 |
+
if _pep440.parse(Cython.__version__) >= _pep440.parse("3.1.0a0"):
|
| 31 |
+
compiler_directives['freethreading_compatible'] = True
|
| 32 |
+
|
| 33 |
+
setup(
|
| 34 |
+
ext_modules=cythonize(
|
| 35 |
+
extensions,
|
| 36 |
+
compiler_directives=compiler_directives)
|
| 37 |
+
)
|
mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/examples/limited_api/__pycache__/setup.cpython-310.pyc
ADDED
|
Binary file (641 Bytes). View file
|
|
|
mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/examples/limited_api/limited_api1.c
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#define Py_LIMITED_API 0x03060000
|
| 2 |
+
|
| 3 |
+
#include <Python.h>
|
| 4 |
+
#include <numpy/arrayobject.h>
|
| 5 |
+
#include <numpy/ufuncobject.h>
|
| 6 |
+
|
| 7 |
+
static PyModuleDef moduledef = {
|
| 8 |
+
.m_base = PyModuleDef_HEAD_INIT,
|
| 9 |
+
.m_name = "limited_api1"
|
| 10 |
+
};
|
| 11 |
+
|
| 12 |
+
PyMODINIT_FUNC PyInit_limited_api1(void)
|
| 13 |
+
{
|
| 14 |
+
import_array();
|
| 15 |
+
import_umath();
|
| 16 |
+
return PyModule_Create(&moduledef);
|
| 17 |
+
}
|
mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/examples/limited_api/limited_api2.pyx
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#cython: language_level=3

"""
Make sure cython can compile in limited API mode (see meson.build)
"""

# The extern blocks only pull in the NumPy headers; no declarations are
# used — a successful compile is the whole test.
cdef extern from "numpy/arrayobject.h":
    pass
cdef extern from "numpy/arrayscalars.h":
    pass
mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/examples/limited_api/limited_api_latest.c
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/* Like limited_api1.c, but built (see meson.build) with Py_LIMITED_API
 * set to the interpreter's own major+minor version, i.e. the newest
 * limited API available at build time. */
#if Py_LIMITED_API != PY_VERSION_HEX & 0xffff0000
    #error "Py_LIMITED_API not defined to Python major+minor version"
#endif

#include <Python.h>
#include <numpy/arrayobject.h>
#include <numpy/ufuncobject.h>

static PyModuleDef moduledef = {
    .m_base = PyModuleDef_HEAD_INIT,
    .m_name = "limited_api_latest"
};

PyMODINIT_FUNC PyInit_limited_api_latest(void)
{
    /* import_array()/import_umath() return NULL from this function on
       failure. */
    import_array();
    import_umath();
    return PyModule_Create(&moduledef);
}
mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/examples/limited_api/meson.build
ADDED
|
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Meson project that builds the limited-API example extensions used by
# NumPy's test suite: two C modules and one Cython module, each compiled
# against a specific Py_LIMITED_API level.
project('checks', 'c', 'cython')

py = import('python').find_installation(pure: false)

cc = meson.get_compiler('c')
cy = meson.get_compiler('cython')

# Keep synced with pyproject.toml
if not cy.version().version_compare('>=3.0.6')
  error('tests requires Cython >= 3.0.6')
endif

# Ask the *target* Python for its NumPy include dir.  The chdir('..')
# avoids picking up an in-tree numpy package from the build directory.
npy_include_path = run_command(py, [
  '-c',
  'import os; os.chdir(".."); import numpy; print(os.path.abspath(numpy.get_include()))'
], check: true).stdout().strip()

npy_path = run_command(py, [
  '-c',
  'import os; os.chdir(".."); import numpy; print(os.path.dirname(numpy.__file__).removesuffix("numpy"))'
], check: true).stdout().strip()

# TODO: This is a hack due to https://github.com/cython/cython/issues/5820,
# where cython may not find the right __init__.pyd file.
add_project_arguments('-I', npy_path, language : 'cython')

# C module pinned to the oldest supported limited API (3.6).
py.extension_module(
  'limited_api1',
  'limited_api1.c',
  c_args: [
    '-DNPY_NO_DEPRECATED_API=NPY_1_21_API_VERSION',
  ],
  include_directories: [npy_include_path],
  limited_api: '3.6',
)

# C module pinned to the building interpreter's own version, i.e. the
# newest limited API (matches the #if guard in limited_api_latest.c).
py.extension_module(
  'limited_api_latest',
  'limited_api_latest.c',
  c_args: [
    '-DNPY_NO_DEPRECATED_API=NPY_1_21_API_VERSION',
  ],
  include_directories: [npy_include_path],
  limited_api: py.language_version(),
)

# Cython module compiled in limited-API mode (CYTHON_LIMITED_API).
py.extension_module(
  'limited_api2',
  'limited_api2.pyx',
  install: false,
  c_args: [
    '-DNPY_NO_DEPRECATED_API=0',
    # Require 1.25+ to test datetime additions
    '-DNPY_TARGET_VERSION=NPY_2_0_API_VERSION',
    '-DCYTHON_LIMITED_API=1',
  ],
  include_directories: [npy_include_path],
  limited_api: '3.7',
)
mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/examples/limited_api/setup.py
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Build an example package using the limited Python C API.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
from setuptools import setup, Extension
|
| 7 |
+
import os
|
| 8 |
+
|
| 9 |
+
macros = [("NPY_NO_DEPRECATED_API", 0), ("Py_LIMITED_API", "0x03060000")]
|
| 10 |
+
|
| 11 |
+
limited_api = Extension(
|
| 12 |
+
"limited_api",
|
| 13 |
+
sources=[os.path.join('.', "limited_api.c")],
|
| 14 |
+
include_dirs=[np.get_include()],
|
| 15 |
+
define_macros=macros,
|
| 16 |
+
)
|
| 17 |
+
|
| 18 |
+
extensions = [limited_api]
|
| 19 |
+
|
| 20 |
+
setup(
|
| 21 |
+
ext_modules=extensions
|
| 22 |
+
)
|
mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_abc.py
ADDED
|
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from numpy.testing import assert_
|
| 2 |
+
|
| 3 |
+
import numbers
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
from numpy._core.numerictypes import sctypes
|
| 7 |
+
|
| 8 |
+
class TestABC:
|
| 9 |
+
def test_abstract(self):
|
| 10 |
+
assert_(issubclass(np.number, numbers.Number))
|
| 11 |
+
|
| 12 |
+
assert_(issubclass(np.inexact, numbers.Complex))
|
| 13 |
+
assert_(issubclass(np.complexfloating, numbers.Complex))
|
| 14 |
+
assert_(issubclass(np.floating, numbers.Real))
|
| 15 |
+
|
| 16 |
+
assert_(issubclass(np.integer, numbers.Integral))
|
| 17 |
+
assert_(issubclass(np.signedinteger, numbers.Integral))
|
| 18 |
+
assert_(issubclass(np.unsignedinteger, numbers.Integral))
|
| 19 |
+
|
| 20 |
+
def test_floats(self):
|
| 21 |
+
for t in sctypes['float']:
|
| 22 |
+
assert_(isinstance(t(), numbers.Real),
|
| 23 |
+
f"{t.__name__} is not instance of Real")
|
| 24 |
+
assert_(issubclass(t, numbers.Real),
|
| 25 |
+
f"{t.__name__} is not subclass of Real")
|
| 26 |
+
assert_(not isinstance(t(), numbers.Rational),
|
| 27 |
+
f"{t.__name__} is instance of Rational")
|
| 28 |
+
assert_(not issubclass(t, numbers.Rational),
|
| 29 |
+
f"{t.__name__} is subclass of Rational")
|
| 30 |
+
|
| 31 |
+
def test_complex(self):
|
| 32 |
+
for t in sctypes['complex']:
|
| 33 |
+
assert_(isinstance(t(), numbers.Complex),
|
| 34 |
+
f"{t.__name__} is not instance of Complex")
|
| 35 |
+
assert_(issubclass(t, numbers.Complex),
|
| 36 |
+
f"{t.__name__} is not subclass of Complex")
|
| 37 |
+
assert_(not isinstance(t(), numbers.Real),
|
| 38 |
+
f"{t.__name__} is instance of Real")
|
| 39 |
+
assert_(not issubclass(t, numbers.Real),
|
| 40 |
+
f"{t.__name__} is subclass of Real")
|
| 41 |
+
|
| 42 |
+
def test_int(self):
|
| 43 |
+
for t in sctypes['int']:
|
| 44 |
+
assert_(isinstance(t(), numbers.Integral),
|
| 45 |
+
f"{t.__name__} is not instance of Integral")
|
| 46 |
+
assert_(issubclass(t, numbers.Integral),
|
| 47 |
+
f"{t.__name__} is not subclass of Integral")
|
| 48 |
+
|
| 49 |
+
def test_uint(self):
|
| 50 |
+
for t in sctypes['uint']:
|
| 51 |
+
assert_(isinstance(t(), numbers.Integral),
|
| 52 |
+
f"{t.__name__} is not instance of Integral")
|
| 53 |
+
assert_(issubclass(t, numbers.Integral),
|
| 54 |
+
f"{t.__name__} is not subclass of Integral")
|
mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_api.py
ADDED
|
@@ -0,0 +1,616 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sys
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
import numpy._core.umath as ncu
|
| 5 |
+
from numpy._core._rational_tests import rational
|
| 6 |
+
import pytest
|
| 7 |
+
from numpy.testing import (
|
| 8 |
+
assert_, assert_equal, assert_array_equal, assert_raises, assert_warns,
|
| 9 |
+
HAS_REFCOUNT
|
| 10 |
+
)
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def test_array_array():
    """Smoke-test ``np.array()`` construction from many source kinds:
    ndarrays, None, scalars, bytes/str, memoryview, the
    ``__array_interface__``/``__array_struct__``/``__array__`` protocols,
    maximally nested sequences, and list/tuple inputs — also checking
    (when refcounting is available) that no type references leak.
    """
    tobj = type(object)
    ones11 = np.ones((1, 1), np.float64)
    tndarray = type(ones11)
    # Test is_ndarray
    assert_equal(np.array(ones11, dtype=np.float64), ones11)
    if HAS_REFCOUNT:
        old_refcount = sys.getrefcount(tndarray)
        np.array(ones11)
        assert_equal(old_refcount, sys.getrefcount(tndarray))

    # test None
    assert_equal(np.array(None, dtype=np.float64),
                 np.array(np.nan, dtype=np.float64))
    if HAS_REFCOUNT:
        old_refcount = sys.getrefcount(tobj)
        np.array(None, dtype=np.float64)
        assert_equal(old_refcount, sys.getrefcount(tobj))

    # test scalar
    assert_equal(np.array(1.0, dtype=np.float64),
                 np.ones((), dtype=np.float64))
    if HAS_REFCOUNT:
        old_refcount = sys.getrefcount(np.float64)
        np.array(np.array(1.0, dtype=np.float64), dtype=np.float64)
        assert_equal(old_refcount, sys.getrefcount(np.float64))

    # test string (bytes)
    S2 = np.dtype((bytes, 2))
    S3 = np.dtype((bytes, 3))
    S5 = np.dtype((bytes, 5))
    assert_equal(np.array(b"1.0", dtype=np.float64),
                 np.ones((), dtype=np.float64))
    assert_equal(np.array(b"1.0").dtype, S3)
    assert_equal(np.array(b"1.0", dtype=bytes).dtype, S3)
    assert_equal(np.array(b"1.0", dtype=S2), np.array(b"1."))
    assert_equal(np.array(b"1", dtype=S5), np.ones((), dtype=S5))

    # test string (unicode)
    U2 = np.dtype((str, 2))
    U3 = np.dtype((str, 3))
    U5 = np.dtype((str, 5))
    assert_equal(np.array("1.0", dtype=np.float64),
                 np.ones((), dtype=np.float64))
    assert_equal(np.array("1.0").dtype, U3)
    assert_equal(np.array("1.0", dtype=str).dtype, U3)
    assert_equal(np.array("1.0", dtype=U2), np.array(str("1.")))
    assert_equal(np.array("1", dtype=U5), np.ones((), dtype=U5))

    # sanity check on the __builtins__ shape used above/below
    builtins = getattr(__builtins__, '__dict__', __builtins__)
    assert_(hasattr(builtins, 'get'))

    # test memoryview: bytes become their ordinal values
    dat = np.array(memoryview(b'1.0'), dtype=np.float64)
    assert_equal(dat, [49.0, 46.0, 48.0])
    assert_(dat.dtype.type is np.float64)

    dat = np.array(memoryview(b'1.0'))
    assert_equal(dat, [49, 46, 48])
    assert_(dat.dtype.type is np.uint8)

    # test array interface
    a = np.array(100.0, dtype=np.float64)
    o = type("o", (object,),
             dict(__array_interface__=a.__array_interface__))
    assert_equal(np.array(o, dtype=np.float64), a)

    # test array_struct interface
    a = np.array([(1, 4.0, 'Hello'), (2, 6.0, 'World')],
                 dtype=[('f0', int), ('f1', float), ('f2', str)])
    o = type("o", (object,),
             dict(__array_struct__=a.__array_struct__))
    ## wasn't what I expected... is np.array(o) supposed to equal a ?
    ## instead we get a array([...], dtype=">V18")
    assert_equal(bytes(np.array(o).data), bytes(a.data))

    # test __array__ protocol
    def custom__array__(self, dtype=None, copy=None):
        return np.array(100.0, dtype=dtype, copy=copy)

    o = type("o", (object,), dict(__array__=custom__array__))()
    assert_equal(np.array(o, dtype=np.float64), np.array(100.0, np.float64))

    # test recursion: nest exactly MAXDIMS levels deep
    nested = 1.5
    for i in range(ncu.MAXDIMS):
        nested = [nested]

    # no error
    np.array(nested)

    # Exceeds recursion limit (MAXDIMS + 1 levels)
    assert_raises(ValueError, np.array, [nested], dtype=np.float64)

    # Try with lists...
    # float32
    assert_equal(np.array([None] * 10, dtype=np.float32),
                 np.full((10,), np.nan, dtype=np.float32))
    assert_equal(np.array([[None]] * 10, dtype=np.float32),
                 np.full((10, 1), np.nan, dtype=np.float32))
    assert_equal(np.array([[None] * 10], dtype=np.float32),
                 np.full((1, 10), np.nan, dtype=np.float32))
    assert_equal(np.array([[None] * 10] * 10, dtype=np.float32),
                 np.full((10, 10), np.nan, dtype=np.float32))
    # float64
    assert_equal(np.array([None] * 10, dtype=np.float64),
                 np.full((10,), np.nan, dtype=np.float64))
    assert_equal(np.array([[None]] * 10, dtype=np.float64),
                 np.full((10, 1), np.nan, dtype=np.float64))
    assert_equal(np.array([[None] * 10], dtype=np.float64),
                 np.full((1, 10), np.nan, dtype=np.float64))
    assert_equal(np.array([[None] * 10] * 10, dtype=np.float64),
                 np.full((10, 10), np.nan, dtype=np.float64))

    assert_equal(np.array([1.0] * 10, dtype=np.float64),
                 np.ones((10,), dtype=np.float64))
    assert_equal(np.array([[1.0]] * 10, dtype=np.float64),
                 np.ones((10, 1), dtype=np.float64))
    assert_equal(np.array([[1.0] * 10], dtype=np.float64),
                 np.ones((1, 10), dtype=np.float64))
    assert_equal(np.array([[1.0] * 10] * 10, dtype=np.float64),
                 np.ones((10, 10), dtype=np.float64))

    # Try with tuples
    assert_equal(np.array((None,) * 10, dtype=np.float64),
                 np.full((10,), np.nan, dtype=np.float64))
    assert_equal(np.array([(None,)] * 10, dtype=np.float64),
                 np.full((10, 1), np.nan, dtype=np.float64))
    assert_equal(np.array([(None,) * 10], dtype=np.float64),
                 np.full((1, 10), np.nan, dtype=np.float64))
    assert_equal(np.array([(None,) * 10] * 10, dtype=np.float64),
                 np.full((10, 10), np.nan, dtype=np.float64))

    assert_equal(np.array((1.0,) * 10, dtype=np.float64),
                 np.ones((10,), dtype=np.float64))
    assert_equal(np.array([(1.0,)] * 10, dtype=np.float64),
                 np.ones((10, 1), dtype=np.float64))
    assert_equal(np.array([(1.0,) * 10], dtype=np.float64),
                 np.ones((1, 10), dtype=np.float64))
    assert_equal(np.array([(1.0,) * 10] * 10, dtype=np.float64),
                 np.ones((10, 10), dtype=np.float64))
@pytest.mark.parametrize("array", [True, False])
|
| 156 |
+
def test_array_impossible_casts(array):
|
| 157 |
+
# All builtin types can be forcibly cast, at least theoretically,
|
| 158 |
+
# but user dtypes cannot necessarily.
|
| 159 |
+
rt = rational(1, 2)
|
| 160 |
+
if array:
|
| 161 |
+
rt = np.array(rt)
|
| 162 |
+
with assert_raises(TypeError):
|
| 163 |
+
np.array(rt, dtype="M8")
|
| 164 |
+
|
| 165 |
+
|
| 166 |
+
def test_array_astype():
    """Exercise ``ndarray.astype``: default copy semantics, the ``copy``/
    ``order``/``casting``/``subok`` parameters, object→fixed-width string
    conversion (no truncation), and error cases.

    NOTE: left byte-identical — several checks assert object *identity*
    (``a is b``) and are sensitive to exact call order.
    """
    a = np.arange(6, dtype='f4').reshape(2, 3)
    # Default behavior: allows unsafe casts, keeps memory layout,
    # always copies.
    b = a.astype('i4')
    assert_equal(a, b)
    assert_equal(b.dtype, np.dtype('i4'))
    assert_equal(a.strides, b.strides)
    b = a.T.astype('i4')
    assert_equal(a.T, b)
    assert_equal(b.dtype, np.dtype('i4'))
    assert_equal(a.T.strides, b.strides)
    b = a.astype('f4')
    assert_equal(a, b)
    assert_(not (a is b))

    # copy=False parameter skips a copy
    b = a.astype('f4', copy=False)
    assert_(a is b)

    # order parameter allows overriding of the memory layout,
    # forcing a copy if the layout is wrong
    b = a.astype('f4', order='F', copy=False)
    assert_equal(a, b)
    assert_(not (a is b))
    assert_(b.flags.f_contiguous)

    b = a.astype('f4', order='C', copy=False)
    assert_equal(a, b)
    assert_(a is b)
    assert_(b.flags.c_contiguous)

    # casting parameter allows catching bad casts
    b = a.astype('c8', casting='safe')
    assert_equal(a, b)
    assert_equal(b.dtype, np.dtype('c8'))

    assert_raises(TypeError, a.astype, 'i4', casting='safe')

    # subok=False passes through a non-subclassed array
    b = a.astype('f4', subok=0, copy=False)
    assert_(a is b)

    class MyNDArray(np.ndarray):
        pass

    a = np.array([[0, 1, 2], [3, 4, 5]], dtype='f4').view(MyNDArray)

    # subok=True passes through a subclass
    b = a.astype('f4', subok=True, copy=False)
    assert_(a is b)

    # subok=True is default, and creates a subtype on a cast
    b = a.astype('i4', copy=False)
    assert_equal(a, b)
    assert_equal(type(b), MyNDArray)

    # subok=False never returns a subclass
    b = a.astype('f4', subok=False, copy=False)
    assert_equal(a, b)
    assert_(not (a is b))
    assert_(type(b) is not MyNDArray)

    # Make sure converting from string object to fixed length string
    # does not truncate.
    a = np.array([b'a'*100], dtype='O')
    b = a.astype('S')
    assert_equal(a, b)
    assert_equal(b.dtype, np.dtype('S100'))
    a = np.array(['a'*100], dtype='O')
    b = a.astype('U')
    assert_equal(a, b)
    assert_equal(b.dtype, np.dtype('U100'))

    # Same test as above but for strings shorter than 64 characters
    a = np.array([b'a'*10], dtype='O')
    b = a.astype('S')
    assert_equal(a, b)
    assert_equal(b.dtype, np.dtype('S10'))
    a = np.array(['a'*10], dtype='O')
    b = a.astype('U')
    assert_equal(a, b)
    assert_equal(b.dtype, np.dtype('U10'))

    # Big (30-digit) integers stringify without truncation too.
    a = np.array(123456789012345678901234567890, dtype='O').astype('S')
    assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30'))
    a = np.array(123456789012345678901234567890, dtype='O').astype('U')
    assert_array_equal(a, np.array('1234567890' * 3, dtype='U30'))

    a = np.array([123456789012345678901234567890], dtype='O').astype('S')
    assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30'))
    a = np.array([123456789012345678901234567890], dtype='O').astype('U')
    assert_array_equal(a, np.array('1234567890' * 3, dtype='U30'))

    a = np.array(123456789012345678901234567890, dtype='S')
    assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30'))
    a = np.array(123456789012345678901234567890, dtype='U')
    assert_array_equal(a, np.array('1234567890' * 3, dtype='U30'))

    # A 2-char unicode array reinterprets as two uint32 code points.
    a = np.array('a\u0140', dtype='U')
    b = np.ndarray(buffer=a, dtype='uint32', shape=2)
    assert_(b.size == 2)

    a = np.array([1000], dtype='i4')
    assert_raises(TypeError, a.astype, 'S1', casting='safe')

    a = np.array(1000, dtype='i4')
    assert_raises(TypeError, a.astype, 'U1', casting='safe')

    # gh-24023: astype with no dtype argument must raise, not crash
    assert_raises(TypeError, a.astype)
@pytest.mark.parametrize("dt", ["S", "U"])
|
| 279 |
+
def test_array_astype_to_string_discovery_empty(dt):
|
| 280 |
+
# See also gh-19085
|
| 281 |
+
arr = np.array([""], dtype=object)
|
| 282 |
+
# Note, the itemsize is the `0 -> 1` logic, which should change.
|
| 283 |
+
# The important part the test is rather that it does not error.
|
| 284 |
+
assert arr.astype(dt).dtype.itemsize == np.dtype(f"{dt}1").itemsize
|
| 285 |
+
|
| 286 |
+
# check the same thing for `np.can_cast` (since it accepts arrays)
|
| 287 |
+
assert np.can_cast(arr, dt, casting="unsafe")
|
| 288 |
+
assert not np.can_cast(arr, dt, casting="same_kind")
|
| 289 |
+
# as well as for the object as a descriptor:
|
| 290 |
+
assert np.can_cast("O", dt, casting="unsafe")
|
| 291 |
+
|
| 292 |
+
@pytest.mark.parametrize("dt", ["d", "f", "S13", "U32"])
|
| 293 |
+
def test_array_astype_to_void(dt):
|
| 294 |
+
dt = np.dtype(dt)
|
| 295 |
+
arr = np.array([], dtype=dt)
|
| 296 |
+
assert arr.astype("V").dtype.itemsize == dt.itemsize
|
| 297 |
+
|
| 298 |
+
def test_object_array_astype_to_void():
    """Unlike `test_array_astype_to_void`, object arrays are inspected:
    the default void itemsize is 8 (the length of a double), i.e. "V8"."""
    result = np.array([], dtype="O").astype("V")
    assert result.dtype == "V8"
|
| 304 |
+
@pytest.mark.parametrize("t",
|
| 305 |
+
np._core.sctypes['uint'] +
|
| 306 |
+
np._core.sctypes['int'] +
|
| 307 |
+
np._core.sctypes['float']
|
| 308 |
+
)
|
| 309 |
+
def test_array_astype_warning(t):
|
| 310 |
+
# test ComplexWarning when casting from complex to float or int
|
| 311 |
+
a = np.array(10, dtype=np.complex128)
|
| 312 |
+
assert_warns(np.exceptions.ComplexWarning, a.astype, t)
|
| 313 |
+
|
| 314 |
+
@pytest.mark.parametrize(["dtype", "out_dtype"],
|
| 315 |
+
[(np.bytes_, np.bool),
|
| 316 |
+
(np.str_, np.bool),
|
| 317 |
+
(np.dtype("S10,S9"), np.dtype("?,?")),
|
| 318 |
+
# The following also checks unaligned unicode access:
|
| 319 |
+
(np.dtype("S7,U9"), np.dtype("?,?"))])
|
| 320 |
+
def test_string_to_boolean_cast(dtype, out_dtype):
|
| 321 |
+
# Only the last two (empty) strings are falsy (the `\0` is stripped):
|
| 322 |
+
arr = np.array(
|
| 323 |
+
["10", "10\0\0\0", "0\0\0", "0", "False", " ", "", "\0"],
|
| 324 |
+
dtype=dtype)
|
| 325 |
+
expected = np.array(
|
| 326 |
+
[True, True, True, True, True, True, False, False],
|
| 327 |
+
dtype=out_dtype)
|
| 328 |
+
assert_array_equal(arr.astype(out_dtype), expected)
|
| 329 |
+
# As it's similar, check that nonzero behaves the same (structs are
|
| 330 |
+
# nonzero if all entries are)
|
| 331 |
+
assert_array_equal(np.nonzero(arr), np.nonzero(expected))
|
| 332 |
+
|
| 333 |
+
@pytest.mark.parametrize("str_type", [str, bytes, np.str_])
|
| 334 |
+
@pytest.mark.parametrize("scalar_type",
|
| 335 |
+
[np.complex64, np.complex128, np.clongdouble])
|
| 336 |
+
def test_string_to_complex_cast(str_type, scalar_type):
|
| 337 |
+
value = scalar_type(b"1+3j")
|
| 338 |
+
assert scalar_type(value) == 1+3j
|
| 339 |
+
assert np.array([value], dtype=object).astype(scalar_type)[()] == 1+3j
|
| 340 |
+
assert np.array(value).astype(scalar_type)[()] == 1+3j
|
| 341 |
+
arr = np.zeros(1, dtype=scalar_type)
|
| 342 |
+
arr[0] = value
|
| 343 |
+
assert arr[0] == 1+3j
|
| 344 |
+
|
| 345 |
+
@pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
|
| 346 |
+
def test_none_to_nan_cast(dtype):
|
| 347 |
+
# Note that at the time of writing this test, the scalar constructors
|
| 348 |
+
# reject None
|
| 349 |
+
arr = np.zeros(1, dtype=dtype)
|
| 350 |
+
arr[0] = None
|
| 351 |
+
assert np.isnan(arr)[0]
|
| 352 |
+
assert np.isnan(np.array(None, dtype=dtype))[()]
|
| 353 |
+
assert np.isnan(np.array([None], dtype=dtype))[0]
|
| 354 |
+
assert np.isnan(np.array(None).astype(dtype))[()]
|
| 355 |
+
|
| 356 |
+
def test_copyto_fromscalar():
    """``np.copyto`` broadcasting a scalar source into an array, both
    plainly and through a boolean ``where=`` mask."""
    dest = np.arange(6, dtype='f4').reshape(2, 3)

    # Scalar broadcast into the whole array, also via a transposed view.
    np.copyto(dest, 1.5)
    assert_equal(dest, 1.5)
    np.copyto(dest.T, 2.5)
    assert_equal(dest, 2.5)

    # Masked scalar broadcast: only selected elements change.
    sel = np.array([[0, 1, 0], [0, 0, 1]], dtype='?')
    np.copyto(dest, 3.5, where=sel)
    assert_equal(dest, [[2.5, 3.5, 2.5], [2.5, 2.5, 3.5]])
    sel = np.array([[0, 1], [1, 1], [1, 0]], dtype='?')
    np.copyto(dest.T, 4.5, where=sel)
    assert_equal(dest, [[2.5, 4.5, 4.5], [4.5, 4.5, 3.5]])
|
| 373 |
+
def test_copyto():
    """``np.copyto`` with array sources: plain copies, overlapping views,
    casting rules, and boolean/int ``where=`` masks."""
    dest = np.arange(6, dtype='i4').reshape(2, 3)

    # Plain element-wise copy.
    np.copyto(dest, [[3, 1, 5], [6, 2, 1]])
    assert_equal(dest, [[3, 1, 5], [6, 2, 1]])

    # Overlapping source and destination views must still copy correctly.
    np.copyto(dest[:, :2], dest[::-1, 1::-1])
    assert_equal(dest, [[2, 6, 5], [1, 3, 1]])

    # The default casting rule is 'same_kind': float into int fails.
    assert_raises(TypeError, np.copyto, dest, 1.5)

    # 'unsafe' casting permits it, truncating 1.5 to 1.
    np.copyto(dest, 1.5, casting='unsafe')
    assert_equal(dest, 1)

    # Copying through a boolean mask.
    np.copyto(dest, 3, where=[True, False, True])
    assert_equal(dest, [[3, 1, 3], [3, 1, 3]])

    # The casting rule still applies when a mask is given.
    assert_raises(TypeError, np.copyto, dest, 3.5, where=[True, False, True])

    # Integer 0/1 lists are accepted as masks too.
    np.copyto(dest, 4.0, casting='unsafe', where=[[0, 1, 1], [1, 0, 0]])
    assert_equal(dest, [[3, 4, 4], [4, 1, 3]])

    # Overlapping masked copy.
    np.copyto(dest[:, :2], dest[::-1, 1::-1], where=[[0, 1], [1, 1]])
    assert_equal(dest, [[3, 4, 4], [4, 3, 3]])

    # The destination must be an ndarray.
    assert_raises(TypeError, np.copyto, [1, 2, 3], [2, 3, 4])
+
|
| 410 |
+
def test_copyto_cast_safety():
    """Check ``np.copyto``'s ``casting=`` enforcement for scalar sources:
    'safe'/'equiv' must reject precision-losing assignments but allow
    value-preserving ones, and overflow surfaces as OverflowError or a
    RuntimeWarning rather than a TypeError.
    """
    # float into int is not a 'safe' cast:
    with pytest.raises(TypeError):
        np.copyto(np.arange(3), 3., casting="safe")

    # Can put integer and float scalars safely (and equiv):
    np.copyto(np.arange(3), 3, casting="equiv")
    np.copyto(np.arange(3.), 3., casting="equiv")
    # And also with less precision safely:
    np.copyto(np.arange(3, dtype="uint8"), 3, casting="safe")
    np.copyto(np.arange(3., dtype="float32"), 3., casting="safe")

    # But not equiv:
    with pytest.raises(TypeError):
        np.copyto(np.arange(3, dtype="uint8"), 3, casting="equiv")

    with pytest.raises(TypeError):
        np.copyto(np.arange(3., dtype="float32"), 3., casting="equiv")

    # As a special thing, object is equiv currently:
    np.copyto(np.arange(3, dtype=object), 3, casting="equiv")

    # The following raises an overflow error/gives a warning but not
    # type error (due to casting), though:
    with pytest.raises(OverflowError):
        np.copyto(np.arange(3), 2**80, casting="safe")

    with pytest.warns(RuntimeWarning):
        np.copyto(np.arange(3, dtype=np.float32), 2e300, casting="safe")
+
|
| 440 |
+
def test_copyto_permut():
    """Stress the masked (``where=``) fast paths of ``np.copyto``:
    an offset/overflow case, every possible 9-bit mask pattern (also
    reversed and strided), masks whose underlying bytes are arbitrary
    nonzero values, and the scalar True/False mask special cases.
    """
    # test explicit overflow case
    pad = 500
    l = [True] * pad + [True, True, True, True]
    r = np.zeros(len(l)-pad)
    d = np.ones(len(l)-pad)
    mask = np.array(l)[pad:]
    np.copyto(r, d, where=mask[::-1])

    # test all permutation of possible masks, 9 should be sufficient for
    # current 4 byte unrolled code
    power = 9
    d = np.ones(power)
    for i in range(2**power):
        r = np.zeros(power)
        # bit x of i decides mask element x
        l = [(i & x) != 0 for x in range(power)]
        mask = np.array(l)
        np.copyto(r, d, where=mask)
        assert_array_equal(r == 1, l)
        assert_equal(r.sum(), sum(l))

        # reversed mask view
        r = np.zeros(power)
        np.copyto(r, d, where=mask[::-1])
        assert_array_equal(r == 1, l[::-1])
        assert_equal(r.sum(), sum(l))

        # strided destination/source/mask
        r = np.zeros(power)
        np.copyto(r[::2], d[::2], where=mask[::2])
        assert_array_equal(r[::2] == 1, l[::2])
        assert_equal(r[::2].sum(), sum(l[::2]))

        # strided with reversed mask
        r = np.zeros(power)
        np.copyto(r[::2], d[::2], where=mask[::-2])
        assert_array_equal(r[::2] == 1, l[::-2])
        assert_equal(r[::2].sum(), sum(l[::-2]))

    # Any nonzero byte value in the mask buffer must count as True.
    for c in [0xFF, 0x7F, 0x02, 0x10]:
        r = np.zeros(power)
        mask = np.array(l)
        imask = np.array(l).view(np.uint8)
        imask[mask != 0] = c
        np.copyto(r, d, where=mask)
        assert_array_equal(r == 1, l)
        assert_equal(r.sum(), sum(l))

    # Scalar masks: where=True copies everything ...
    r = np.zeros(power)
    np.copyto(r, d, where=True)
    assert_equal(r.sum(), r.size)
    # ... and where=False copies nothing (r stays all ones).
    r = np.ones(power)
    d = np.zeros(power)
    np.copyto(r, d, where=False)
    assert_equal(r.sum(), r.size)
|
| 493 |
+
def test_copy_order():
|
| 494 |
+
a = np.arange(24).reshape(2, 1, 3, 4)
|
| 495 |
+
b = a.copy(order='F')
|
| 496 |
+
c = np.arange(24).reshape(2, 1, 4, 3).swapaxes(2, 3)
|
| 497 |
+
|
| 498 |
+
def check_copy_result(x, y, ccontig, fcontig, strides=False):
|
| 499 |
+
assert_(not (x is y))
|
| 500 |
+
assert_equal(x, y)
|
| 501 |
+
assert_equal(res.flags.c_contiguous, ccontig)
|
| 502 |
+
assert_equal(res.flags.f_contiguous, fcontig)
|
| 503 |
+
|
| 504 |
+
# Validate the initial state of a, b, and c
|
| 505 |
+
assert_(a.flags.c_contiguous)
|
| 506 |
+
assert_(not a.flags.f_contiguous)
|
| 507 |
+
assert_(not b.flags.c_contiguous)
|
| 508 |
+
assert_(b.flags.f_contiguous)
|
| 509 |
+
assert_(not c.flags.c_contiguous)
|
| 510 |
+
assert_(not c.flags.f_contiguous)
|
| 511 |
+
|
| 512 |
+
# Copy with order='C'
|
| 513 |
+
res = a.copy(order='C')
|
| 514 |
+
check_copy_result(res, a, ccontig=True, fcontig=False, strides=True)
|
| 515 |
+
res = b.copy(order='C')
|
| 516 |
+
check_copy_result(res, b, ccontig=True, fcontig=False, strides=False)
|
| 517 |
+
res = c.copy(order='C')
|
| 518 |
+
check_copy_result(res, c, ccontig=True, fcontig=False, strides=False)
|
| 519 |
+
res = np.copy(a, order='C')
|
| 520 |
+
check_copy_result(res, a, ccontig=True, fcontig=False, strides=True)
|
| 521 |
+
res = np.copy(b, order='C')
|
| 522 |
+
check_copy_result(res, b, ccontig=True, fcontig=False, strides=False)
|
| 523 |
+
res = np.copy(c, order='C')
|
| 524 |
+
check_copy_result(res, c, ccontig=True, fcontig=False, strides=False)
|
| 525 |
+
|
| 526 |
+
# Copy with order='F'
|
| 527 |
+
res = a.copy(order='F')
|
| 528 |
+
check_copy_result(res, a, ccontig=False, fcontig=True, strides=False)
|
| 529 |
+
res = b.copy(order='F')
|
| 530 |
+
check_copy_result(res, b, ccontig=False, fcontig=True, strides=True)
|
| 531 |
+
res = c.copy(order='F')
|
| 532 |
+
check_copy_result(res, c, ccontig=False, fcontig=True, strides=False)
|
| 533 |
+
res = np.copy(a, order='F')
|
| 534 |
+
check_copy_result(res, a, ccontig=False, fcontig=True, strides=False)
|
| 535 |
+
res = np.copy(b, order='F')
|
| 536 |
+
check_copy_result(res, b, ccontig=False, fcontig=True, strides=True)
|
| 537 |
+
res = np.copy(c, order='F')
|
| 538 |
+
check_copy_result(res, c, ccontig=False, fcontig=True, strides=False)
|
| 539 |
+
|
| 540 |
+
# Copy with order='K'
|
| 541 |
+
res = a.copy(order='K')
|
| 542 |
+
check_copy_result(res, a, ccontig=True, fcontig=False, strides=True)
|
| 543 |
+
res = b.copy(order='K')
|
| 544 |
+
check_copy_result(res, b, ccontig=False, fcontig=True, strides=True)
|
| 545 |
+
res = c.copy(order='K')
|
| 546 |
+
check_copy_result(res, c, ccontig=False, fcontig=False, strides=True)
|
| 547 |
+
res = np.copy(a, order='K')
|
| 548 |
+
check_copy_result(res, a, ccontig=True, fcontig=False, strides=True)
|
| 549 |
+
res = np.copy(b, order='K')
|
| 550 |
+
check_copy_result(res, b, ccontig=False, fcontig=True, strides=True)
|
| 551 |
+
res = np.copy(c, order='K')
|
| 552 |
+
check_copy_result(res, c, ccontig=False, fcontig=False, strides=True)
|
| 553 |
+
|
| 554 |
+
def test_contiguous_flags():
|
| 555 |
+
a = np.ones((4, 4, 1))[::2,:,:]
|
| 556 |
+
a.strides = a.strides[:2] + (-123,)
|
| 557 |
+
b = np.ones((2, 2, 1, 2, 2)).swapaxes(3, 4)
|
| 558 |
+
|
| 559 |
+
def check_contig(a, ccontig, fcontig):
|
| 560 |
+
assert_(a.flags.c_contiguous == ccontig)
|
| 561 |
+
assert_(a.flags.f_contiguous == fcontig)
|
| 562 |
+
|
| 563 |
+
# Check if new arrays are correct:
|
| 564 |
+
check_contig(a, False, False)
|
| 565 |
+
check_contig(b, False, False)
|
| 566 |
+
check_contig(np.empty((2, 2, 0, 2, 2)), True, True)
|
| 567 |
+
check_contig(np.array([[[1], [2]]], order='F'), True, True)
|
| 568 |
+
check_contig(np.empty((2, 2)), True, False)
|
| 569 |
+
check_contig(np.empty((2, 2), order='F'), False, True)
|
| 570 |
+
|
| 571 |
+
# Check that np.array creates correct contiguous flags:
|
| 572 |
+
check_contig(np.array(a, copy=None), False, False)
|
| 573 |
+
check_contig(np.array(a, copy=None, order='C'), True, False)
|
| 574 |
+
check_contig(np.array(a, ndmin=4, copy=None, order='F'), False, True)
|
| 575 |
+
|
| 576 |
+
# Check slicing update of flags and :
|
| 577 |
+
check_contig(a[0], True, True)
|
| 578 |
+
check_contig(a[None, ::4, ..., None], True, True)
|
| 579 |
+
check_contig(b[0, 0, ...], False, True)
|
| 580 |
+
check_contig(b[:, :, 0:0, :, :], True, True)
|
| 581 |
+
|
| 582 |
+
# Test ravel and squeeze.
|
| 583 |
+
check_contig(a.ravel(), True, True)
|
| 584 |
+
check_contig(np.ones((1, 3, 1)).squeeze(), True, True)
|
| 585 |
+
|
| 586 |
+
def test_broadcast_arrays():
|
| 587 |
+
# Test user defined dtypes
|
| 588 |
+
a = np.array([(1, 2, 3)], dtype='u4,u4,u4')
|
| 589 |
+
b = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4')
|
| 590 |
+
result = np.broadcast_arrays(a, b)
|
| 591 |
+
assert_equal(result[0], np.array([(1, 2, 3), (1, 2, 3), (1, 2, 3)], dtype='u4,u4,u4'))
|
| 592 |
+
assert_equal(result[1], np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4'))
|
| 593 |
+
|
| 594 |
+
@pytest.mark.parametrize(["shape", "fill_value", "expected_output"],
|
| 595 |
+
[((2, 2), [5.0, 6.0], np.array([[5.0, 6.0], [5.0, 6.0]])),
|
| 596 |
+
((3, 2), [1.0, 2.0], np.array([[1.0, 2.0], [1.0, 2.0], [1.0, 2.0]]))])
|
| 597 |
+
def test_full_from_list(shape, fill_value, expected_output):
|
| 598 |
+
output = np.full(shape, fill_value)
|
| 599 |
+
assert_equal(output, expected_output)
|
| 600 |
+
|
| 601 |
+
def test_astype_copyflag():
|
| 602 |
+
# test the various copyflag options
|
| 603 |
+
arr = np.arange(10, dtype=np.intp)
|
| 604 |
+
|
| 605 |
+
res_true = arr.astype(np.intp, copy=True)
|
| 606 |
+
assert not np.shares_memory(arr, res_true)
|
| 607 |
+
|
| 608 |
+
res_false = arr.astype(np.intp, copy=False)
|
| 609 |
+
assert np.shares_memory(arr, res_false)
|
| 610 |
+
|
| 611 |
+
res_false_float = arr.astype(np.float64, copy=False)
|
| 612 |
+
assert not np.shares_memory(arr, res_false_float)
|
| 613 |
+
|
| 614 |
+
# _CopyMode enum isn't allowed
|
| 615 |
+
assert_raises(ValueError, arr.astype, np.float64,
|
| 616 |
+
copy=np._CopyMode.NEVER)
|
mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_array_interface.py
ADDED
|
@@ -0,0 +1,219 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sys
|
| 2 |
+
import pytest
|
| 3 |
+
import numpy as np
|
| 4 |
+
from numpy.testing import extbuild, IS_WASM, IS_EDITABLE
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
@pytest.fixture
|
| 8 |
+
def get_module(tmp_path):
|
| 9 |
+
""" Some codes to generate data and manage temporary buffers use when
|
| 10 |
+
sharing with numpy via the array interface protocol.
|
| 11 |
+
"""
|
| 12 |
+
if sys.platform.startswith('cygwin'):
|
| 13 |
+
pytest.skip('link fails on cygwin')
|
| 14 |
+
if IS_WASM:
|
| 15 |
+
pytest.skip("Can't build module inside Wasm")
|
| 16 |
+
if IS_EDITABLE:
|
| 17 |
+
pytest.skip("Can't build module for editable install")
|
| 18 |
+
|
| 19 |
+
prologue = '''
|
| 20 |
+
#include <Python.h>
|
| 21 |
+
#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
|
| 22 |
+
#include <numpy/arrayobject.h>
|
| 23 |
+
#include <stdio.h>
|
| 24 |
+
#include <math.h>
|
| 25 |
+
|
| 26 |
+
NPY_NO_EXPORT
|
| 27 |
+
void delete_array_struct(PyObject *cap) {
|
| 28 |
+
|
| 29 |
+
/* get the array interface structure */
|
| 30 |
+
PyArrayInterface *inter = (PyArrayInterface*)
|
| 31 |
+
PyCapsule_GetPointer(cap, NULL);
|
| 32 |
+
|
| 33 |
+
/* get the buffer by which data was shared */
|
| 34 |
+
double *ptr = (double*)PyCapsule_GetContext(cap);
|
| 35 |
+
|
| 36 |
+
/* for the purposes of the regression test set the elements
|
| 37 |
+
to nan */
|
| 38 |
+
for (npy_intp i = 0; i < inter->shape[0]; ++i)
|
| 39 |
+
ptr[i] = nan("");
|
| 40 |
+
|
| 41 |
+
/* free the shared buffer */
|
| 42 |
+
free(ptr);
|
| 43 |
+
|
| 44 |
+
/* free the array interface structure */
|
| 45 |
+
free(inter->shape);
|
| 46 |
+
free(inter);
|
| 47 |
+
|
| 48 |
+
fprintf(stderr, "delete_array_struct\\ncap = %ld inter = %ld"
|
| 49 |
+
" ptr = %ld\\n", (long)cap, (long)inter, (long)ptr);
|
| 50 |
+
}
|
| 51 |
+
'''
|
| 52 |
+
|
| 53 |
+
functions = [
|
| 54 |
+
("new_array_struct", "METH_VARARGS", """
|
| 55 |
+
|
| 56 |
+
long long n_elem = 0;
|
| 57 |
+
double value = 0.0;
|
| 58 |
+
|
| 59 |
+
if (!PyArg_ParseTuple(args, "Ld", &n_elem, &value)) {
|
| 60 |
+
Py_RETURN_NONE;
|
| 61 |
+
}
|
| 62 |
+
|
| 63 |
+
/* allocate and initialize the data to share with numpy */
|
| 64 |
+
long long n_bytes = n_elem*sizeof(double);
|
| 65 |
+
double *data = (double*)malloc(n_bytes);
|
| 66 |
+
|
| 67 |
+
if (!data) {
|
| 68 |
+
PyErr_Format(PyExc_MemoryError,
|
| 69 |
+
"Failed to malloc %lld bytes", n_bytes);
|
| 70 |
+
|
| 71 |
+
Py_RETURN_NONE;
|
| 72 |
+
}
|
| 73 |
+
|
| 74 |
+
for (long long i = 0; i < n_elem; ++i) {
|
| 75 |
+
data[i] = value;
|
| 76 |
+
}
|
| 77 |
+
|
| 78 |
+
/* calculate the shape and stride */
|
| 79 |
+
int nd = 1;
|
| 80 |
+
|
| 81 |
+
npy_intp *ss = (npy_intp*)malloc(2*nd*sizeof(npy_intp));
|
| 82 |
+
npy_intp *shape = ss;
|
| 83 |
+
npy_intp *stride = ss + nd;
|
| 84 |
+
|
| 85 |
+
shape[0] = n_elem;
|
| 86 |
+
stride[0] = sizeof(double);
|
| 87 |
+
|
| 88 |
+
/* construct the array interface */
|
| 89 |
+
PyArrayInterface *inter = (PyArrayInterface*)
|
| 90 |
+
malloc(sizeof(PyArrayInterface));
|
| 91 |
+
|
| 92 |
+
memset(inter, 0, sizeof(PyArrayInterface));
|
| 93 |
+
|
| 94 |
+
inter->two = 2;
|
| 95 |
+
inter->nd = nd;
|
| 96 |
+
inter->typekind = 'f';
|
| 97 |
+
inter->itemsize = sizeof(double);
|
| 98 |
+
inter->shape = shape;
|
| 99 |
+
inter->strides = stride;
|
| 100 |
+
inter->data = data;
|
| 101 |
+
inter->flags = NPY_ARRAY_WRITEABLE | NPY_ARRAY_NOTSWAPPED |
|
| 102 |
+
NPY_ARRAY_ALIGNED | NPY_ARRAY_C_CONTIGUOUS;
|
| 103 |
+
|
| 104 |
+
/* package into a capsule */
|
| 105 |
+
PyObject *cap = PyCapsule_New(inter, NULL, delete_array_struct);
|
| 106 |
+
|
| 107 |
+
/* save the pointer to the data */
|
| 108 |
+
PyCapsule_SetContext(cap, data);
|
| 109 |
+
|
| 110 |
+
fprintf(stderr, "new_array_struct\\ncap = %ld inter = %ld"
|
| 111 |
+
" ptr = %ld\\n", (long)cap, (long)inter, (long)data);
|
| 112 |
+
|
| 113 |
+
return cap;
|
| 114 |
+
""")
|
| 115 |
+
]
|
| 116 |
+
|
| 117 |
+
more_init = "import_array();"
|
| 118 |
+
|
| 119 |
+
try:
|
| 120 |
+
import array_interface_testing
|
| 121 |
+
return array_interface_testing
|
| 122 |
+
except ImportError:
|
| 123 |
+
pass
|
| 124 |
+
|
| 125 |
+
# if it does not exist, build and load it
|
| 126 |
+
return extbuild.build_and_import_extension('array_interface_testing',
|
| 127 |
+
functions,
|
| 128 |
+
prologue=prologue,
|
| 129 |
+
include_dirs=[np.get_include()],
|
| 130 |
+
build_dir=tmp_path,
|
| 131 |
+
more_init=more_init)
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
@pytest.mark.slow
|
| 135 |
+
def test_cstruct(get_module):
|
| 136 |
+
|
| 137 |
+
class data_source:
|
| 138 |
+
"""
|
| 139 |
+
This class is for testing the timing of the PyCapsule destructor
|
| 140 |
+
invoked when numpy release its reference to the shared data as part of
|
| 141 |
+
the numpy array interface protocol. If the PyCapsule destructor is
|
| 142 |
+
called early the shared data is freed and invalid memory accesses will
|
| 143 |
+
occur.
|
| 144 |
+
"""
|
| 145 |
+
|
| 146 |
+
def __init__(self, size, value):
|
| 147 |
+
self.size = size
|
| 148 |
+
self.value = value
|
| 149 |
+
|
| 150 |
+
@property
|
| 151 |
+
def __array_struct__(self):
|
| 152 |
+
return get_module.new_array_struct(self.size, self.value)
|
| 153 |
+
|
| 154 |
+
# write to the same stream as the C code
|
| 155 |
+
stderr = sys.__stderr__
|
| 156 |
+
|
| 157 |
+
# used to validate the shared data.
|
| 158 |
+
expected_value = -3.1415
|
| 159 |
+
multiplier = -10000.0
|
| 160 |
+
|
| 161 |
+
# create some data to share with numpy via the array interface
|
| 162 |
+
# assign the data an expected value.
|
| 163 |
+
stderr.write(' ---- create an object to share data ---- \n')
|
| 164 |
+
buf = data_source(256, expected_value)
|
| 165 |
+
stderr.write(' ---- OK!\n\n')
|
| 166 |
+
|
| 167 |
+
# share the data
|
| 168 |
+
stderr.write(' ---- share data via the array interface protocol ---- \n')
|
| 169 |
+
arr = np.array(buf, copy=False)
|
| 170 |
+
stderr.write('arr.__array_interface___ = %s\n' % (
|
| 171 |
+
str(arr.__array_interface__)))
|
| 172 |
+
stderr.write('arr.base = %s\n' % (str(arr.base)))
|
| 173 |
+
stderr.write(' ---- OK!\n\n')
|
| 174 |
+
|
| 175 |
+
# release the source of the shared data. this will not release the data
|
| 176 |
+
# that was shared with numpy, that is done in the PyCapsule destructor.
|
| 177 |
+
stderr.write(' ---- destroy the object that shared data ---- \n')
|
| 178 |
+
buf = None
|
| 179 |
+
stderr.write(' ---- OK!\n\n')
|
| 180 |
+
|
| 181 |
+
# check that we got the expected data. If the PyCapsule destructor we
|
| 182 |
+
# defined was prematurely called then this test will fail because our
|
| 183 |
+
# destructor sets the elements of the array to NaN before free'ing the
|
| 184 |
+
# buffer. Reading the values here may also cause a SEGV
|
| 185 |
+
assert np.allclose(arr, expected_value)
|
| 186 |
+
|
| 187 |
+
# read the data. If the PyCapsule destructor we defined was prematurely
|
| 188 |
+
# called then reading the values here may cause a SEGV and will be reported
|
| 189 |
+
# as invalid reads by valgrind
|
| 190 |
+
stderr.write(' ---- read shared data ---- \n')
|
| 191 |
+
stderr.write('arr = %s\n' % (str(arr)))
|
| 192 |
+
stderr.write(' ---- OK!\n\n')
|
| 193 |
+
|
| 194 |
+
# write to the shared buffer. If the shared data was prematurely deleted
|
| 195 |
+
# this will may cause a SEGV and valgrind will report invalid writes
|
| 196 |
+
stderr.write(' ---- modify shared data ---- \n')
|
| 197 |
+
arr *= multiplier
|
| 198 |
+
expected_value *= multiplier
|
| 199 |
+
stderr.write('arr.__array_interface___ = %s\n' % (
|
| 200 |
+
str(arr.__array_interface__)))
|
| 201 |
+
stderr.write('arr.base = %s\n' % (str(arr.base)))
|
| 202 |
+
stderr.write(' ---- OK!\n\n')
|
| 203 |
+
|
| 204 |
+
# read the data. If the shared data was prematurely deleted this
|
| 205 |
+
# will may cause a SEGV and valgrind will report invalid reads
|
| 206 |
+
stderr.write(' ---- read modified shared data ---- \n')
|
| 207 |
+
stderr.write('arr = %s\n' % (str(arr)))
|
| 208 |
+
stderr.write(' ---- OK!\n\n')
|
| 209 |
+
|
| 210 |
+
# check that we got the expected data. If the PyCapsule destructor we
|
| 211 |
+
# defined was prematurely called then this test will fail because our
|
| 212 |
+
# destructor sets the elements of the array to NaN before free'ing the
|
| 213 |
+
# buffer. Reading the values here may also cause a SEGV
|
| 214 |
+
assert np.allclose(arr, expected_value)
|
| 215 |
+
|
| 216 |
+
# free the shared data, the PyCapsule destructor should run here
|
| 217 |
+
stderr.write(' ---- free shared data ---- \n')
|
| 218 |
+
arr = None
|
| 219 |
+
stderr.write(' ---- OK!\n\n')
|
mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_arraymethod.py
ADDED
|
@@ -0,0 +1,86 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
This file tests the generic aspects of ArrayMethod. At the time of writing
|
| 3 |
+
this is private API, but when added, public API may be added here.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
from __future__ import annotations
|
| 7 |
+
|
| 8 |
+
import types
|
| 9 |
+
from typing import Any
|
| 10 |
+
|
| 11 |
+
import pytest
|
| 12 |
+
|
| 13 |
+
import numpy as np
|
| 14 |
+
from numpy._core._multiarray_umath import _get_castingimpl as get_castingimpl
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class TestResolveDescriptors:
|
| 18 |
+
# Test mainly error paths of the resolve_descriptors function,
|
| 19 |
+
# note that the `casting_unittests` tests exercise this non-error paths.
|
| 20 |
+
|
| 21 |
+
# Casting implementations are the main/only current user:
|
| 22 |
+
method = get_castingimpl(type(np.dtype("d")), type(np.dtype("f")))
|
| 23 |
+
|
| 24 |
+
@pytest.mark.parametrize("args", [
|
| 25 |
+
(True,), # Not a tuple.
|
| 26 |
+
((None,)), # Too few elements
|
| 27 |
+
((None, None, None),), # Too many
|
| 28 |
+
((None, None),), # Input dtype is None, which is invalid.
|
| 29 |
+
((np.dtype("d"), True),), # Output dtype is not a dtype
|
| 30 |
+
((np.dtype("f"), None),), # Input dtype does not match method
|
| 31 |
+
])
|
| 32 |
+
def test_invalid_arguments(self, args):
|
| 33 |
+
with pytest.raises(TypeError):
|
| 34 |
+
self.method._resolve_descriptors(*args)
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
class TestSimpleStridedCall:
|
| 38 |
+
# Test mainly error paths of the resolve_descriptors function,
|
| 39 |
+
# note that the `casting_unittests` tests exercise this non-error paths.
|
| 40 |
+
|
| 41 |
+
# Casting implementations are the main/only current user:
|
| 42 |
+
method = get_castingimpl(type(np.dtype("d")), type(np.dtype("f")))
|
| 43 |
+
|
| 44 |
+
@pytest.mark.parametrize(["args", "error"], [
|
| 45 |
+
((True,), TypeError), # Not a tuple
|
| 46 |
+
(((None,),), TypeError), # Too few elements
|
| 47 |
+
((None, None), TypeError), # Inputs are not arrays.
|
| 48 |
+
(((None, None, None),), TypeError), # Too many
|
| 49 |
+
(((np.arange(3), np.arange(3)),), TypeError), # Incorrect dtypes
|
| 50 |
+
(((np.ones(3, dtype=">d"), np.ones(3, dtype="<f")),),
|
| 51 |
+
TypeError), # Does not support byte-swapping
|
| 52 |
+
(((np.ones((2, 2), dtype="d"), np.ones((2, 2), dtype="f")),),
|
| 53 |
+
ValueError), # not 1-D
|
| 54 |
+
(((np.ones(3, dtype="d"), np.ones(4, dtype="f")),),
|
| 55 |
+
ValueError), # different length
|
| 56 |
+
(((np.frombuffer(b"\0x00"*3*2, dtype="d"),
|
| 57 |
+
np.frombuffer(b"\0x00"*3, dtype="f")),),
|
| 58 |
+
ValueError), # output not writeable
|
| 59 |
+
])
|
| 60 |
+
def test_invalid_arguments(self, args, error):
|
| 61 |
+
# This is private API, which may be modified freely
|
| 62 |
+
with pytest.raises(error):
|
| 63 |
+
self.method._simple_strided_call(*args)
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
@pytest.mark.parametrize(
|
| 67 |
+
"cls", [
|
| 68 |
+
np.ndarray, np.recarray, np.char.chararray, np.matrix, np.memmap
|
| 69 |
+
]
|
| 70 |
+
)
|
| 71 |
+
class TestClassGetItem:
|
| 72 |
+
def test_class_getitem(self, cls: type[np.ndarray]) -> None:
|
| 73 |
+
"""Test `ndarray.__class_getitem__`."""
|
| 74 |
+
alias = cls[Any, Any]
|
| 75 |
+
assert isinstance(alias, types.GenericAlias)
|
| 76 |
+
assert alias.__origin__ is cls
|
| 77 |
+
|
| 78 |
+
@pytest.mark.parametrize("arg_len", range(4))
|
| 79 |
+
def test_subscript_tup(self, cls: type[np.ndarray], arg_len: int) -> None:
|
| 80 |
+
arg_tup = (Any,) * arg_len
|
| 81 |
+
if arg_len in (1, 2):
|
| 82 |
+
assert cls[arg_tup]
|
| 83 |
+
else:
|
| 84 |
+
match = f"Too {'few' if arg_len == 0 else 'many'} arguments"
|
| 85 |
+
with pytest.raises(TypeError, match=match):
|
| 86 |
+
cls[arg_tup]
|
mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_arrayobject.py
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pytest
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
from numpy.testing import assert_array_equal
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def test_matrix_transpose_raises_error_for_1d():
|
| 8 |
+
msg = "matrix transpose with ndim < 2 is undefined"
|
| 9 |
+
arr = np.arange(48)
|
| 10 |
+
with pytest.raises(ValueError, match=msg):
|
| 11 |
+
arr.mT
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def test_matrix_transpose_equals_transpose_2d():
|
| 15 |
+
arr = np.arange(48).reshape((6, 8))
|
| 16 |
+
assert_array_equal(arr.T, arr.mT)
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
ARRAY_SHAPES_TO_TEST = (
|
| 20 |
+
(5, 2),
|
| 21 |
+
(5, 2, 3),
|
| 22 |
+
(5, 2, 3, 4),
|
| 23 |
+
)
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
@pytest.mark.parametrize("shape", ARRAY_SHAPES_TO_TEST)
|
| 27 |
+
def test_matrix_transpose_equals_swapaxes(shape):
|
| 28 |
+
num_of_axes = len(shape)
|
| 29 |
+
vec = np.arange(shape[-1])
|
| 30 |
+
arr = np.broadcast_to(vec, shape)
|
| 31 |
+
tgt = np.swapaxes(arr, num_of_axes - 2, num_of_axes - 1)
|
| 32 |
+
mT = arr.mT
|
| 33 |
+
assert_array_equal(tgt, mT)
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
class MyArr(np.ndarray):
|
| 37 |
+
def __array_wrap__(self, arr, context=None, return_scalar=None):
|
| 38 |
+
return super().__array_wrap__(arr, context, return_scalar)
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
class MyArrNoWrap(np.ndarray):
|
| 42 |
+
pass
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
@pytest.mark.parametrize("subclass_self", [np.ndarray, MyArr, MyArrNoWrap])
|
| 46 |
+
@pytest.mark.parametrize("subclass_arr", [np.ndarray, MyArr, MyArrNoWrap])
|
| 47 |
+
def test_array_wrap(subclass_self, subclass_arr):
|
| 48 |
+
# NumPy should allow `__array_wrap__` to be called on arrays, it's logic
|
| 49 |
+
# is designed in a way that:
|
| 50 |
+
#
|
| 51 |
+
# * Subclasses never return scalars by default (to preserve their
|
| 52 |
+
# information). They can choose to if they wish.
|
| 53 |
+
# * NumPy returns scalars, if `return_scalar` is passed as True to allow
|
| 54 |
+
# manual calls to `arr.__array_wrap__` to do the right thing.
|
| 55 |
+
# * The type of the input should be ignored (it should be a base-class
|
| 56 |
+
# array, but I am not sure this is guaranteed).
|
| 57 |
+
|
| 58 |
+
arr = np.arange(3).view(subclass_self)
|
| 59 |
+
|
| 60 |
+
arr0d = np.array(3, dtype=np.int8).view(subclass_arr)
|
| 61 |
+
# With third argument True, ndarray allows "decay" to scalar.
|
| 62 |
+
# (I don't think NumPy would pass `None`, but it seems clear to support)
|
| 63 |
+
if subclass_self is np.ndarray:
|
| 64 |
+
assert type(arr.__array_wrap__(arr0d, None, True)) is np.int8
|
| 65 |
+
else:
|
| 66 |
+
assert type(arr.__array_wrap__(arr0d, None, True)) is type(arr)
|
| 67 |
+
|
| 68 |
+
# Otherwise, result should be viewed as the subclass
|
| 69 |
+
assert type(arr.__array_wrap__(arr0d)) is type(arr)
|
| 70 |
+
assert type(arr.__array_wrap__(arr0d, None, None)) is type(arr)
|
| 71 |
+
assert type(arr.__array_wrap__(arr0d, None, False)) is type(arr)
|
| 72 |
+
|
| 73 |
+
# Non 0-D array can't be converted to scalar, so we ignore that
|
| 74 |
+
arr1d = np.array([3], dtype=np.int8).view(subclass_arr)
|
| 75 |
+
assert type(arr.__array_wrap__(arr1d, None, True)) is type(arr)
|
mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_casting_floatingpoint_errors.py
ADDED
|
@@ -0,0 +1,154 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pytest
|
| 2 |
+
from pytest import param
|
| 3 |
+
from numpy.testing import IS_WASM
|
| 4 |
+
import numpy as np
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def values_and_dtypes():
|
| 8 |
+
"""
|
| 9 |
+
Generate value+dtype pairs that generate floating point errors during
|
| 10 |
+
casts. The invalid casts to integers will generate "invalid" value
|
| 11 |
+
warnings, the float casts all generate "overflow".
|
| 12 |
+
|
| 13 |
+
(The Python int/float paths don't need to get tested in all the same
|
| 14 |
+
situations, but it does not hurt.)
|
| 15 |
+
"""
|
| 16 |
+
# Casting to float16:
|
| 17 |
+
yield param(70000, "float16", id="int-to-f2")
|
| 18 |
+
yield param("70000", "float16", id="str-to-f2")
|
| 19 |
+
yield param(70000.0, "float16", id="float-to-f2")
|
| 20 |
+
yield param(np.longdouble(70000.), "float16", id="longdouble-to-f2")
|
| 21 |
+
yield param(np.float64(70000.), "float16", id="double-to-f2")
|
| 22 |
+
yield param(np.float32(70000.), "float16", id="float-to-f2")
|
| 23 |
+
# Casting to float32:
|
| 24 |
+
yield param(10**100, "float32", id="int-to-f4")
|
| 25 |
+
yield param(1e100, "float32", id="float-to-f2")
|
| 26 |
+
yield param(np.longdouble(1e300), "float32", id="longdouble-to-f2")
|
| 27 |
+
yield param(np.float64(1e300), "float32", id="double-to-f2")
|
| 28 |
+
# Casting to float64:
|
| 29 |
+
# If longdouble is double-double, its max can be rounded down to the double
|
| 30 |
+
# max. So we correct the double spacing (a bit weird, admittedly):
|
| 31 |
+
max_ld = np.finfo(np.longdouble).max
|
| 32 |
+
spacing = np.spacing(np.nextafter(np.finfo("f8").max, 0))
|
| 33 |
+
if max_ld - spacing > np.finfo("f8").max:
|
| 34 |
+
yield param(np.finfo(np.longdouble).max, "float64",
|
| 35 |
+
id="longdouble-to-f8")
|
| 36 |
+
|
| 37 |
+
# Cast to complex32:
|
| 38 |
+
yield param(2e300, "complex64", id="float-to-c8")
|
| 39 |
+
yield param(2e300+0j, "complex64", id="complex-to-c8")
|
| 40 |
+
yield param(2e300j, "complex64", id="complex-to-c8")
|
| 41 |
+
yield param(np.longdouble(2e300), "complex64", id="longdouble-to-c8")
|
| 42 |
+
|
| 43 |
+
# Invalid float to integer casts:
|
| 44 |
+
with np.errstate(over="ignore"):
|
| 45 |
+
for to_dt in np.typecodes["AllInteger"]:
|
| 46 |
+
for value in [np.inf, np.nan]:
|
| 47 |
+
for from_dt in np.typecodes["AllFloat"]:
|
| 48 |
+
from_dt = np.dtype(from_dt)
|
| 49 |
+
from_val = from_dt.type(value)
|
| 50 |
+
|
| 51 |
+
yield param(from_val, to_dt, id=f"{from_val}-to-{to_dt}")
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def check_operations(dtype, value):
|
| 55 |
+
"""
|
| 56 |
+
There are many dedicated paths in NumPy which cast and should check for
|
| 57 |
+
floating point errors which occurred during those casts.
|
| 58 |
+
"""
|
| 59 |
+
if dtype.kind != 'i':
|
| 60 |
+
# These assignments use the stricter setitem logic:
|
| 61 |
+
def assignment():
|
| 62 |
+
arr = np.empty(3, dtype=dtype)
|
| 63 |
+
arr[0] = value
|
| 64 |
+
|
| 65 |
+
yield assignment
|
| 66 |
+
|
| 67 |
+
def fill():
|
| 68 |
+
arr = np.empty(3, dtype=dtype)
|
| 69 |
+
arr.fill(value)
|
| 70 |
+
|
| 71 |
+
yield fill
|
| 72 |
+
|
| 73 |
+
def copyto_scalar():
|
| 74 |
+
arr = np.empty(3, dtype=dtype)
|
| 75 |
+
np.copyto(arr, value, casting="unsafe")
|
| 76 |
+
|
| 77 |
+
yield copyto_scalar
|
| 78 |
+
|
| 79 |
+
def copyto():
|
| 80 |
+
arr = np.empty(3, dtype=dtype)
|
| 81 |
+
np.copyto(arr, np.array([value, value, value]), casting="unsafe")
|
| 82 |
+
|
| 83 |
+
yield copyto
|
| 84 |
+
|
| 85 |
+
def copyto_scalar_masked():
|
| 86 |
+
arr = np.empty(3, dtype=dtype)
|
| 87 |
+
np.copyto(arr, value, casting="unsafe",
|
| 88 |
+
where=[True, False, True])
|
| 89 |
+
|
| 90 |
+
yield copyto_scalar_masked
|
| 91 |
+
|
| 92 |
+
def copyto_masked():
|
| 93 |
+
arr = np.empty(3, dtype=dtype)
|
| 94 |
+
np.copyto(arr, np.array([value, value, value]), casting="unsafe",
|
| 95 |
+
where=[True, False, True])
|
| 96 |
+
|
| 97 |
+
yield copyto_masked
|
| 98 |
+
|
| 99 |
+
def direct_cast():
|
| 100 |
+
np.array([value, value, value]).astype(dtype)
|
| 101 |
+
|
| 102 |
+
yield direct_cast
|
| 103 |
+
|
| 104 |
+
def direct_cast_nd_strided():
|
| 105 |
+
arr = np.full((5, 5, 5), fill_value=value)[:, ::2, :]
|
| 106 |
+
arr.astype(dtype)
|
| 107 |
+
|
| 108 |
+
yield direct_cast_nd_strided
|
| 109 |
+
|
| 110 |
+
def boolean_array_assignment():
|
| 111 |
+
arr = np.empty(3, dtype=dtype)
|
| 112 |
+
arr[[True, False, True]] = np.array([value, value])
|
| 113 |
+
|
| 114 |
+
yield boolean_array_assignment
|
| 115 |
+
|
| 116 |
+
def integer_array_assignment():
|
| 117 |
+
arr = np.empty(3, dtype=dtype)
|
| 118 |
+
values = np.array([value, value])
|
| 119 |
+
|
| 120 |
+
arr[[0, 1]] = values
|
| 121 |
+
|
| 122 |
+
yield integer_array_assignment
|
| 123 |
+
|
| 124 |
+
def integer_array_assignment_with_subspace():
|
| 125 |
+
arr = np.empty((5, 3), dtype=dtype)
|
| 126 |
+
values = np.array([value, value, value])
|
| 127 |
+
|
| 128 |
+
arr[[0, 2]] = values
|
| 129 |
+
|
| 130 |
+
yield integer_array_assignment_with_subspace
|
| 131 |
+
|
| 132 |
+
def flat_assignment():
|
| 133 |
+
arr = np.empty((3,), dtype=dtype)
|
| 134 |
+
values = np.array([value, value, value])
|
| 135 |
+
arr.flat[:] = values
|
| 136 |
+
|
| 137 |
+
yield flat_assignment
|
| 138 |
+
|
| 139 |
+
@pytest.mark.skipif(IS_WASM, reason="no wasm fp exception support")
|
| 140 |
+
@pytest.mark.parametrize(["value", "dtype"], values_and_dtypes())
|
| 141 |
+
@pytest.mark.filterwarnings("ignore::numpy.exceptions.ComplexWarning")
|
| 142 |
+
def test_floatingpoint_errors_casting(dtype, value):
|
| 143 |
+
dtype = np.dtype(dtype)
|
| 144 |
+
for operation in check_operations(dtype, value):
|
| 145 |
+
dtype = np.dtype(dtype)
|
| 146 |
+
|
| 147 |
+
match = "invalid" if dtype.kind in 'iu' else "overflow"
|
| 148 |
+
with pytest.warns(RuntimeWarning, match=match):
|
| 149 |
+
operation()
|
| 150 |
+
|
| 151 |
+
with np.errstate(all="raise"):
|
| 152 |
+
with pytest.raises(FloatingPointError, match=match):
|
| 153 |
+
operation()
|
| 154 |
+
|
mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_cpu_dispatcher.py
ADDED
|
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from numpy._core._multiarray_umath import (
|
| 2 |
+
__cpu_features__, __cpu_baseline__, __cpu_dispatch__
|
| 3 |
+
)
|
| 4 |
+
from numpy._core import _umath_tests
|
| 5 |
+
from numpy.testing import assert_equal
|
| 6 |
+
|
| 7 |
+
def test_dispatcher():
|
| 8 |
+
"""
|
| 9 |
+
Testing the utilities of the CPU dispatcher
|
| 10 |
+
"""
|
| 11 |
+
targets = (
|
| 12 |
+
"SSE2", "SSE41", "AVX2",
|
| 13 |
+
"VSX", "VSX2", "VSX3",
|
| 14 |
+
"NEON", "ASIMD", "ASIMDHP",
|
| 15 |
+
"VX", "VXE"
|
| 16 |
+
)
|
| 17 |
+
highest_sfx = "" # no suffix for the baseline
|
| 18 |
+
all_sfx = []
|
| 19 |
+
for feature in reversed(targets):
|
| 20 |
+
# skip baseline features, by the default `CCompilerOpt` do not generate separated objects
|
| 21 |
+
# for the baseline, just one object combined all of them via 'baseline' option
|
| 22 |
+
# within the configuration statements.
|
| 23 |
+
if feature in __cpu_baseline__:
|
| 24 |
+
continue
|
| 25 |
+
# check compiler and running machine support
|
| 26 |
+
if feature not in __cpu_dispatch__ or not __cpu_features__[feature]:
|
| 27 |
+
continue
|
| 28 |
+
|
| 29 |
+
if not highest_sfx:
|
| 30 |
+
highest_sfx = "_" + feature
|
| 31 |
+
all_sfx.append("func" + "_" + feature)
|
| 32 |
+
|
| 33 |
+
test = _umath_tests.test_dispatch()
|
| 34 |
+
assert_equal(test["func"], "func" + highest_sfx)
|
| 35 |
+
assert_equal(test["var"], "var" + highest_sfx)
|
| 36 |
+
|
| 37 |
+
if highest_sfx:
|
| 38 |
+
assert_equal(test["func_xb"], "func" + highest_sfx)
|
| 39 |
+
assert_equal(test["var_xb"], "var" + highest_sfx)
|
| 40 |
+
else:
|
| 41 |
+
assert_equal(test["func_xb"], "nobase")
|
| 42 |
+
assert_equal(test["var_xb"], "nobase")
|
| 43 |
+
|
| 44 |
+
all_sfx.append("func") # add the baseline
|
| 45 |
+
assert_equal(test["all"], all_sfx)
|
mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_cpu_features.py
ADDED
|
@@ -0,0 +1,416 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import re
|
| 3 |
+
import sys
|
| 4 |
+
import pathlib
|
| 5 |
+
import platform
|
| 6 |
+
import subprocess
|
| 7 |
+
import pytest
|
| 8 |
+
from numpy._core._multiarray_umath import (
|
| 9 |
+
__cpu_features__,
|
| 10 |
+
__cpu_baseline__,
|
| 11 |
+
__cpu_dispatch__,
|
| 12 |
+
)
|
| 13 |
+
import numpy as np
|
| 14 |
+
|
| 15 |
+
def assert_features_equal(actual, desired, fname):
|
| 16 |
+
__tracebackhide__ = True # Hide traceback for py.test
|
| 17 |
+
actual, desired = str(actual), str(desired)
|
| 18 |
+
if actual == desired:
|
| 19 |
+
return
|
| 20 |
+
detected = str(__cpu_features__).replace("'", "")
|
| 21 |
+
try:
|
| 22 |
+
with open("/proc/cpuinfo") as fd:
|
| 23 |
+
cpuinfo = fd.read(2048)
|
| 24 |
+
except Exception as err:
|
| 25 |
+
cpuinfo = str(err)
|
| 26 |
+
|
| 27 |
+
try:
|
| 28 |
+
import subprocess
|
| 29 |
+
auxv = subprocess.check_output(['/bin/true'], env=dict(LD_SHOW_AUXV="1"))
|
| 30 |
+
auxv = auxv.decode()
|
| 31 |
+
except Exception as err:
|
| 32 |
+
auxv = str(err)
|
| 33 |
+
|
| 34 |
+
import textwrap
|
| 35 |
+
error_report = textwrap.indent(
|
| 36 |
+
"""
|
| 37 |
+
###########################################
|
| 38 |
+
### Extra debugging information
|
| 39 |
+
###########################################
|
| 40 |
+
-------------------------------------------
|
| 41 |
+
--- NumPy Detections
|
| 42 |
+
-------------------------------------------
|
| 43 |
+
%s
|
| 44 |
+
-------------------------------------------
|
| 45 |
+
--- SYS / CPUINFO
|
| 46 |
+
-------------------------------------------
|
| 47 |
+
%s....
|
| 48 |
+
-------------------------------------------
|
| 49 |
+
--- SYS / AUXV
|
| 50 |
+
-------------------------------------------
|
| 51 |
+
%s
|
| 52 |
+
""" % (detected, cpuinfo, auxv), prefix='\r')
|
| 53 |
+
|
| 54 |
+
raise AssertionError((
|
| 55 |
+
"Failure Detection\n"
|
| 56 |
+
" NAME: '%s'\n"
|
| 57 |
+
" ACTUAL: %s\n"
|
| 58 |
+
" DESIRED: %s\n"
|
| 59 |
+
"%s"
|
| 60 |
+
) % (fname, actual, desired, error_report))
|
| 61 |
+
|
| 62 |
+
def _text_to_list(txt):
|
| 63 |
+
out = txt.strip("][\n").replace("'", "").split(', ')
|
| 64 |
+
return None if out[0] == "" else out
|
| 65 |
+
|
| 66 |
+
class AbstractTest:
    """Shared machinery for the per-architecture CPU-feature test classes.

    Subclasses populate ``features``/``features_groups``/``features_map`` and
    override ``load_flags`` to fill ``features_flags`` from the OS (either
    ``/proc/cpuinfo`` or the ELF auxiliary vector).
    """
    features = []
    features_groups = {}
    features_map = {}
    features_flags = set()

    def load_flags(self):
        # a hook — subclasses override to populate `features_flags`
        pass

    def test_features(self):
        self.load_flags()
        # Feature groups: NumPy's group flag must equal "all members present".
        for gname, members in self.features_groups.items():
            have_all = all(self.cpu_have(member) for member in members)
            assert_features_equal(__cpu_features__.get(gname), have_all, gname)

        # Individual features: NumPy's detection vs. the OS-reported flags.
        for feature_name in self.features:
            assert_features_equal(
                __cpu_features__.get(feature_name),
                self.cpu_have(feature_name),
                feature_name,
            )

    def cpu_have(self, feature_name):
        """Return True if the OS reports `feature_name` (after renaming via
        `features_map`; a tuple of names means "any of these")."""
        names = self.features_map.get(feature_name, feature_name)
        if isinstance(names, str):
            return names in self.features_flags
        return any(name in self.features_flags for name in names)

    def load_flags_cpuinfo(self, magic_key):
        self.features_flags = self.get_cpuinfo_item(magic_key)

    def get_cpuinfo_item(self, magic_key):
        """Collect the upper-cased, whitespace-split values of every
        ``magic_key: ...`` line in /proc/cpuinfo."""
        values = set()
        with open('/proc/cpuinfo') as fd:
            for line in fd:
                if not line.startswith(magic_key):
                    continue
                key_value = [part.strip() for part in line.split(':', 1)]
                if len(key_value) == 2:
                    values = values.union(key_value[1].upper().split())
        return values

    def load_flags_auxv(self):
        """Merge the AT_HWCAP* entries of the ELF auxiliary vector into
        `features_flags` (via LD_SHOW_AUXV on a trivial subprocess)."""
        auxv = subprocess.check_output(['/bin/true'], env=dict(LD_SHOW_AUXV="1"))
        for entry in auxv.split(b'\n'):
            if not entry.startswith(b"AT_HWCAP"):
                continue
            hwcap = [part.strip() for part in entry.split(b':', 1)]
            if len(hwcap) == 2:
                self.features_flags = self.features_flags.union(
                    hwcap[1].upper().decode().split()
                )
|
| 116 |
+
|
| 117 |
+
@pytest.mark.skipif(
    sys.platform == 'emscripten',
    reason= (
        "The subprocess module is not available on WASM platforms and"
        " therefore this test class cannot be properly executed."
    ),
)
class TestEnvPrivation:
    """Tests for the NPY_ENABLE/DISABLE_CPU_FEATURES environment variables.

    Each test spawns a fresh interpreter running a tiny detection script so
    the environment variables are evaluated at NumPy import time.
    """
    # NOTE: this class body runs at *import* time — it snapshots the current
    # environment and pops the two NPY_*_CPU_FEATURES variables from it so
    # the spawned subprocesses start from a clean slate.
    cwd = pathlib.Path(__file__).parent.resolve()
    env = os.environ.copy()
    _enable = os.environ.pop('NPY_ENABLE_CPU_FEATURES', None)
    _disable = os.environ.pop('NPY_DISABLE_CPU_FEATURES', None)
    SUBPROCESS_ARGS = dict(cwd=cwd, capture_output=True, text=True, check=True)
    # Dispatched features the running machine does NOT support, if any.
    unavailable_feats = [
        feat for feat in __cpu_dispatch__ if not __cpu_features__[feat]
    ]
    UNAVAILABLE_FEAT = (
        None if len(unavailable_feats) == 0
        else unavailable_feats[0]
    )
    BASELINE_FEAT = None if len(__cpu_baseline__) == 0 else __cpu_baseline__[0]
    # Script executed in the subprocess: prints the dispatched features that
    # were actually enabled at import time.
    SCRIPT = """
def main():
    from numpy._core._multiarray_umath import (
        __cpu_features__,
        __cpu_dispatch__
    )

    detected = [feat for feat in __cpu_dispatch__ if __cpu_features__[feat]]
    print(detected)

if __name__ == "__main__":
    main()
"""

    @pytest.fixture(autouse=True)
    def setup_class(self, tmp_path_factory):
        # Write the detection script to a per-session temp dir once.
        file = tmp_path_factory.mktemp("runtime_test_script")
        file /= "_runtime_detect.py"
        file.write_text(self.SCRIPT)
        self.file = file
        return

    def _run(self):
        # Run the detection script with this test's (possibly modified) env.
        return subprocess.run(
            [sys.executable, self.file],
            env=self.env,
            **self.SUBPROCESS_ARGS,
        )

    # Helper function mimicking pytest.raises for subprocess call
    def _expect_error(
        self,
        msg,
        err_type,
        no_error_msg="Failed to generate error"
    ):
        try:
            self._run()
        except subprocess.CalledProcessError as e:
            # Both the message and the exception type must appear in stderr.
            assertion_message = f"Expected: {msg}\nGot: {e.stderr}"
            assert re.search(msg, e.stderr), assertion_message

            assertion_message = (
                f"Expected error of type: {err_type}; see full "
                f"error:\n{e.stderr}"
            )
            assert re.search(err_type, e.stderr), assertion_message
        else:
            assert False, no_error_msg

    def setup_method(self):
        """Ensure that the environment is reset"""
        self.env = os.environ.copy()
        return

    def test_runtime_feature_selection(self):
        """
        Ensure that when selecting `NPY_ENABLE_CPU_FEATURES`, only the
        features exactly specified are dispatched.
        """

        # Capture runtime-enabled features
        out = self._run()
        non_baseline_features = _text_to_list(out.stdout)

        if non_baseline_features is None:
            pytest.skip(
                "No dispatchable features outside of baseline detected."
            )
        feature = non_baseline_features[0]

        # Capture runtime-enabled features when `NPY_ENABLE_CPU_FEATURES` is
        # specified
        self.env['NPY_ENABLE_CPU_FEATURES'] = feature
        out = self._run()
        enabled_features = _text_to_list(out.stdout)

        # Ensure that only one feature is enabled, and it is exactly the one
        # specified by `NPY_ENABLE_CPU_FEATURES`
        assert set(enabled_features) == {feature}

        if len(non_baseline_features) < 2:
            pytest.skip("Only one non-baseline feature detected.")
        # Capture runtime-enabled features when `NPY_ENABLE_CPU_FEATURES` is
        # specified
        self.env['NPY_ENABLE_CPU_FEATURES'] = ",".join(non_baseline_features)
        out = self._run()
        enabled_features = _text_to_list(out.stdout)

        # Ensure that both features are enabled, and they are exactly the ones
        # specified by `NPY_ENABLE_CPU_FEATURES`
        assert set(enabled_features) == set(non_baseline_features)
        return

    @pytest.mark.parametrize("enabled, disabled",
    [
        ("feature", "feature"),
        ("feature", "same"),
    ])
    def test_both_enable_disable_set(self, enabled, disabled):
        """
        Ensure that when both environment variables are set then an
        ImportError is thrown
        """
        self.env['NPY_ENABLE_CPU_FEATURES'] = enabled
        self.env['NPY_DISABLE_CPU_FEATURES'] = disabled
        msg = "Both NPY_DISABLE_CPU_FEATURES and NPY_ENABLE_CPU_FEATURES"
        err_type = "ImportError"
        self._expect_error(msg, err_type)

    @pytest.mark.skipif(
        not __cpu_dispatch__,
        reason=(
            "NPY_*_CPU_FEATURES only parsed if "
            "`__cpu_dispatch__` is non-empty"
        )
    )
    @pytest.mark.parametrize("action", ["ENABLE", "DISABLE"])
    def test_variable_too_long(self, action):
        """
        Test that an error is thrown if the environment variables are too long
        to be processed. Current limit is 1024, but this may change later.
        """
        MAX_VAR_LENGTH = 1024
        # Actual length is MAX_VAR_LENGTH + 1 due to null-termination
        self.env[f'NPY_{action}_CPU_FEATURES'] = "t" * MAX_VAR_LENGTH
        msg = (
            f"Length of environment variable 'NPY_{action}_CPU_FEATURES' is "
            f"{MAX_VAR_LENGTH + 1}, only {MAX_VAR_LENGTH} accepted"
        )
        err_type = "RuntimeError"
        self._expect_error(msg, err_type)

    @pytest.mark.skipif(
        not __cpu_dispatch__,
        reason=(
            "NPY_*_CPU_FEATURES only parsed if "
            "`__cpu_dispatch__` is non-empty"
        )
    )
    def test_impossible_feature_disable(self):
        """
        Test that a RuntimeError is thrown if an impossible feature-disabling
        request is made. This includes disabling a baseline feature.
        """

        if self.BASELINE_FEAT is None:
            pytest.skip("There are no unavailable features to test with")
        bad_feature = self.BASELINE_FEAT
        self.env['NPY_DISABLE_CPU_FEATURES'] = bad_feature
        msg = (
            f"You cannot disable CPU feature '{bad_feature}', since it is "
            "part of the baseline optimizations"
        )
        err_type = "RuntimeError"
        self._expect_error(msg, err_type)

    def test_impossible_feature_enable(self):
        """
        Test that a RuntimeError is thrown if an impossible feature-enabling
        request is made. This includes enabling a feature not supported by the
        machine, or disabling a baseline optimization.
        """

        if self.UNAVAILABLE_FEAT is None:
            pytest.skip("There are no unavailable features to test with")
        bad_feature = self.UNAVAILABLE_FEAT
        self.env['NPY_ENABLE_CPU_FEATURES'] = bad_feature
        msg = (
            f"You cannot enable CPU features \\({bad_feature}\\), since "
            "they are not supported by your machine."
        )
        err_type = "RuntimeError"
        self._expect_error(msg, err_type)

        # Ensure that it fails even when providing garbage in addition
        feats = f"{bad_feature}, Foobar"
        self.env['NPY_ENABLE_CPU_FEATURES'] = feats
        msg = (
            f"You cannot enable CPU features \\({bad_feature}\\), since they "
            "are not supported by your machine."
        )
        self._expect_error(msg, err_type)

        if self.BASELINE_FEAT is not None:
            # Ensure that only the bad feature gets reported
            feats = f"{bad_feature}, {self.BASELINE_FEAT}"
            self.env['NPY_ENABLE_CPU_FEATURES'] = feats
            msg = (
                f"You cannot enable CPU features \\({bad_feature}\\), since "
                "they are not supported by your machine."
            )
            self._expect_error(msg, err_type)
|
| 331 |
+
|
| 332 |
+
# Module-level platform probes used by the per-architecture skipif guards.
is_linux = sys.platform.startswith('linux')
is_cygwin = sys.platform.startswith('cygwin')
machine = platform.machine()
is_x86 = re.match("^(amd64|x86|i386|i686)", machine, re.IGNORECASE)
@pytest.mark.skipif(
    not (is_linux or is_cygwin) or not is_x86, reason="Only for Linux and x86"
)
class Test_X86_Features(AbstractTest):
    """x86/x86-64: validate detection against /proc/cpuinfo 'flags'."""
    # Individual CPU features NumPy can detect on x86.
    features = [
        "MMX", "SSE", "SSE2", "SSE3", "SSSE3", "SSE41", "POPCNT", "SSE42",
        "AVX", "F16C", "XOP", "FMA4", "FMA3", "AVX2", "AVX512F", "AVX512CD",
        "AVX512ER", "AVX512PF", "AVX5124FMAPS", "AVX5124VNNIW", "AVX512VPOPCNTDQ",
        "AVX512VL", "AVX512BW", "AVX512DQ", "AVX512VNNI", "AVX512IFMA",
        "AVX512VBMI", "AVX512VBMI2", "AVX512BITALG", "AVX512FP16",
    ]
    # AVX-512 feature groups, one per CPU generation (KNL, SKX, ICL, ...).
    features_groups = dict(
        AVX512_KNL = ["AVX512F", "AVX512CD", "AVX512ER", "AVX512PF"],
        AVX512_KNM = ["AVX512F", "AVX512CD", "AVX512ER", "AVX512PF", "AVX5124FMAPS",
                      "AVX5124VNNIW", "AVX512VPOPCNTDQ"],
        AVX512_SKX = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL"],
        AVX512_CLX = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512VNNI"],
        AVX512_CNL = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512IFMA",
                      "AVX512VBMI"],
        AVX512_ICL = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512IFMA",
                      "AVX512VBMI", "AVX512VNNI", "AVX512VBMI2", "AVX512BITALG", "AVX512VPOPCNTDQ"],
        AVX512_SPR = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ",
                      "AVX512VL", "AVX512IFMA", "AVX512VBMI", "AVX512VNNI",
                      "AVX512VBMI2", "AVX512BITALG", "AVX512VPOPCNTDQ",
                      "AVX512FP16"],
    )
    # NumPy feature name -> Linux kernel flag name, where the two differ.
    features_map = dict(
        SSE3="PNI", SSE41="SSE4_1", SSE42="SSE4_2", FMA3="FMA",
        AVX512VNNI="AVX512_VNNI", AVX512BITALG="AVX512_BITALG", AVX512VBMI2="AVX512_VBMI2",
        AVX5124FMAPS="AVX512_4FMAPS", AVX5124VNNIW="AVX512_4VNNIW", AVX512VPOPCNTDQ="AVX512_VPOPCNTDQ",
        AVX512FP16="AVX512_FP16",
    )
    def load_flags(self):
        self.load_flags_cpuinfo("flags")
|
| 370 |
+
|
| 371 |
+
is_power = re.match("^(powerpc|ppc)64", machine, re.IGNORECASE)
@pytest.mark.skipif(not is_linux or not is_power, reason="Only for Linux and Power")
class Test_POWER_Features(AbstractTest):
    """POWER: validate detection against the ELF auxiliary vector."""
    features = ["VSX", "VSX2", "VSX3", "VSX4"]
    # VSX revisions are reported via ISA-level names in AT_HWCAP.
    features_map = dict(VSX2="ARCH_2_07", VSX3="ARCH_3_00", VSX4="ARCH_3_1")

    def load_flags(self):
        self.load_flags_auxv()
|
| 379 |
+
|
| 380 |
+
|
| 381 |
+
is_zarch = re.match("^(s390x)", machine, re.IGNORECASE)
@pytest.mark.skipif(not is_linux or not is_zarch,
                    reason="Only for Linux and IBM Z")
class Test_ZARCH_Features(AbstractTest):
    """IBM Z (s390x): validate detection against the ELF auxiliary vector."""
    features = ["VX", "VXE", "VXE2"]

    def load_flags(self):
        self.load_flags_auxv()
|
| 389 |
+
|
| 390 |
+
|
| 391 |
+
is_arm = re.match("^(arm|aarch64)", machine, re.IGNORECASE)
@pytest.mark.skipif(not is_linux or not is_arm, reason="Only for Linux and ARM")
class Test_ARM_Features(AbstractTest):
    """ARM/AArch64: validate detection against /proc/cpuinfo 'Features'."""
    features = [
        "SVE", "NEON", "ASIMD", "FPHP", "ASIMDHP", "ASIMDDP", "ASIMDFHM"
    ]
    features_groups = dict(
        NEON_FP16 = ["NEON", "HALF"],
        NEON_VFPV4 = ["NEON", "VFPV4"],
    )
    def load_flags(self):
        self.load_flags_cpuinfo("Features")
        arch = self.get_cpuinfo_item("CPU architecture")
        # in case of mounting virtual filesystem of aarch64 kernel
        # (an architecture number > 7 means ARMv8 even if `machine` says arm)
        is_rootfs_v8 = int('0'+next(iter(arch))) > 7 if arch else 0
        if re.match("^(aarch64|AARCH64)", machine) or is_rootfs_v8:
            # On ARMv8, NEON/half-precision are implied by ASIMD.
            self.features_map = dict(
                NEON="ASIMD", HALF="ASIMD", VFPV4="ASIMD"
            )
        else:
            self.features_map = dict(
                # ELF auxiliary vector and /proc/cpuinfo on Linux kernel(armv8 aarch32)
                # doesn't provide information about ASIMD, so we assume that ASIMD is supported
                # if the kernel reports any one of the following ARM8 features.
                ASIMD=("AES", "SHA1", "SHA2", "PMULL", "CRC32")
            )
|
mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_cython.py
ADDED
|
@@ -0,0 +1,303 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from datetime import datetime
import os
import subprocess
import sys
import pytest

import numpy as np
from numpy.testing import assert_array_equal, IS_WASM, IS_EDITABLE

# This import is copied from random.tests.test_extending
try:
    import cython
    from Cython.Compiler.Version import version as cython_version
except ImportError:
    cython = None
else:
    from numpy._utils import _pep440

    # Note: keep in sync with the one in pyproject.toml
    required_version = "3.0.6"
    if _pep440.parse(cython_version) < _pep440.Version(required_version):
        # too old or wrong cython, skip the test
        cython = None

# Skip the whole module when no (new-enough) Cython is available.
pytestmark = pytest.mark.skipif(cython is None, reason="requires cython")


if IS_EDITABLE:
    # Editable installs have no compiled package layout to build against.
    pytest.skip(
        "Editable install doesn't support tests with a compile step",
        allow_module_level=True
    )
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
@pytest.fixture(scope='module')
def install_temp(tmpdir_factory):
    """Build the Cython `checks` extension with meson into a temp build dir
    and append that dir to sys.path so tests can `import checks`."""
    # Based in part on test_cython from random.tests.test_extending
    if IS_WASM:
        pytest.skip("No subprocess")

    srcdir = os.path.join(os.path.dirname(__file__), 'examples', 'cython')
    build_dir = tmpdir_factory.mktemp("cython_test") / "build"
    os.makedirs(build_dir, exist_ok=True)
    # Ensure we use the correct Python interpreter even when `meson` is
    # installed in a different Python environment (see gh-24956)
    native_file = str(build_dir / 'interpreter-native-file.ini')
    with open(native_file, 'w') as f:
        f.write("[binaries]\n")
        f.write(f"python = '{sys.executable}'\n")
        f.write(f"python3 = '{sys.executable}'")

    try:
        subprocess.check_call(["meson", "--version"])
    except FileNotFoundError:
        pytest.skip("No usable 'meson' found")
    if sys.platform == "win32":
        # --vsenv makes meson locate the MSVC toolchain on Windows.
        subprocess.check_call(["meson", "setup",
                               "--buildtype=release",
                               "--vsenv", "--native-file", native_file,
                               str(srcdir)],
                              cwd=build_dir,
                              )
    else:
        subprocess.check_call(["meson", "setup",
                               "--native-file", native_file, str(srcdir)],
                              cwd=build_dir
                              )
    try:
        subprocess.check_call(["meson", "compile", "-vv"], cwd=build_dir)
    except subprocess.CalledProcessError:
        # Echo the exact commands so CI failures are reproducible locally.
        print("----------------")
        print("meson build failed when doing")
        print(f"'meson setup --native-file {native_file} {srcdir}'")
        print("'meson compile -vv'")
        print(f"in {build_dir}")
        print("----------------")
        raise

    sys.path.append(str(build_dir))
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
def test_is_timedelta64_object(install_temp):
    """`checks.is_td64` accepts exactly np.timedelta64 instances."""
    import checks

    for td in (np.timedelta64(1234),
               np.timedelta64(1234, "ns"),
               np.timedelta64("NaT", "ns")):
        assert checks.is_td64(td)

    for other in (1, None, "foo", np.datetime64("now", "s")):
        assert not checks.is_td64(other)
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
def test_is_datetime64_object(install_temp):
    """`checks.is_dt64` accepts exactly np.datetime64 instances."""
    import checks

    for dt in (np.datetime64(1234, "ns"), np.datetime64("NaT", "ns")):
        assert checks.is_dt64(dt)

    for other in (1, None, "foo", np.timedelta64(1234)):
        assert not checks.is_dt64(other)
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
def test_get_datetime64_value(install_temp):
    """The C-level value of a datetime64 equals its int64 view."""
    import checks

    dt64 = np.datetime64("2016-01-01", "ns")
    assert checks.get_dt64_value(dt64) == dt64.view("i8")
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
def test_get_timedelta64_value(install_temp):
    """The C-level value of a timedelta64 equals its int64 view."""
    import checks

    td64 = np.timedelta64(12345, "h")
    assert checks.get_td64_value(td64) == td64.view("i8")
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
def test_get_datetime64_unit(install_temp):
    """Unit codes returned by the C accessor: NPY_FR_ns=10, NPY_FR_h=5."""
    import checks

    dt64 = np.datetime64("2016-01-01", "ns")
    assert checks.get_dt64_unit(dt64) == 10

    td64 = np.timedelta64(12345, "h")
    assert checks.get_dt64_unit(td64) == 5
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
def test_abstract_scalars(install_temp):
    """Python ints and NumPy integer scalars all register as integers."""
    import checks

    for value in (1, np.int8(1), np.uint64(1)):
        assert checks.is_integer(value)
|
| 150 |
+
def test_default_int(install_temp):
    """The C-level default integer dtype is the singleton np.dtype(int)."""
    import checks
    default_dtype = checks.get_default_integer()
    assert default_dtype is np.dtype(int)
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
def test_ravel_axis(install_temp):
    """The NPY_RAVEL_AXIS sentinel equals the minimum C `int`."""
    import checks
    sentinel = checks.get_ravel_axis()
    assert sentinel == np.iinfo("intc").min
|
| 160 |
+
|
| 161 |
+
|
| 162 |
+
def test_convert_datetime64_to_datetimestruct(install_temp):
    """A fixed datetime64 decomposes into the expected npy_datetimestruct."""
    # GH#21199
    import checks

    expected = {
        "year": 2022, "month": 3, "day": 15,
        "hour": 20, "min": 1, "sec": 55,
        "us": 260292, "ps": 0, "as": 0,
    }
    assert checks.convert_datetime64_to_datetimestruct() == expected
|
| 181 |
+
|
| 182 |
+
|
| 183 |
+
class TestDatetimeStrings:
    """ISO-8601 helpers exposed through the Cython `checks` module."""

    def test_make_iso_8601_datetime(self, install_temp):
        # GH#21199 — formats at NPY_FR_s resolution
        import checks
        moment = datetime(2016, 6, 2, 10, 45, 19)
        assert checks.make_iso_8601_datetime(moment) == b"2016-06-02T10:45:19"

    def test_get_datetime_iso_8601_strlen(self, install_temp):
        # GH#21199 — buffer length required at NPY_FR_ns resolution
        import checks
        assert checks.get_datetime_iso_8601_strlen() == 48
|
| 198 |
+
|
| 199 |
+
|
| 200 |
+
@pytest.mark.parametrize(
    "arrays",
    [
        [np.random.rand(2)],
        [np.random.rand(2), np.random.rand(3, 1)],
        [np.random.rand(2), np.random.rand(2, 3, 2), np.random.rand(1, 3, 2)],
        [np.random.rand(2, 1)] * 4 + [np.random.rand(1, 1, 1)],
    ]
)
def test_multiiter_fields(install_temp, arrays):
    """C-level accessors of np.broadcast agree with its Python attributes."""
    import checks
    bcast = np.broadcast(*arrays)

    pairs = [
        (bcast.ndim, checks.get_multiiter_number_of_dims),
        (bcast.size, checks.get_multiiter_size),
        (bcast.numiter, checks.get_multiiter_num_of_iterators),
        (bcast.shape, checks.get_multiiter_shape),
        (bcast.index, checks.get_multiiter_current_index),
    ]
    for expected, accessor in pairs:
        assert expected == accessor(bcast)
    # The iterator objects wrap the same base arrays.
    assert all(
        x.base is y.base
        for x, y in zip(bcast.iters, checks.get_multiiter_iters(bcast))
    )
|
| 222 |
+
|
| 223 |
+
|
| 224 |
+
def test_dtype_flags(install_temp):
    """dtype.flags round-trips through the C accessor."""
    import checks
    dtype = np.dtype("i,O")  # dtype with somewhat interesting flags
    assert checks.get_dtype_flags(dtype) == dtype.flags
|
| 228 |
+
|
| 229 |
+
|
| 230 |
+
def test_conv_intp(install_temp):
    """intp conversion goes through `__int__`, not `__index__`."""
    import checks

    class HasInt:
        def __int__(self):
            return 3

    # These conversions pass via `__int__`, not `__index__`:
    assert checks.conv_intp(3.) == 3
    assert checks.conv_intp(HasInt()) == 3
|
| 240 |
+
|
| 241 |
+
|
| 242 |
+
def test_npyiter_api(install_temp):
    """The NpyIter C accessor functions must mirror np.nditer attributes."""
    import checks
    data = np.random.rand(3, 2)

    # Plain iterator: flattened to one dimension, no index tracking.
    flat_it = np.nditer(data)
    assert checks.get_npyiter_size(flat_it) == flat_it.itersize == np.prod(data.shape)
    assert checks.get_npyiter_ndim(flat_it) == flat_it.ndim == 1
    assert checks.npyiter_has_index(flat_it) == flat_it.has_index == False

    # c_index enables index tracking but not delayed buffer allocation.
    indexed_it = np.nditer(data, flags=["c_index"])
    assert checks.npyiter_has_index(indexed_it) == indexed_it.has_index == True
    assert (checks.npyiter_has_delayed_bufalloc(indexed_it)
            == indexed_it.has_delayed_bufalloc
            == False)

    # buffered + delay_bufalloc postpones buffer allocation until reset.
    delayed_it = np.nditer(data, flags=["buffered", "delay_bufalloc"])
    assert (checks.npyiter_has_delayed_bufalloc(delayed_it)
            == delayed_it.has_delayed_bufalloc
            == True)

    # multi_index keeps the full dimensionality of the operand.
    multi_it = np.nditer(data, flags=["multi_index"])
    assert checks.get_npyiter_size(multi_it) == multi_it.itersize == np.prod(data.shape)
    assert checks.npyiter_has_multi_index(multi_it) == multi_it.has_multi_index == True
    assert checks.get_npyiter_ndim(multi_it) == multi_it.ndim == 2

    # Two broadcast operands: (3, 2) with (2, 1, 2) -> shape (2, 3, 2).
    other = np.random.rand(2, 1, 2)
    pair_it = np.nditer([data, other])
    assert checks.get_npyiter_nop(pair_it) == pair_it.nop == 2
    assert checks.get_npyiter_size(pair_it) == pair_it.itersize == 12
    assert checks.get_npyiter_ndim(pair_it) == pair_it.ndim == 3
    for mine, theirs in zip(checks.get_npyiter_operands(pair_it), pair_it.operands):
        assert mine is theirs
    for mine, theirs in zip(checks.get_npyiter_itviews(pair_it), pair_it.itviews):
        assert np.allclose(mine, theirs)
|
| 283 |
+
|
| 284 |
+
|
| 285 |
+
def test_fillwithbytes(install_temp):
    """PyArray_FILLWBYTE(arr, 1) must produce an all-ones (1, 2) array."""
    import checks

    filled = checks.compile_fillwithbyte()
    assert_array_equal(filled, np.ones((1, 2)))
|
| 290 |
+
|
| 291 |
+
|
| 292 |
+
def test_complex(install_temp):
    """In-place increment of both members of a cfloat struct element."""
    from checks import inc2_cfloat_struct

    values = np.array([0, 10+10j], dtype="F")
    inc2_cfloat_struct(values)
    # Both real and imaginary parts of element 1 are incremented by 2.
    assert values[1] == (12 + 12j)
|
| 298 |
+
|
| 299 |
+
|
| 300 |
+
def test_npy_uintp_type_enum():
    # Regression check that the NPY_UINTP type enum is exposed correctly.
    import checks
    assert checks.check_npy_uintp_type_enum()
|
| 303 |
+
|
mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_defchararray.py
ADDED
|
@@ -0,0 +1,822 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pytest
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
from numpy._core.multiarray import _vec_string
|
| 5 |
+
from numpy.testing import (
|
| 6 |
+
assert_, assert_equal, assert_array_equal, assert_raises,
|
| 7 |
+
assert_raises_regex
|
| 8 |
+
)
|
| 9 |
+
|
| 10 |
+
kw_unicode_true = {'unicode': True} # make 2to3 work properly
|
| 11 |
+
kw_unicode_false = {'unicode': False}
|
| 12 |
+
|
| 13 |
+
class TestBasic:
|
| 14 |
+
def test_from_object_array(self):
|
| 15 |
+
A = np.array([['abc', 2],
|
| 16 |
+
['long ', '0123456789']], dtype='O')
|
| 17 |
+
B = np.char.array(A)
|
| 18 |
+
assert_equal(B.dtype.itemsize, 10)
|
| 19 |
+
assert_array_equal(B, [[b'abc', b'2'],
|
| 20 |
+
[b'long', b'0123456789']])
|
| 21 |
+
|
| 22 |
+
def test_from_object_array_unicode(self):
|
| 23 |
+
A = np.array([['abc', 'Sigma \u03a3'],
|
| 24 |
+
['long ', '0123456789']], dtype='O')
|
| 25 |
+
assert_raises(ValueError, np.char.array, (A,))
|
| 26 |
+
B = np.char.array(A, **kw_unicode_true)
|
| 27 |
+
assert_equal(B.dtype.itemsize, 10 * np.array('a', 'U').dtype.itemsize)
|
| 28 |
+
assert_array_equal(B, [['abc', 'Sigma \u03a3'],
|
| 29 |
+
['long', '0123456789']])
|
| 30 |
+
|
| 31 |
+
def test_from_string_array(self):
|
| 32 |
+
A = np.array([[b'abc', b'foo'],
|
| 33 |
+
[b'long ', b'0123456789']])
|
| 34 |
+
assert_equal(A.dtype.type, np.bytes_)
|
| 35 |
+
B = np.char.array(A)
|
| 36 |
+
assert_array_equal(B, A)
|
| 37 |
+
assert_equal(B.dtype, A.dtype)
|
| 38 |
+
assert_equal(B.shape, A.shape)
|
| 39 |
+
B[0, 0] = 'changed'
|
| 40 |
+
assert_(B[0, 0] != A[0, 0])
|
| 41 |
+
C = np.char.asarray(A)
|
| 42 |
+
assert_array_equal(C, A)
|
| 43 |
+
assert_equal(C.dtype, A.dtype)
|
| 44 |
+
C[0, 0] = 'changed again'
|
| 45 |
+
assert_(C[0, 0] != B[0, 0])
|
| 46 |
+
assert_(C[0, 0] == A[0, 0])
|
| 47 |
+
|
| 48 |
+
def test_from_unicode_array(self):
|
| 49 |
+
A = np.array([['abc', 'Sigma \u03a3'],
|
| 50 |
+
['long ', '0123456789']])
|
| 51 |
+
assert_equal(A.dtype.type, np.str_)
|
| 52 |
+
B = np.char.array(A)
|
| 53 |
+
assert_array_equal(B, A)
|
| 54 |
+
assert_equal(B.dtype, A.dtype)
|
| 55 |
+
assert_equal(B.shape, A.shape)
|
| 56 |
+
B = np.char.array(A, **kw_unicode_true)
|
| 57 |
+
assert_array_equal(B, A)
|
| 58 |
+
assert_equal(B.dtype, A.dtype)
|
| 59 |
+
assert_equal(B.shape, A.shape)
|
| 60 |
+
|
| 61 |
+
def fail():
|
| 62 |
+
np.char.array(A, **kw_unicode_false)
|
| 63 |
+
|
| 64 |
+
assert_raises(UnicodeEncodeError, fail)
|
| 65 |
+
|
| 66 |
+
def test_unicode_upconvert(self):
|
| 67 |
+
A = np.char.array(['abc'])
|
| 68 |
+
B = np.char.array(['\u03a3'])
|
| 69 |
+
assert_(issubclass((A + B).dtype.type, np.str_))
|
| 70 |
+
|
| 71 |
+
def test_from_string(self):
|
| 72 |
+
A = np.char.array(b'abc')
|
| 73 |
+
assert_equal(len(A), 1)
|
| 74 |
+
assert_equal(len(A[0]), 3)
|
| 75 |
+
assert_(issubclass(A.dtype.type, np.bytes_))
|
| 76 |
+
|
| 77 |
+
def test_from_unicode(self):
|
| 78 |
+
A = np.char.array('\u03a3')
|
| 79 |
+
assert_equal(len(A), 1)
|
| 80 |
+
assert_equal(len(A[0]), 1)
|
| 81 |
+
assert_equal(A.itemsize, 4)
|
| 82 |
+
assert_(issubclass(A.dtype.type, np.str_))
|
| 83 |
+
|
| 84 |
+
class TestVecString:
|
| 85 |
+
def test_non_existent_method(self):
|
| 86 |
+
|
| 87 |
+
def fail():
|
| 88 |
+
_vec_string('a', np.bytes_, 'bogus')
|
| 89 |
+
|
| 90 |
+
assert_raises(AttributeError, fail)
|
| 91 |
+
|
| 92 |
+
def test_non_string_array(self):
|
| 93 |
+
|
| 94 |
+
def fail():
|
| 95 |
+
_vec_string(1, np.bytes_, 'strip')
|
| 96 |
+
|
| 97 |
+
assert_raises(TypeError, fail)
|
| 98 |
+
|
| 99 |
+
def test_invalid_args_tuple(self):
|
| 100 |
+
|
| 101 |
+
def fail():
|
| 102 |
+
_vec_string(['a'], np.bytes_, 'strip', 1)
|
| 103 |
+
|
| 104 |
+
assert_raises(TypeError, fail)
|
| 105 |
+
|
| 106 |
+
def test_invalid_type_descr(self):
|
| 107 |
+
|
| 108 |
+
def fail():
|
| 109 |
+
_vec_string(['a'], 'BOGUS', 'strip')
|
| 110 |
+
|
| 111 |
+
assert_raises(TypeError, fail)
|
| 112 |
+
|
| 113 |
+
def test_invalid_function_args(self):
|
| 114 |
+
|
| 115 |
+
def fail():
|
| 116 |
+
_vec_string(['a'], np.bytes_, 'strip', (1,))
|
| 117 |
+
|
| 118 |
+
assert_raises(TypeError, fail)
|
| 119 |
+
|
| 120 |
+
def test_invalid_result_type(self):
|
| 121 |
+
|
| 122 |
+
def fail():
|
| 123 |
+
_vec_string(['a'], np.int_, 'strip')
|
| 124 |
+
|
| 125 |
+
assert_raises(TypeError, fail)
|
| 126 |
+
|
| 127 |
+
def test_broadcast_error(self):
|
| 128 |
+
|
| 129 |
+
def fail():
|
| 130 |
+
_vec_string([['abc', 'def']], np.int_, 'find', (['a', 'd', 'j'],))
|
| 131 |
+
|
| 132 |
+
assert_raises(ValueError, fail)
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
class TestWhitespace:
    """Trailing whitespace is ignored in chararray comparisons."""

    def setup_method(self):
        self.A = np.array([['abc ', '123 '],
                           ['789 ', 'xyz ']]).view(np.char.chararray)
        self.B = np.array([['abc', '123'],
                           ['789', 'xyz']]).view(np.char.chararray)

    def test1(self):
        padded, plain = self.A, self.B
        # Padded and unpadded strings compare equal in every ordering sense.
        assert_(np.all(padded == plain))
        assert_(not np.any(padded != plain))
        assert_(np.all(padded >= plain))
        assert_(np.all(padded <= plain))
        assert_(not np.any(padded > plain))
        assert_(not np.any(padded < plain))
|
| 149 |
+
|
| 150 |
+
class TestChar:
    """Single-character ('c') dtype chararrays."""

    def setup_method(self):
        # 'c' dtype splits the scalar into one element per character.
        self.A = np.array('abc1', dtype='c').view(np.char.chararray)

    def test_it(self):
        assert_equal(self.A.shape, (4,))
        first_two_upper = self.A.upper()[:2].tobytes()
        assert_equal(first_two_upper, b'AB')
|
| 157 |
+
|
| 158 |
+
class TestComparisons:
    """Element-wise rich comparisons between two chararrays."""

    def setup_method(self):
        # Subclasses (TestComparisonsMixed*) override A or B with an
        # explicit unicode dtype, so keep the attribute names stable.
        self.A = np.array([['abc', 'abcc', '123'],
                           ['789', 'abc', 'xyz']]).view(np.char.chararray)
        self.B = np.array([['efg', 'efg', '123 '],
                           ['051', 'efgg', 'tuv']]).view(np.char.chararray)

    def test_not_equal(self):
        expected = [[True, True, False], [True, True, True]]
        assert_array_equal(self.A != self.B, expected)

    def test_equal(self):
        expected = [[False, False, True], [False, False, False]]
        assert_array_equal(self.A == self.B, expected)

    def test_greater_equal(self):
        expected = [[False, False, True], [True, False, True]]
        assert_array_equal(self.A >= self.B, expected)

    def test_less_equal(self):
        expected = [[True, True, True], [False, True, False]]
        assert_array_equal(self.A <= self.B, expected)

    def test_greater(self):
        expected = [[False, False, False], [True, False, True]]
        assert_array_equal(self.A > self.B, expected)

    def test_less(self):
        expected = [[True, True, False], [False, True, False]]
        assert_array_equal(self.A < self.B, expected)

    def test_type(self):
        # Comparison helpers return plain ndarrays for both array and
        # scalar inputs.
        array_result = np.char.equal(self.A, self.B)
        scalar_result = np.char.equal('a', 'a')
        assert_(isinstance(array_result, np.ndarray))
        assert_(isinstance(scalar_result, np.ndarray))
|
| 194 |
+
|
| 195 |
+
class TestComparisonsMixed1(TestComparisons):
    """Ticket #1276: repeat the comparisons with B as explicit unicode."""

    def setup_method(self):
        super().setup_method()
        self.B = np.array(
            [['efg', 'efg', '123 '],
             ['051', 'efgg', 'tuv']], np.str_).view(np.char.chararray)
|
| 203 |
+
|
| 204 |
+
class TestComparisonsMixed2(TestComparisons):
    """Ticket #1276: repeat the comparisons with A as explicit unicode."""

    def setup_method(self):
        super().setup_method()
        self.A = np.array(
            [['abc', 'abcc', '123'],
             ['789', 'abc', 'xyz']], np.str_).view(np.char.chararray)
|
| 212 |
+
|
| 213 |
+
class TestInformation:
|
| 214 |
+
def setup_method(self):
|
| 215 |
+
self.A = np.array([[' abc ', ''],
|
| 216 |
+
['12345', 'MixedCase'],
|
| 217 |
+
['123 \t 345 \0 ', 'UPPER']]) \
|
| 218 |
+
.view(np.char.chararray)
|
| 219 |
+
self.B = np.array([[' \u03a3 ', ''],
|
| 220 |
+
['12345', 'MixedCase'],
|
| 221 |
+
['123 \t 345 \0 ', 'UPPER']]) \
|
| 222 |
+
.view(np.char.chararray)
|
| 223 |
+
# Array with longer strings, > MEMCHR_CUT_OFF in code.
|
| 224 |
+
self.C = (np.array(['ABCDEFGHIJKLMNOPQRSTUVWXYZ',
|
| 225 |
+
'01234567890123456789012345'])
|
| 226 |
+
.view(np.char.chararray))
|
| 227 |
+
|
| 228 |
+
def test_len(self):
|
| 229 |
+
assert_(issubclass(np.char.str_len(self.A).dtype.type, np.integer))
|
| 230 |
+
assert_array_equal(np.char.str_len(self.A), [[5, 0], [5, 9], [12, 5]])
|
| 231 |
+
assert_array_equal(np.char.str_len(self.B), [[3, 0], [5, 9], [12, 5]])
|
| 232 |
+
|
| 233 |
+
def test_count(self):
|
| 234 |
+
assert_(issubclass(self.A.count('').dtype.type, np.integer))
|
| 235 |
+
assert_array_equal(self.A.count('a'), [[1, 0], [0, 1], [0, 0]])
|
| 236 |
+
assert_array_equal(self.A.count('123'), [[0, 0], [1, 0], [1, 0]])
|
| 237 |
+
# Python doesn't seem to like counting NULL characters
|
| 238 |
+
# assert_array_equal(self.A.count('\0'), [[0, 0], [0, 0], [1, 0]])
|
| 239 |
+
assert_array_equal(self.A.count('a', 0, 2), [[1, 0], [0, 0], [0, 0]])
|
| 240 |
+
assert_array_equal(self.B.count('a'), [[0, 0], [0, 1], [0, 0]])
|
| 241 |
+
assert_array_equal(self.B.count('123'), [[0, 0], [1, 0], [1, 0]])
|
| 242 |
+
# assert_array_equal(self.B.count('\0'), [[0, 0], [0, 0], [1, 0]])
|
| 243 |
+
|
| 244 |
+
def test_endswith(self):
|
| 245 |
+
assert_(issubclass(self.A.endswith('').dtype.type, np.bool))
|
| 246 |
+
assert_array_equal(self.A.endswith(' '), [[1, 0], [0, 0], [1, 0]])
|
| 247 |
+
assert_array_equal(self.A.endswith('3', 0, 3), [[0, 0], [1, 0], [1, 0]])
|
| 248 |
+
|
| 249 |
+
def fail():
|
| 250 |
+
self.A.endswith('3', 'fdjk')
|
| 251 |
+
|
| 252 |
+
assert_raises(TypeError, fail)
|
| 253 |
+
|
| 254 |
+
@pytest.mark.parametrize(
|
| 255 |
+
"dtype, encode",
|
| 256 |
+
[("U", str),
|
| 257 |
+
("S", lambda x: x.encode('ascii')),
|
| 258 |
+
])
|
| 259 |
+
def test_find(self, dtype, encode):
|
| 260 |
+
A = self.A.astype(dtype)
|
| 261 |
+
assert_(issubclass(A.find(encode('a')).dtype.type, np.integer))
|
| 262 |
+
assert_array_equal(A.find(encode('a')),
|
| 263 |
+
[[1, -1], [-1, 6], [-1, -1]])
|
| 264 |
+
assert_array_equal(A.find(encode('3')),
|
| 265 |
+
[[-1, -1], [2, -1], [2, -1]])
|
| 266 |
+
assert_array_equal(A.find(encode('a'), 0, 2),
|
| 267 |
+
[[1, -1], [-1, -1], [-1, -1]])
|
| 268 |
+
assert_array_equal(A.find([encode('1'), encode('P')]),
|
| 269 |
+
[[-1, -1], [0, -1], [0, 1]])
|
| 270 |
+
C = self.C.astype(dtype)
|
| 271 |
+
assert_array_equal(C.find(encode('M')), [12, -1])
|
| 272 |
+
|
| 273 |
+
def test_index(self):
|
| 274 |
+
|
| 275 |
+
def fail():
|
| 276 |
+
self.A.index('a')
|
| 277 |
+
|
| 278 |
+
assert_raises(ValueError, fail)
|
| 279 |
+
assert_(np.char.index('abcba', 'b') == 1)
|
| 280 |
+
assert_(issubclass(np.char.index('abcba', 'b').dtype.type, np.integer))
|
| 281 |
+
|
| 282 |
+
def test_isalnum(self):
|
| 283 |
+
assert_(issubclass(self.A.isalnum().dtype.type, np.bool))
|
| 284 |
+
assert_array_equal(self.A.isalnum(), [[False, False], [True, True], [False, True]])
|
| 285 |
+
|
| 286 |
+
def test_isalpha(self):
|
| 287 |
+
assert_(issubclass(self.A.isalpha().dtype.type, np.bool))
|
| 288 |
+
assert_array_equal(self.A.isalpha(), [[False, False], [False, True], [False, True]])
|
| 289 |
+
|
| 290 |
+
def test_isdigit(self):
|
| 291 |
+
assert_(issubclass(self.A.isdigit().dtype.type, np.bool))
|
| 292 |
+
assert_array_equal(self.A.isdigit(), [[False, False], [True, False], [False, False]])
|
| 293 |
+
|
| 294 |
+
def test_islower(self):
|
| 295 |
+
assert_(issubclass(self.A.islower().dtype.type, np.bool))
|
| 296 |
+
assert_array_equal(self.A.islower(), [[True, False], [False, False], [False, False]])
|
| 297 |
+
|
| 298 |
+
def test_isspace(self):
|
| 299 |
+
assert_(issubclass(self.A.isspace().dtype.type, np.bool))
|
| 300 |
+
assert_array_equal(self.A.isspace(), [[False, False], [False, False], [False, False]])
|
| 301 |
+
|
| 302 |
+
def test_istitle(self):
|
| 303 |
+
assert_(issubclass(self.A.istitle().dtype.type, np.bool))
|
| 304 |
+
assert_array_equal(self.A.istitle(), [[False, False], [False, False], [False, False]])
|
| 305 |
+
|
| 306 |
+
def test_isupper(self):
|
| 307 |
+
assert_(issubclass(self.A.isupper().dtype.type, np.bool))
|
| 308 |
+
assert_array_equal(self.A.isupper(), [[False, False], [False, False], [False, True]])
|
| 309 |
+
|
| 310 |
+
def test_rfind(self):
|
| 311 |
+
assert_(issubclass(self.A.rfind('a').dtype.type, np.integer))
|
| 312 |
+
assert_array_equal(self.A.rfind('a'), [[1, -1], [-1, 6], [-1, -1]])
|
| 313 |
+
assert_array_equal(self.A.rfind('3'), [[-1, -1], [2, -1], [6, -1]])
|
| 314 |
+
assert_array_equal(self.A.rfind('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]])
|
| 315 |
+
assert_array_equal(self.A.rfind(['1', 'P']), [[-1, -1], [0, -1], [0, 2]])
|
| 316 |
+
|
| 317 |
+
def test_rindex(self):
|
| 318 |
+
|
| 319 |
+
def fail():
|
| 320 |
+
self.A.rindex('a')
|
| 321 |
+
|
| 322 |
+
assert_raises(ValueError, fail)
|
| 323 |
+
assert_(np.char.rindex('abcba', 'b') == 3)
|
| 324 |
+
assert_(issubclass(np.char.rindex('abcba', 'b').dtype.type, np.integer))
|
| 325 |
+
|
| 326 |
+
def test_startswith(self):
|
| 327 |
+
assert_(issubclass(self.A.startswith('').dtype.type, np.bool))
|
| 328 |
+
assert_array_equal(self.A.startswith(' '), [[1, 0], [0, 0], [0, 0]])
|
| 329 |
+
assert_array_equal(self.A.startswith('1', 0, 3), [[0, 0], [1, 0], [1, 0]])
|
| 330 |
+
|
| 331 |
+
def fail():
|
| 332 |
+
self.A.startswith('3', 'fdjk')
|
| 333 |
+
|
| 334 |
+
assert_raises(TypeError, fail)
|
| 335 |
+
|
| 336 |
+
|
| 337 |
+
class TestMethods:
|
| 338 |
+
def setup_method(self):
|
| 339 |
+
self.A = np.array([[' abc ', ''],
|
| 340 |
+
['12345', 'MixedCase'],
|
| 341 |
+
['123 \t 345 \0 ', 'UPPER']],
|
| 342 |
+
dtype='S').view(np.char.chararray)
|
| 343 |
+
self.B = np.array([[' \u03a3 ', ''],
|
| 344 |
+
['12345', 'MixedCase'],
|
| 345 |
+
['123 \t 345 \0 ', 'UPPER']]).view(
|
| 346 |
+
np.char.chararray)
|
| 347 |
+
|
| 348 |
+
def test_capitalize(self):
|
| 349 |
+
tgt = [[b' abc ', b''],
|
| 350 |
+
[b'12345', b'Mixedcase'],
|
| 351 |
+
[b'123 \t 345 \0 ', b'Upper']]
|
| 352 |
+
assert_(issubclass(self.A.capitalize().dtype.type, np.bytes_))
|
| 353 |
+
assert_array_equal(self.A.capitalize(), tgt)
|
| 354 |
+
|
| 355 |
+
tgt = [[' \u03c3 ', ''],
|
| 356 |
+
['12345', 'Mixedcase'],
|
| 357 |
+
['123 \t 345 \0 ', 'Upper']]
|
| 358 |
+
assert_(issubclass(self.B.capitalize().dtype.type, np.str_))
|
| 359 |
+
assert_array_equal(self.B.capitalize(), tgt)
|
| 360 |
+
|
| 361 |
+
def test_center(self):
|
| 362 |
+
assert_(issubclass(self.A.center(10).dtype.type, np.bytes_))
|
| 363 |
+
C = self.A.center([10, 20])
|
| 364 |
+
assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]])
|
| 365 |
+
|
| 366 |
+
C = self.A.center(20, b'#')
|
| 367 |
+
assert_(np.all(C.startswith(b'#')))
|
| 368 |
+
assert_(np.all(C.endswith(b'#')))
|
| 369 |
+
|
| 370 |
+
C = np.char.center(b'FOO', [[10, 20], [15, 8]])
|
| 371 |
+
tgt = [[b' FOO ', b' FOO '],
|
| 372 |
+
[b' FOO ', b' FOO ']]
|
| 373 |
+
assert_(issubclass(C.dtype.type, np.bytes_))
|
| 374 |
+
assert_array_equal(C, tgt)
|
| 375 |
+
|
| 376 |
+
def test_decode(self):
|
| 377 |
+
A = np.char.array([b'\\u03a3'])
|
| 378 |
+
assert_(A.decode('unicode-escape')[0] == '\u03a3')
|
| 379 |
+
|
| 380 |
+
def test_encode(self):
|
| 381 |
+
B = self.B.encode('unicode_escape')
|
| 382 |
+
assert_(B[0][0] == str(' \\u03a3 ').encode('latin1'))
|
| 383 |
+
|
| 384 |
+
def test_expandtabs(self):
|
| 385 |
+
T = self.A.expandtabs()
|
| 386 |
+
assert_(T[2, 0] == b'123 345 \0')
|
| 387 |
+
|
| 388 |
+
def test_join(self):
|
| 389 |
+
# NOTE: list(b'123') == [49, 50, 51]
|
| 390 |
+
# so that b','.join(b'123') results to an error on Py3
|
| 391 |
+
A0 = self.A.decode('ascii')
|
| 392 |
+
|
| 393 |
+
A = np.char.join([',', '#'], A0)
|
| 394 |
+
assert_(issubclass(A.dtype.type, np.str_))
|
| 395 |
+
tgt = np.array([[' ,a,b,c, ', ''],
|
| 396 |
+
['1,2,3,4,5', 'M#i#x#e#d#C#a#s#e'],
|
| 397 |
+
['1,2,3, ,\t, ,3,4,5, ,\x00, ', 'U#P#P#E#R']])
|
| 398 |
+
assert_array_equal(np.char.join([',', '#'], A0), tgt)
|
| 399 |
+
|
| 400 |
+
def test_ljust(self):
|
| 401 |
+
assert_(issubclass(self.A.ljust(10).dtype.type, np.bytes_))
|
| 402 |
+
|
| 403 |
+
C = self.A.ljust([10, 20])
|
| 404 |
+
assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]])
|
| 405 |
+
|
| 406 |
+
C = self.A.ljust(20, b'#')
|
| 407 |
+
assert_array_equal(C.startswith(b'#'), [
|
| 408 |
+
[False, True], [False, False], [False, False]])
|
| 409 |
+
assert_(np.all(C.endswith(b'#')))
|
| 410 |
+
|
| 411 |
+
C = np.char.ljust(b'FOO', [[10, 20], [15, 8]])
|
| 412 |
+
tgt = [[b'FOO ', b'FOO '],
|
| 413 |
+
[b'FOO ', b'FOO ']]
|
| 414 |
+
assert_(issubclass(C.dtype.type, np.bytes_))
|
| 415 |
+
assert_array_equal(C, tgt)
|
| 416 |
+
|
| 417 |
+
def test_lower(self):
|
| 418 |
+
tgt = [[b' abc ', b''],
|
| 419 |
+
[b'12345', b'mixedcase'],
|
| 420 |
+
[b'123 \t 345 \0 ', b'upper']]
|
| 421 |
+
assert_(issubclass(self.A.lower().dtype.type, np.bytes_))
|
| 422 |
+
assert_array_equal(self.A.lower(), tgt)
|
| 423 |
+
|
| 424 |
+
tgt = [[' \u03c3 ', ''],
|
| 425 |
+
['12345', 'mixedcase'],
|
| 426 |
+
['123 \t 345 \0 ', 'upper']]
|
| 427 |
+
assert_(issubclass(self.B.lower().dtype.type, np.str_))
|
| 428 |
+
assert_array_equal(self.B.lower(), tgt)
|
| 429 |
+
|
| 430 |
+
def test_lstrip(self):
|
| 431 |
+
tgt = [[b'abc ', b''],
|
| 432 |
+
[b'12345', b'MixedCase'],
|
| 433 |
+
[b'123 \t 345 \0 ', b'UPPER']]
|
| 434 |
+
assert_(issubclass(self.A.lstrip().dtype.type, np.bytes_))
|
| 435 |
+
assert_array_equal(self.A.lstrip(), tgt)
|
| 436 |
+
|
| 437 |
+
tgt = [[b' abc', b''],
|
| 438 |
+
[b'2345', b'ixedCase'],
|
| 439 |
+
[b'23 \t 345 \x00', b'UPPER']]
|
| 440 |
+
assert_array_equal(self.A.lstrip([b'1', b'M']), tgt)
|
| 441 |
+
|
| 442 |
+
tgt = [['\u03a3 ', ''],
|
| 443 |
+
['12345', 'MixedCase'],
|
| 444 |
+
['123 \t 345 \0 ', 'UPPER']]
|
| 445 |
+
assert_(issubclass(self.B.lstrip().dtype.type, np.str_))
|
| 446 |
+
assert_array_equal(self.B.lstrip(), tgt)
|
| 447 |
+
|
| 448 |
+
def test_partition(self):
|
| 449 |
+
P = self.A.partition([b'3', b'M'])
|
| 450 |
+
tgt = [[(b' abc ', b'', b''), (b'', b'', b'')],
|
| 451 |
+
[(b'12', b'3', b'45'), (b'', b'M', b'ixedCase')],
|
| 452 |
+
[(b'12', b'3', b' \t 345 \0 '), (b'UPPER', b'', b'')]]
|
| 453 |
+
assert_(issubclass(P.dtype.type, np.bytes_))
|
| 454 |
+
assert_array_equal(P, tgt)
|
| 455 |
+
|
| 456 |
+
def test_replace(self):
|
| 457 |
+
R = self.A.replace([b'3', b'a'],
|
| 458 |
+
[b'##########', b'@'])
|
| 459 |
+
tgt = [[b' abc ', b''],
|
| 460 |
+
[b'12##########45', b'MixedC@se'],
|
| 461 |
+
[b'12########## \t ##########45 \x00 ', b'UPPER']]
|
| 462 |
+
assert_(issubclass(R.dtype.type, np.bytes_))
|
| 463 |
+
assert_array_equal(R, tgt)
|
| 464 |
+
# Test special cases that should just return the input array,
|
| 465 |
+
# since replacements are not possible or do nothing.
|
| 466 |
+
S1 = self.A.replace(b'A very long byte string, longer than A', b'')
|
| 467 |
+
assert_array_equal(S1, self.A)
|
| 468 |
+
S2 = self.A.replace(b'', b'')
|
| 469 |
+
assert_array_equal(S2, self.A)
|
| 470 |
+
S3 = self.A.replace(b'3', b'3')
|
| 471 |
+
assert_array_equal(S3, self.A)
|
| 472 |
+
S4 = self.A.replace(b'3', b'', count=0)
|
| 473 |
+
assert_array_equal(S4, self.A)
|
| 474 |
+
|
| 475 |
+
def test_replace_count_and_size(self):
|
| 476 |
+
a = np.array(['0123456789' * i for i in range(4)]
|
| 477 |
+
).view(np.char.chararray)
|
| 478 |
+
r1 = a.replace('5', 'ABCDE')
|
| 479 |
+
assert r1.dtype.itemsize == (3*10 + 3*4) * 4
|
| 480 |
+
assert_array_equal(r1, np.array(['01234ABCDE6789' * i
|
| 481 |
+
for i in range(4)]))
|
| 482 |
+
r2 = a.replace('5', 'ABCDE', count=1)
|
| 483 |
+
assert r2.dtype.itemsize == (3*10 + 4) * 4
|
| 484 |
+
r3 = a.replace('5', 'ABCDE', count=0)
|
| 485 |
+
assert r3.dtype.itemsize == a.dtype.itemsize
|
| 486 |
+
assert_array_equal(r3, a)
|
| 487 |
+
# Negative values mean to replace all.
|
| 488 |
+
r4 = a.replace('5', 'ABCDE', count=-1)
|
| 489 |
+
assert r4.dtype.itemsize == (3*10 + 3*4) * 4
|
| 490 |
+
assert_array_equal(r4, r1)
|
| 491 |
+
# We can do count on an element-by-element basis.
|
| 492 |
+
r5 = a.replace('5', 'ABCDE', count=[-1, -1, -1, 1])
|
| 493 |
+
assert r5.dtype.itemsize == (3*10 + 4) * 4
|
| 494 |
+
assert_array_equal(r5, np.array(
|
| 495 |
+
['01234ABCDE6789' * i for i in range(3)]
|
| 496 |
+
+ ['01234ABCDE6789' + '0123456789' * 2]))
|
| 497 |
+
|
| 498 |
+
def test_replace_broadcasting(self):
|
| 499 |
+
a = np.array('0,0,0').view(np.char.chararray)
|
| 500 |
+
r1 = a.replace('0', '1', count=np.arange(3))
|
| 501 |
+
assert r1.dtype == a.dtype
|
| 502 |
+
assert_array_equal(r1, np.array(['0,0,0', '1,0,0', '1,1,0']))
|
| 503 |
+
r2 = a.replace('0', [['1'], ['2']], count=np.arange(1, 4))
|
| 504 |
+
assert_array_equal(r2, np.array([['1,0,0', '1,1,0', '1,1,1'],
|
| 505 |
+
['2,0,0', '2,2,0', '2,2,2']]))
|
| 506 |
+
r3 = a.replace(['0', '0,0', '0,0,0'], 'X')
|
| 507 |
+
assert_array_equal(r3, np.array(['X,X,X', 'X,0', 'X']))
|
| 508 |
+
|
| 509 |
+
def test_rjust(self):
|
| 510 |
+
assert_(issubclass(self.A.rjust(10).dtype.type, np.bytes_))
|
| 511 |
+
|
| 512 |
+
C = self.A.rjust([10, 20])
|
| 513 |
+
assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]])
|
| 514 |
+
|
| 515 |
+
C = self.A.rjust(20, b'#')
|
| 516 |
+
assert_(np.all(C.startswith(b'#')))
|
| 517 |
+
assert_array_equal(C.endswith(b'#'),
|
| 518 |
+
[[False, True], [False, False], [False, False]])
|
| 519 |
+
|
| 520 |
+
C = np.char.rjust(b'FOO', [[10, 20], [15, 8]])
|
| 521 |
+
tgt = [[b' FOO', b' FOO'],
|
| 522 |
+
[b' FOO', b' FOO']]
|
| 523 |
+
assert_(issubclass(C.dtype.type, np.bytes_))
|
| 524 |
+
assert_array_equal(C, tgt)
|
| 525 |
+
|
| 526 |
+
def test_rpartition(self):
|
| 527 |
+
P = self.A.rpartition([b'3', b'M'])
|
| 528 |
+
tgt = [[(b'', b'', b' abc '), (b'', b'', b'')],
|
| 529 |
+
[(b'12', b'3', b'45'), (b'', b'M', b'ixedCase')],
|
| 530 |
+
[(b'123 \t ', b'3', b'45 \0 '), (b'', b'', b'UPPER')]]
|
| 531 |
+
assert_(issubclass(P.dtype.type, np.bytes_))
|
| 532 |
+
assert_array_equal(P, tgt)
|
| 533 |
+
|
| 534 |
+
def test_rsplit(self):
|
| 535 |
+
A = self.A.rsplit(b'3')
|
| 536 |
+
tgt = [[[b' abc '], [b'']],
|
| 537 |
+
[[b'12', b'45'], [b'MixedCase']],
|
| 538 |
+
[[b'12', b' \t ', b'45 \x00 '], [b'UPPER']]]
|
| 539 |
+
assert_(issubclass(A.dtype.type, np.object_))
|
| 540 |
+
assert_equal(A.tolist(), tgt)
|
| 541 |
+
|
| 542 |
+
def test_rstrip(self):
|
| 543 |
+
assert_(issubclass(self.A.rstrip().dtype.type, np.bytes_))
|
| 544 |
+
|
| 545 |
+
tgt = [[b' abc', b''],
|
| 546 |
+
[b'12345', b'MixedCase'],
|
| 547 |
+
[b'123 \t 345', b'UPPER']]
|
| 548 |
+
assert_array_equal(self.A.rstrip(), tgt)
|
| 549 |
+
|
| 550 |
+
tgt = [[b' abc ', b''],
|
| 551 |
+
[b'1234', b'MixedCase'],
|
| 552 |
+
[b'123 \t 345 \x00', b'UPP']
|
| 553 |
+
]
|
| 554 |
+
assert_array_equal(self.A.rstrip([b'5', b'ER']), tgt)
|
| 555 |
+
|
| 556 |
+
tgt = [[' \u03a3', ''],
|
| 557 |
+
['12345', 'MixedCase'],
|
| 558 |
+
['123 \t 345', 'UPPER']]
|
| 559 |
+
assert_(issubclass(self.B.rstrip().dtype.type, np.str_))
|
| 560 |
+
assert_array_equal(self.B.rstrip(), tgt)
|
| 561 |
+
|
| 562 |
+
def test_strip(self):
|
| 563 |
+
tgt = [[b'abc', b''],
|
| 564 |
+
[b'12345', b'MixedCase'],
|
| 565 |
+
[b'123 \t 345', b'UPPER']]
|
| 566 |
+
assert_(issubclass(self.A.strip().dtype.type, np.bytes_))
|
| 567 |
+
assert_array_equal(self.A.strip(), tgt)
|
| 568 |
+
|
| 569 |
+
tgt = [[b' abc ', b''],
|
| 570 |
+
[b'234', b'ixedCas'],
|
| 571 |
+
[b'23 \t 345 \x00', b'UPP']]
|
| 572 |
+
assert_array_equal(self.A.strip([b'15', b'EReM']), tgt)
|
| 573 |
+
|
| 574 |
+
tgt = [['\u03a3', ''],
|
| 575 |
+
['12345', 'MixedCase'],
|
| 576 |
+
['123 \t 345', 'UPPER']]
|
| 577 |
+
assert_(issubclass(self.B.strip().dtype.type, np.str_))
|
| 578 |
+
assert_array_equal(self.B.strip(), tgt)
|
| 579 |
+
|
| 580 |
+
def test_split(self):
|
| 581 |
+
A = self.A.split(b'3')
|
| 582 |
+
tgt = [
|
| 583 |
+
[[b' abc '], [b'']],
|
| 584 |
+
[[b'12', b'45'], [b'MixedCase']],
|
| 585 |
+
[[b'12', b' \t ', b'45 \x00 '], [b'UPPER']]]
|
| 586 |
+
assert_(issubclass(A.dtype.type, np.object_))
|
| 587 |
+
assert_equal(A.tolist(), tgt)
|
| 588 |
+
|
| 589 |
+
def test_splitlines(self):
|
| 590 |
+
A = np.char.array(['abc\nfds\nwer']).splitlines()
|
| 591 |
+
assert_(issubclass(A.dtype.type, np.object_))
|
| 592 |
+
assert_(A.shape == (1,))
|
| 593 |
+
assert_(len(A[0]) == 3)
|
| 594 |
+
|
| 595 |
+
def test_swapcase(self):
|
| 596 |
+
tgt = [[b' ABC ', b''],
|
| 597 |
+
[b'12345', b'mIXEDcASE'],
|
| 598 |
+
[b'123 \t 345 \0 ', b'upper']]
|
| 599 |
+
assert_(issubclass(self.A.swapcase().dtype.type, np.bytes_))
|
| 600 |
+
assert_array_equal(self.A.swapcase(), tgt)
|
| 601 |
+
|
| 602 |
+
tgt = [[' \u03c3 ', ''],
|
| 603 |
+
['12345', 'mIXEDcASE'],
|
| 604 |
+
['123 \t 345 \0 ', 'upper']]
|
| 605 |
+
assert_(issubclass(self.B.swapcase().dtype.type, np.str_))
|
| 606 |
+
assert_array_equal(self.B.swapcase(), tgt)
|
| 607 |
+
|
| 608 |
+
def test_title(self):
|
| 609 |
+
tgt = [[b' Abc ', b''],
|
| 610 |
+
[b'12345', b'Mixedcase'],
|
| 611 |
+
[b'123 \t 345 \0 ', b'Upper']]
|
| 612 |
+
assert_(issubclass(self.A.title().dtype.type, np.bytes_))
|
| 613 |
+
assert_array_equal(self.A.title(), tgt)
|
| 614 |
+
|
| 615 |
+
tgt = [[' \u03a3 ', ''],
|
| 616 |
+
['12345', 'Mixedcase'],
|
| 617 |
+
['123 \t 345 \0 ', 'Upper']]
|
| 618 |
+
assert_(issubclass(self.B.title().dtype.type, np.str_))
|
| 619 |
+
assert_array_equal(self.B.title(), tgt)
|
| 620 |
+
|
| 621 |
+
def test_upper(self):
|
| 622 |
+
tgt = [[b' ABC ', b''],
|
| 623 |
+
[b'12345', b'MIXEDCASE'],
|
| 624 |
+
[b'123 \t 345 \0 ', b'UPPER']]
|
| 625 |
+
assert_(issubclass(self.A.upper().dtype.type, np.bytes_))
|
| 626 |
+
assert_array_equal(self.A.upper(), tgt)
|
| 627 |
+
|
| 628 |
+
tgt = [[' \u03a3 ', ''],
|
| 629 |
+
['12345', 'MIXEDCASE'],
|
| 630 |
+
['123 \t 345 \0 ', 'UPPER']]
|
| 631 |
+
assert_(issubclass(self.B.upper().dtype.type, np.str_))
|
| 632 |
+
assert_array_equal(self.B.upper(), tgt)
|
| 633 |
+
|
| 634 |
+
def test_isnumeric(self):
|
| 635 |
+
|
| 636 |
+
def fail():
|
| 637 |
+
self.A.isnumeric()
|
| 638 |
+
|
| 639 |
+
assert_raises(TypeError, fail)
|
| 640 |
+
assert_(issubclass(self.B.isnumeric().dtype.type, np.bool))
|
| 641 |
+
assert_array_equal(self.B.isnumeric(), [
|
| 642 |
+
[False, False], [True, False], [False, False]])
|
| 643 |
+
|
| 644 |
+
def test_isdecimal(self):
|
| 645 |
+
|
| 646 |
+
def fail():
|
| 647 |
+
self.A.isdecimal()
|
| 648 |
+
|
| 649 |
+
assert_raises(TypeError, fail)
|
| 650 |
+
assert_(issubclass(self.B.isdecimal().dtype.type, np.bool))
|
| 651 |
+
assert_array_equal(self.B.isdecimal(), [
|
| 652 |
+
[False, False], [True, False], [False, False]])
|
| 653 |
+
|
| 654 |
+
|
| 655 |
+
class TestOperations:
|
| 656 |
+
def setup_method(self):
|
| 657 |
+
self.A = np.array([['abc', '123'],
|
| 658 |
+
['789', 'xyz']]).view(np.char.chararray)
|
| 659 |
+
self.B = np.array([['efg', '456'],
|
| 660 |
+
['051', 'tuv']]).view(np.char.chararray)
|
| 661 |
+
|
| 662 |
+
def test_add(self):
|
| 663 |
+
AB = np.array([['abcefg', '123456'],
|
| 664 |
+
['789051', 'xyztuv']]).view(np.char.chararray)
|
| 665 |
+
assert_array_equal(AB, (self.A + self.B))
|
| 666 |
+
assert_(len((self.A + self.B)[0][0]) == 6)
|
| 667 |
+
|
| 668 |
+
def test_radd(self):
|
| 669 |
+
QA = np.array([['qabc', 'q123'],
|
| 670 |
+
['q789', 'qxyz']]).view(np.char.chararray)
|
| 671 |
+
assert_array_equal(QA, ('q' + self.A))
|
| 672 |
+
|
| 673 |
+
def test_mul(self):
|
| 674 |
+
A = self.A
|
| 675 |
+
for r in (2, 3, 5, 7, 197):
|
| 676 |
+
Ar = np.array([[A[0, 0]*r, A[0, 1]*r],
|
| 677 |
+
[A[1, 0]*r, A[1, 1]*r]]).view(np.char.chararray)
|
| 678 |
+
|
| 679 |
+
assert_array_equal(Ar, (self.A * r))
|
| 680 |
+
|
| 681 |
+
for ob in [object(), 'qrs']:
|
| 682 |
+
with assert_raises_regex(ValueError,
|
| 683 |
+
'Can only multiply by integers'):
|
| 684 |
+
A*ob
|
| 685 |
+
|
| 686 |
+
def test_rmul(self):
|
| 687 |
+
A = self.A
|
| 688 |
+
for r in (2, 3, 5, 7, 197):
|
| 689 |
+
Ar = np.array([[A[0, 0]*r, A[0, 1]*r],
|
| 690 |
+
[A[1, 0]*r, A[1, 1]*r]]).view(np.char.chararray)
|
| 691 |
+
assert_array_equal(Ar, (r * self.A))
|
| 692 |
+
|
| 693 |
+
for ob in [object(), 'qrs']:
|
| 694 |
+
with assert_raises_regex(ValueError,
|
| 695 |
+
'Can only multiply by integers'):
|
| 696 |
+
ob * A
|
| 697 |
+
|
| 698 |
+
def test_mod(self):
|
| 699 |
+
"""Ticket #856"""
|
| 700 |
+
F = np.array([['%d', '%f'], ['%s', '%r']]).view(np.char.chararray)
|
| 701 |
+
C = np.array([[3, 7], [19, 1]], dtype=np.int64)
|
| 702 |
+
FC = np.array([['3', '7.000000'],
|
| 703 |
+
['19', 'np.int64(1)']]).view(np.char.chararray)
|
| 704 |
+
assert_array_equal(FC, F % C)
|
| 705 |
+
|
| 706 |
+
A = np.array([['%.3f', '%d'], ['%s', '%r']]).view(np.char.chararray)
|
| 707 |
+
A1 = np.array([['1.000', '1'],
|
| 708 |
+
['1', repr(np.array(1)[()])]]).view(np.char.chararray)
|
| 709 |
+
assert_array_equal(A1, (A % 1))
|
| 710 |
+
|
| 711 |
+
A2 = np.array([['1.000', '2'],
|
| 712 |
+
['3', repr(np.array(4)[()])]]).view(np.char.chararray)
|
| 713 |
+
assert_array_equal(A2, (A % [[1, 2], [3, 4]]))
|
| 714 |
+
|
| 715 |
+
def test_rmod(self):
|
| 716 |
+
assert_(("%s" % self.A) == str(self.A))
|
| 717 |
+
assert_(("%r" % self.A) == repr(self.A))
|
| 718 |
+
|
| 719 |
+
for ob in [42, object()]:
|
| 720 |
+
with assert_raises_regex(
|
| 721 |
+
TypeError, "unsupported operand type.* and 'chararray'"):
|
| 722 |
+
ob % self.A
|
| 723 |
+
|
| 724 |
+
def test_slice(self):
|
| 725 |
+
"""Regression test for https://github.com/numpy/numpy/issues/5982"""
|
| 726 |
+
|
| 727 |
+
arr = np.array([['abc ', 'def '], ['geh ', 'ijk ']],
|
| 728 |
+
dtype='S4').view(np.char.chararray)
|
| 729 |
+
sl1 = arr[:]
|
| 730 |
+
assert_array_equal(sl1, arr)
|
| 731 |
+
assert_(sl1.base is arr)
|
| 732 |
+
assert_(sl1.base.base is arr.base)
|
| 733 |
+
|
| 734 |
+
sl2 = arr[:, :]
|
| 735 |
+
assert_array_equal(sl2, arr)
|
| 736 |
+
assert_(sl2.base is arr)
|
| 737 |
+
assert_(sl2.base.base is arr.base)
|
| 738 |
+
|
| 739 |
+
assert_(arr[0, 0] == b'abc')
|
| 740 |
+
|
| 741 |
+
@pytest.mark.parametrize('data', [['plate', ' ', 'shrimp'],
|
| 742 |
+
[b'retro', b' ', b'encabulator']])
|
| 743 |
+
def test_getitem_length_zero_item(self, data):
|
| 744 |
+
# Regression test for gh-26375.
|
| 745 |
+
a = np.char.array(data)
|
| 746 |
+
# a.dtype.type() will be an empty string or bytes instance.
|
| 747 |
+
# The equality test will fail if a[1] has the wrong type
|
| 748 |
+
# or does not have length 0.
|
| 749 |
+
assert_equal(a[1], a.dtype.type())
|
| 750 |
+
|
| 751 |
+
|
| 752 |
+
class TestMethodsEmptyArray:
|
| 753 |
+
def setup_method(self):
|
| 754 |
+
self.U = np.array([], dtype='U')
|
| 755 |
+
self.S = np.array([], dtype='S')
|
| 756 |
+
|
| 757 |
+
def test_encode(self):
|
| 758 |
+
res = np.char.encode(self.U)
|
| 759 |
+
assert_array_equal(res, [])
|
| 760 |
+
assert_(res.dtype.char == 'S')
|
| 761 |
+
|
| 762 |
+
def test_decode(self):
|
| 763 |
+
res = np.char.decode(self.S)
|
| 764 |
+
assert_array_equal(res, [])
|
| 765 |
+
assert_(res.dtype.char == 'U')
|
| 766 |
+
|
| 767 |
+
def test_decode_with_reshape(self):
|
| 768 |
+
res = np.char.decode(self.S.reshape((1, 0, 1)))
|
| 769 |
+
assert_(res.shape == (1, 0, 1))
|
| 770 |
+
|
| 771 |
+
|
| 772 |
+
class TestMethodsScalarValues:
    """np.char free functions applied to scalar (non-array) inputs."""

    def test_mod(self):
        data = np.array([[' abc ', ''],
                         ['12345', 'MixedCase'],
                         ['123 \t 345 \0 ', 'UPPER']], dtype='S')
        expected = [[b'123 abc ', b'123'],
                    [b'12312345', b'123MixedCase'],
                    [b'123123 \t 345 \0 ', b'123UPPER']]
        assert_array_equal(np.char.mod(b"123%s", data), expected)

    def test_decode(self):
        encoded = b'\x81\xc1\x81\xc1\x81\xc1'
        assert_equal(np.char.decode(encoded, encoding='cp037'),
                     'aAaAaA')

    def test_encode(self):
        text = 'aAaAaA'
        assert_equal(np.char.encode(text, encoding='cp037'),
                     b'\x81\xc1\x81\xc1\x81\xc1')

    def test_expandtabs(self):
        text = "\tone level of indentation\n\t\ttwo levels of indentation"
        assert_equal(
            np.char.expandtabs(text, tabsize=2),
            "  one level of indentation\n    two levels of indentation"
        )

    def test_join(self):
        separators = np.array(['-', '_'])
        assert_array_equal(np.char.join(separators, 'hello'),
                           ['h-e-l-l-o', 'h_e_l_l_o'])

    def test_partition(self):
        assert_equal(np.char.partition('This string', ' '),
                     ['This', ' ', 'string'])

    def test_rpartition(self):
        assert_equal(np.char.rpartition('This string here', ' '),
                     ['This string', ' ', 'here'])

    def test_replace(self):
        assert_equal(np.char.replace('Python is good', 'good', 'great'),
                     'Python is great')
|
| 815 |
+
|
| 816 |
+
|
| 817 |
+
def test_empty_indexing():
    """Regression test for ticket 1948.

    Indexing a chararray with an empty list/array must return an empty
    chararray rather than one containing a single empty string.
    """
    arr = np.char.chararray((4,))
    assert_(arr[[]].size == 0)
|
mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_dtype.py
ADDED
|
@@ -0,0 +1,1963 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sys
|
| 2 |
+
import operator
|
| 3 |
+
import pytest
|
| 4 |
+
import ctypes
|
| 5 |
+
import gc
|
| 6 |
+
import types
|
| 7 |
+
from typing import Any
|
| 8 |
+
import pickle
|
| 9 |
+
|
| 10 |
+
import numpy as np
|
| 11 |
+
import numpy.dtypes
|
| 12 |
+
from numpy._core._rational_tests import rational
|
| 13 |
+
from numpy._core._multiarray_tests import create_custom_field_dtype
|
| 14 |
+
from numpy.testing import (
|
| 15 |
+
assert_, assert_equal, assert_array_equal, assert_raises, HAS_REFCOUNT,
|
| 16 |
+
IS_PYSTON)
|
| 17 |
+
from itertools import permutations
|
| 18 |
+
import random
|
| 19 |
+
|
| 20 |
+
import hypothesis
|
| 21 |
+
from hypothesis.extra import numpy as hynp
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def assert_dtype_equal(a, b):
    """Assert dtypes compare equal AND hash equal (the hash/eq contract)."""
    assert_equal(a, b)
    assert_equal(hash(a), hash(b),
                 "two equivalent types do not hash to the same value !")
|
| 29 |
+
|
| 30 |
+
def assert_dtype_not_equal(a, b):
    """Assert dtypes compare unequal AND hash differently."""
    assert_(a != b)
    assert_(hash(a) != hash(b),
            "two different types hash to the same value !")
|
| 34 |
+
|
| 35 |
+
class TestBuiltin:
|
| 36 |
+
@pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object])
|
| 37 |
+
def test_run(self, t):
|
| 38 |
+
"""Only test hash runs at all."""
|
| 39 |
+
dt = np.dtype(t)
|
| 40 |
+
hash(dt)
|
| 41 |
+
|
| 42 |
+
@pytest.mark.parametrize('t', [int, float])
|
| 43 |
+
def test_dtype(self, t):
|
| 44 |
+
# Make sure equivalent byte order char hash the same (e.g. < and = on
|
| 45 |
+
# little endian)
|
| 46 |
+
dt = np.dtype(t)
|
| 47 |
+
dt2 = dt.newbyteorder("<")
|
| 48 |
+
dt3 = dt.newbyteorder(">")
|
| 49 |
+
if dt == dt2:
|
| 50 |
+
assert_(dt.byteorder != dt2.byteorder, "bogus test")
|
| 51 |
+
assert_dtype_equal(dt, dt2)
|
| 52 |
+
else:
|
| 53 |
+
assert_(dt.byteorder != dt3.byteorder, "bogus test")
|
| 54 |
+
assert_dtype_equal(dt, dt3)
|
| 55 |
+
|
| 56 |
+
def test_equivalent_dtype_hashing(self):
|
| 57 |
+
# Make sure equivalent dtypes with different type num hash equal
|
| 58 |
+
uintp = np.dtype(np.uintp)
|
| 59 |
+
if uintp.itemsize == 4:
|
| 60 |
+
left = uintp
|
| 61 |
+
right = np.dtype(np.uint32)
|
| 62 |
+
else:
|
| 63 |
+
left = uintp
|
| 64 |
+
right = np.dtype(np.ulonglong)
|
| 65 |
+
assert_(left == right)
|
| 66 |
+
assert_(hash(left) == hash(right))
|
| 67 |
+
|
| 68 |
+
def test_invalid_types(self):
|
| 69 |
+
# Make sure invalid type strings raise an error
|
| 70 |
+
|
| 71 |
+
assert_raises(TypeError, np.dtype, 'O3')
|
| 72 |
+
assert_raises(TypeError, np.dtype, 'O5')
|
| 73 |
+
assert_raises(TypeError, np.dtype, 'O7')
|
| 74 |
+
assert_raises(TypeError, np.dtype, 'b3')
|
| 75 |
+
assert_raises(TypeError, np.dtype, 'h4')
|
| 76 |
+
assert_raises(TypeError, np.dtype, 'I5')
|
| 77 |
+
assert_raises(TypeError, np.dtype, 'e3')
|
| 78 |
+
assert_raises(TypeError, np.dtype, 'f5')
|
| 79 |
+
|
| 80 |
+
if np.dtype('g').itemsize == 8 or np.dtype('g').itemsize == 16:
|
| 81 |
+
assert_raises(TypeError, np.dtype, 'g12')
|
| 82 |
+
elif np.dtype('g').itemsize == 12:
|
| 83 |
+
assert_raises(TypeError, np.dtype, 'g16')
|
| 84 |
+
|
| 85 |
+
if np.dtype('l').itemsize == 8:
|
| 86 |
+
assert_raises(TypeError, np.dtype, 'l4')
|
| 87 |
+
assert_raises(TypeError, np.dtype, 'L4')
|
| 88 |
+
else:
|
| 89 |
+
assert_raises(TypeError, np.dtype, 'l8')
|
| 90 |
+
assert_raises(TypeError, np.dtype, 'L8')
|
| 91 |
+
|
| 92 |
+
if np.dtype('q').itemsize == 8:
|
| 93 |
+
assert_raises(TypeError, np.dtype, 'q4')
|
| 94 |
+
assert_raises(TypeError, np.dtype, 'Q4')
|
| 95 |
+
else:
|
| 96 |
+
assert_raises(TypeError, np.dtype, 'q8')
|
| 97 |
+
assert_raises(TypeError, np.dtype, 'Q8')
|
| 98 |
+
|
| 99 |
+
# Make sure negative-sized dtype raises an error
|
| 100 |
+
assert_raises(TypeError, np.dtype, 'S-1')
|
| 101 |
+
assert_raises(TypeError, np.dtype, 'U-1')
|
| 102 |
+
assert_raises(TypeError, np.dtype, 'V-1')
|
| 103 |
+
|
| 104 |
+
def test_richcompare_invalid_dtype_equality(self):
|
| 105 |
+
# Make sure objects that cannot be converted to valid
|
| 106 |
+
# dtypes results in False/True when compared to valid dtypes.
|
| 107 |
+
# Here 7 cannot be converted to dtype. No exceptions should be raised
|
| 108 |
+
|
| 109 |
+
assert not np.dtype(np.int32) == 7, "dtype richcompare failed for =="
|
| 110 |
+
assert np.dtype(np.int32) != 7, "dtype richcompare failed for !="
|
| 111 |
+
|
| 112 |
+
@pytest.mark.parametrize(
|
| 113 |
+
'operation',
|
| 114 |
+
[operator.le, operator.lt, operator.ge, operator.gt])
|
| 115 |
+
def test_richcompare_invalid_dtype_comparison(self, operation):
|
| 116 |
+
# Make sure TypeError is raised for comparison operators
|
| 117 |
+
# for invalid dtypes. Here 7 is an invalid dtype.
|
| 118 |
+
|
| 119 |
+
with pytest.raises(TypeError):
|
| 120 |
+
operation(np.dtype(np.int32), 7)
|
| 121 |
+
|
| 122 |
+
@pytest.mark.parametrize("dtype",
|
| 123 |
+
['Bool', 'Bytes0', 'Complex32', 'Complex64',
|
| 124 |
+
'Datetime64', 'Float16', 'Float32', 'Float64',
|
| 125 |
+
'Int8', 'Int16', 'Int32', 'Int64',
|
| 126 |
+
'Object0', 'Str0', 'Timedelta64',
|
| 127 |
+
'UInt8', 'UInt16', 'Uint32', 'UInt32',
|
| 128 |
+
'Uint64', 'UInt64', 'Void0',
|
| 129 |
+
"Float128", "Complex128"])
|
| 130 |
+
def test_numeric_style_types_are_invalid(self, dtype):
|
| 131 |
+
with assert_raises(TypeError):
|
| 132 |
+
np.dtype(dtype)
|
| 133 |
+
|
| 134 |
+
def test_expired_dtypes_with_bad_bytesize(self):
|
| 135 |
+
match: str = r".*removed in NumPy 2.0.*"
|
| 136 |
+
with pytest.raises(TypeError, match=match):
|
| 137 |
+
np.dtype("int0")
|
| 138 |
+
with pytest.raises(TypeError, match=match):
|
| 139 |
+
np.dtype("uint0")
|
| 140 |
+
with pytest.raises(TypeError, match=match):
|
| 141 |
+
np.dtype("bool8")
|
| 142 |
+
with pytest.raises(TypeError, match=match):
|
| 143 |
+
np.dtype("bytes0")
|
| 144 |
+
with pytest.raises(TypeError, match=match):
|
| 145 |
+
np.dtype("str0")
|
| 146 |
+
with pytest.raises(TypeError, match=match):
|
| 147 |
+
np.dtype("object0")
|
| 148 |
+
with pytest.raises(TypeError, match=match):
|
| 149 |
+
np.dtype("void0")
|
| 150 |
+
|
| 151 |
+
@pytest.mark.parametrize(
|
| 152 |
+
'value',
|
| 153 |
+
['m8', 'M8', 'datetime64', 'timedelta64',
|
| 154 |
+
'i4, (2,3)f8, f4', 'S3, 3u8, (3,4)S10',
|
| 155 |
+
'>f', '<f', '=f', '|f',
|
| 156 |
+
])
|
| 157 |
+
def test_dtype_bytes_str_equivalence(self, value):
|
| 158 |
+
bytes_value = value.encode('ascii')
|
| 159 |
+
from_bytes = np.dtype(bytes_value)
|
| 160 |
+
from_str = np.dtype(value)
|
| 161 |
+
assert_dtype_equal(from_bytes, from_str)
|
| 162 |
+
|
| 163 |
+
def test_dtype_from_bytes(self):
|
| 164 |
+
# Empty bytes object
|
| 165 |
+
assert_raises(TypeError, np.dtype, b'')
|
| 166 |
+
# Byte order indicator, but no type
|
| 167 |
+
assert_raises(TypeError, np.dtype, b'|')
|
| 168 |
+
|
| 169 |
+
# Single character with ordinal < NPY_NTYPES_LEGACY returns
|
| 170 |
+
# type by index into _builtin_descrs
|
| 171 |
+
assert_dtype_equal(np.dtype(bytes([0])), np.dtype('bool'))
|
| 172 |
+
assert_dtype_equal(np.dtype(bytes([17])), np.dtype(object))
|
| 173 |
+
|
| 174 |
+
# Single character where value is a valid type code
|
| 175 |
+
assert_dtype_equal(np.dtype(b'f'), np.dtype('float32'))
|
| 176 |
+
|
| 177 |
+
# Bytes with non-ascii values raise errors
|
| 178 |
+
assert_raises(TypeError, np.dtype, b'\xff')
|
| 179 |
+
assert_raises(TypeError, np.dtype, b's\xff')
|
| 180 |
+
|
| 181 |
+
def test_bad_param(self):
|
| 182 |
+
# Can't give a size that's too small
|
| 183 |
+
assert_raises(ValueError, np.dtype,
|
| 184 |
+
{'names':['f0', 'f1'],
|
| 185 |
+
'formats':['i4', 'i1'],
|
| 186 |
+
'offsets':[0, 4],
|
| 187 |
+
'itemsize':4})
|
| 188 |
+
# If alignment is enabled, the alignment (4) must divide the itemsize
|
| 189 |
+
assert_raises(ValueError, np.dtype,
|
| 190 |
+
{'names':['f0', 'f1'],
|
| 191 |
+
'formats':['i4', 'i1'],
|
| 192 |
+
'offsets':[0, 4],
|
| 193 |
+
'itemsize':9}, align=True)
|
| 194 |
+
# If alignment is enabled, the individual fields must be aligned
|
| 195 |
+
assert_raises(ValueError, np.dtype,
|
| 196 |
+
{'names':['f0', 'f1'],
|
| 197 |
+
'formats':['i1', 'f4'],
|
| 198 |
+
'offsets':[0, 2]}, align=True)
|
| 199 |
+
|
| 200 |
+
def test_field_order_equality(self):
|
| 201 |
+
x = np.dtype({'names': ['A', 'B'],
|
| 202 |
+
'formats': ['i4', 'f4'],
|
| 203 |
+
'offsets': [0, 4]})
|
| 204 |
+
y = np.dtype({'names': ['B', 'A'],
|
| 205 |
+
'formats': ['i4', 'f4'],
|
| 206 |
+
'offsets': [4, 0]})
|
| 207 |
+
assert_equal(x == y, False)
|
| 208 |
+
# This is an safe cast (not equiv) due to the different names:
|
| 209 |
+
assert np.can_cast(x, y, casting="safe")
|
| 210 |
+
|
| 211 |
+
@pytest.mark.parametrize(
|
| 212 |
+
["type_char", "char_size", "scalar_type"],
|
| 213 |
+
[["U", 4, np.str_],
|
| 214 |
+
["S", 1, np.bytes_]])
|
| 215 |
+
def test_create_string_dtypes_directly(
|
| 216 |
+
self, type_char, char_size, scalar_type):
|
| 217 |
+
dtype_class = type(np.dtype(type_char))
|
| 218 |
+
|
| 219 |
+
dtype = dtype_class(8)
|
| 220 |
+
assert dtype.type is scalar_type
|
| 221 |
+
assert dtype.itemsize == 8*char_size
|
| 222 |
+
|
| 223 |
+
def test_create_invalid_string_errors(self):
|
| 224 |
+
one_too_big = np.iinfo(np.intc).max + 1
|
| 225 |
+
with pytest.raises(TypeError):
|
| 226 |
+
type(np.dtype("U"))(one_too_big // 4)
|
| 227 |
+
|
| 228 |
+
with pytest.raises(TypeError):
|
| 229 |
+
# Code coverage for very large numbers:
|
| 230 |
+
type(np.dtype("U"))(np.iinfo(np.intp).max // 4 + 1)
|
| 231 |
+
|
| 232 |
+
if one_too_big < sys.maxsize:
|
| 233 |
+
with pytest.raises(TypeError):
|
| 234 |
+
type(np.dtype("S"))(one_too_big)
|
| 235 |
+
|
| 236 |
+
with pytest.raises(ValueError):
|
| 237 |
+
type(np.dtype("U"))(-1)
|
| 238 |
+
|
| 239 |
+
# OverflowError on 32 bit
|
| 240 |
+
with pytest.raises((TypeError, OverflowError)):
|
| 241 |
+
# see gh-26556
|
| 242 |
+
type(np.dtype("S"))(2**61)
|
| 243 |
+
|
| 244 |
+
with pytest.raises(TypeError):
|
| 245 |
+
np.dtype("S1234hello")
|
| 246 |
+
|
| 247 |
+
def test_leading_zero_parsing(self):
|
| 248 |
+
dt1 = np.dtype('S010')
|
| 249 |
+
dt2 = np.dtype('S10')
|
| 250 |
+
|
| 251 |
+
assert dt1 == dt2
|
| 252 |
+
assert repr(dt1) == "dtype('S10')"
|
| 253 |
+
assert dt1.itemsize == 10
|
| 254 |
+
|
| 255 |
+
|
| 256 |
+
class TestRecord:
|
| 257 |
+
def test_equivalent_record(self):
    """Test whether equivalent record dtypes hash the same."""
    first = np.dtype([('yo', int)])
    second = np.dtype([('yo', int)])
    assert_dtype_equal(first, second)
|
| 262 |
+
|
| 263 |
+
def test_different_names(self):
    # In theory, they may hash the same (collision) ?
    named_yo = np.dtype([('yo', int)])
    named_ye = np.dtype([('ye', int)])
    assert_dtype_not_equal(named_yo, named_ye)
|
| 268 |
+
|
| 269 |
+
def test_different_titles(self):
|
| 270 |
+
# In theory, they may hash the same (collision) ?
|
| 271 |
+
a = np.dtype({'names': ['r', 'b'],
|
| 272 |
+
'formats': ['u1', 'u1'],
|
| 273 |
+
'titles': ['Red pixel', 'Blue pixel']})
|
| 274 |
+
b = np.dtype({'names': ['r', 'b'],
|
| 275 |
+
'formats': ['u1', 'u1'],
|
| 276 |
+
'titles': ['RRed pixel', 'Blue pixel']})
|
| 277 |
+
assert_dtype_not_equal(a, b)
|
| 278 |
+
|
| 279 |
+
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
|
| 280 |
+
def test_refcount_dictionary_setting(self):
|
| 281 |
+
names = ["name1"]
|
| 282 |
+
formats = ["f8"]
|
| 283 |
+
titles = ["t1"]
|
| 284 |
+
offsets = [0]
|
| 285 |
+
d = dict(names=names, formats=formats, titles=titles, offsets=offsets)
|
| 286 |
+
refcounts = {k: sys.getrefcount(i) for k, i in d.items()}
|
| 287 |
+
np.dtype(d)
|
| 288 |
+
refcounts_new = {k: sys.getrefcount(i) for k, i in d.items()}
|
| 289 |
+
assert refcounts == refcounts_new
|
| 290 |
+
|
| 291 |
+
def test_mutate(self):
|
| 292 |
+
# Mutating a dtype should reset the cached hash value.
|
| 293 |
+
# NOTE: Mutating should be deprecated, but new API added to replace it.
|
| 294 |
+
a = np.dtype([('yo', int)])
|
| 295 |
+
b = np.dtype([('yo', int)])
|
| 296 |
+
c = np.dtype([('ye', int)])
|
| 297 |
+
assert_dtype_equal(a, b)
|
| 298 |
+
assert_dtype_not_equal(a, c)
|
| 299 |
+
a.names = ['ye']
|
| 300 |
+
assert_dtype_equal(a, c)
|
| 301 |
+
assert_dtype_not_equal(a, b)
|
| 302 |
+
state = b.__reduce__()[2]
|
| 303 |
+
a.__setstate__(state)
|
| 304 |
+
assert_dtype_equal(a, b)
|
| 305 |
+
assert_dtype_not_equal(a, c)
|
| 306 |
+
|
| 307 |
+
def test_init_simple_structured(self):
|
| 308 |
+
dt1 = np.dtype("i, i")
|
| 309 |
+
assert dt1.names == ("f0", "f1")
|
| 310 |
+
|
| 311 |
+
dt2 = np.dtype("i,")
|
| 312 |
+
assert dt2.names == ("f0",)
|
| 313 |
+
|
| 314 |
+
def test_mutate_error(self):
|
| 315 |
+
# NOTE: Mutating should be deprecated, but new API added to replace it.
|
| 316 |
+
a = np.dtype("i,i")
|
| 317 |
+
|
| 318 |
+
with pytest.raises(ValueError, match="must replace all names at once"):
|
| 319 |
+
a.names = ["f0"]
|
| 320 |
+
|
| 321 |
+
with pytest.raises(ValueError, match=".*and not string"):
|
| 322 |
+
a.names = ["f0", b"not a unicode name"]
|
| 323 |
+
|
| 324 |
+
def test_not_lists(self):
|
| 325 |
+
"""Test if an appropriate exception is raised when passing bad values to
|
| 326 |
+
the dtype constructor.
|
| 327 |
+
"""
|
| 328 |
+
assert_raises(TypeError, np.dtype,
|
| 329 |
+
dict(names={'A', 'B'}, formats=['f8', 'i4']))
|
| 330 |
+
assert_raises(TypeError, np.dtype,
|
| 331 |
+
dict(names=['A', 'B'], formats={'f8', 'i4'}))
|
| 332 |
+
|
| 333 |
+
def test_aligned_size(self):
|
| 334 |
+
# Check that structured dtypes get padded to an aligned size
|
| 335 |
+
dt = np.dtype('i4, i1', align=True)
|
| 336 |
+
assert_equal(dt.itemsize, 8)
|
| 337 |
+
dt = np.dtype([('f0', 'i4'), ('f1', 'i1')], align=True)
|
| 338 |
+
assert_equal(dt.itemsize, 8)
|
| 339 |
+
dt = np.dtype({'names':['f0', 'f1'],
|
| 340 |
+
'formats':['i4', 'u1'],
|
| 341 |
+
'offsets':[0, 4]}, align=True)
|
| 342 |
+
assert_equal(dt.itemsize, 8)
|
| 343 |
+
dt = np.dtype({'f0': ('i4', 0), 'f1':('u1', 4)}, align=True)
|
| 344 |
+
assert_equal(dt.itemsize, 8)
|
| 345 |
+
# Nesting should preserve that alignment
|
| 346 |
+
dt1 = np.dtype([('f0', 'i4'),
|
| 347 |
+
('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]),
|
| 348 |
+
('f2', 'i1')], align=True)
|
| 349 |
+
assert_equal(dt1.itemsize, 20)
|
| 350 |
+
dt2 = np.dtype({'names':['f0', 'f1', 'f2'],
|
| 351 |
+
'formats':['i4',
|
| 352 |
+
[('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')],
|
| 353 |
+
'i1'],
|
| 354 |
+
'offsets':[0, 4, 16]}, align=True)
|
| 355 |
+
assert_equal(dt2.itemsize, 20)
|
| 356 |
+
dt3 = np.dtype({'f0': ('i4', 0),
|
| 357 |
+
'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 4),
|
| 358 |
+
'f2': ('i1', 16)}, align=True)
|
| 359 |
+
assert_equal(dt3.itemsize, 20)
|
| 360 |
+
assert_equal(dt1, dt2)
|
| 361 |
+
assert_equal(dt2, dt3)
|
| 362 |
+
# Nesting should preserve packing
|
| 363 |
+
dt1 = np.dtype([('f0', 'i4'),
|
| 364 |
+
('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]),
|
| 365 |
+
('f2', 'i1')], align=False)
|
| 366 |
+
assert_equal(dt1.itemsize, 11)
|
| 367 |
+
dt2 = np.dtype({'names':['f0', 'f1', 'f2'],
|
| 368 |
+
'formats':['i4',
|
| 369 |
+
[('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')],
|
| 370 |
+
'i1'],
|
| 371 |
+
'offsets':[0, 4, 10]}, align=False)
|
| 372 |
+
assert_equal(dt2.itemsize, 11)
|
| 373 |
+
dt3 = np.dtype({'f0': ('i4', 0),
|
| 374 |
+
'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 4),
|
| 375 |
+
'f2': ('i1', 10)}, align=False)
|
| 376 |
+
assert_equal(dt3.itemsize, 11)
|
| 377 |
+
assert_equal(dt1, dt2)
|
| 378 |
+
assert_equal(dt2, dt3)
|
| 379 |
+
# Array of subtype should preserve alignment
|
| 380 |
+
dt1 = np.dtype([('a', '|i1'),
|
| 381 |
+
('b', [('f0', '<i2'),
|
| 382 |
+
('f1', '<f4')], 2)], align=True)
|
| 383 |
+
assert_equal(dt1.descr, [('a', '|i1'), ('', '|V3'),
|
| 384 |
+
('b', [('f0', '<i2'), ('', '|V2'),
|
| 385 |
+
('f1', '<f4')], (2,))])
|
| 386 |
+
|
| 387 |
+
def test_empty_struct_alignment(self):
|
| 388 |
+
# Empty dtypes should have an alignment of 1
|
| 389 |
+
dt = np.dtype([], align=True)
|
| 390 |
+
assert_equal(dt.alignment, 1)
|
| 391 |
+
dt = np.dtype([('f0', [])], align=True)
|
| 392 |
+
assert_equal(dt.alignment, 1)
|
| 393 |
+
dt = np.dtype({'names': [],
|
| 394 |
+
'formats': [],
|
| 395 |
+
'offsets': []}, align=True)
|
| 396 |
+
assert_equal(dt.alignment, 1)
|
| 397 |
+
dt = np.dtype({'names': ['f0'],
|
| 398 |
+
'formats': [[]],
|
| 399 |
+
'offsets': [0]}, align=True)
|
| 400 |
+
assert_equal(dt.alignment, 1)
|
| 401 |
+
|
| 402 |
+
def test_union_struct(self):
|
| 403 |
+
# Should be able to create union dtypes
|
| 404 |
+
dt = np.dtype({'names':['f0', 'f1', 'f2'], 'formats':['<u4', '<u2', '<u2'],
|
| 405 |
+
'offsets':[0, 0, 2]}, align=True)
|
| 406 |
+
assert_equal(dt.itemsize, 4)
|
| 407 |
+
a = np.array([3], dtype='<u4').view(dt)
|
| 408 |
+
a['f1'] = 10
|
| 409 |
+
a['f2'] = 36
|
| 410 |
+
assert_equal(a['f0'], 10 + 36*256*256)
|
| 411 |
+
# Should be able to specify fields out of order
|
| 412 |
+
dt = np.dtype({'names':['f0', 'f1', 'f2'], 'formats':['<u4', '<u2', '<u2'],
|
| 413 |
+
'offsets':[4, 0, 2]}, align=True)
|
| 414 |
+
assert_equal(dt.itemsize, 8)
|
| 415 |
+
# field name should not matter: assignment is by position
|
| 416 |
+
dt2 = np.dtype({'names':['f2', 'f0', 'f1'],
|
| 417 |
+
'formats':['<u4', '<u2', '<u2'],
|
| 418 |
+
'offsets':[4, 0, 2]}, align=True)
|
| 419 |
+
vals = [(0, 1, 2), (3, 2**15-1, 4)]
|
| 420 |
+
vals2 = [(0, 1, 2), (3, 2**15-1, 4)]
|
| 421 |
+
a = np.array(vals, dt)
|
| 422 |
+
b = np.array(vals2, dt2)
|
| 423 |
+
assert_equal(a.astype(dt2), b)
|
| 424 |
+
assert_equal(b.astype(dt), a)
|
| 425 |
+
assert_equal(a.view(dt2), b)
|
| 426 |
+
assert_equal(b.view(dt), a)
|
| 427 |
+
# Should not be able to overlap objects with other types
|
| 428 |
+
assert_raises(TypeError, np.dtype,
|
| 429 |
+
{'names':['f0', 'f1'],
|
| 430 |
+
'formats':['O', 'i1'],
|
| 431 |
+
'offsets':[0, 2]})
|
| 432 |
+
assert_raises(TypeError, np.dtype,
|
| 433 |
+
{'names':['f0', 'f1'],
|
| 434 |
+
'formats':['i4', 'O'],
|
| 435 |
+
'offsets':[0, 3]})
|
| 436 |
+
assert_raises(TypeError, np.dtype,
|
| 437 |
+
{'names':['f0', 'f1'],
|
| 438 |
+
'formats':[[('a', 'O')], 'i1'],
|
| 439 |
+
'offsets':[0, 2]})
|
| 440 |
+
assert_raises(TypeError, np.dtype,
|
| 441 |
+
{'names':['f0', 'f1'],
|
| 442 |
+
'formats':['i4', [('a', 'O')]],
|
| 443 |
+
'offsets':[0, 3]})
|
| 444 |
+
# Out of order should still be ok, however
|
| 445 |
+
dt = np.dtype({'names':['f0', 'f1'],
|
| 446 |
+
'formats':['i1', 'O'],
|
| 447 |
+
'offsets':[np.dtype('intp').itemsize, 0]})
|
| 448 |
+
|
| 449 |
+
@pytest.mark.parametrize(["obj", "dtype", "expected"],
|
| 450 |
+
[([], ("2f4"), np.empty((0, 2), dtype="f4")),
|
| 451 |
+
(3, "(3,)f4", [3, 3, 3]),
|
| 452 |
+
(np.float64(2), "(2,)f4", [2, 2]),
|
| 453 |
+
([((0, 1), (1, 2)), ((2,),)], '(2,2)f4', None),
|
| 454 |
+
(["1", "2"], "2i", None)])
|
| 455 |
+
def test_subarray_list(self, obj, dtype, expected):
|
| 456 |
+
dtype = np.dtype(dtype)
|
| 457 |
+
res = np.array(obj, dtype=dtype)
|
| 458 |
+
|
| 459 |
+
if expected is None:
|
| 460 |
+
# iterate the 1-d list to fill the array
|
| 461 |
+
expected = np.empty(len(obj), dtype=dtype)
|
| 462 |
+
for i in range(len(expected)):
|
| 463 |
+
expected[i] = obj[i]
|
| 464 |
+
|
| 465 |
+
assert_array_equal(res, expected)
|
| 466 |
+
|
| 467 |
+
def test_parenthesized_single_number(self):
|
| 468 |
+
with pytest.raises(TypeError, match="not understood"):
|
| 469 |
+
np.dtype("(2)f4")
|
| 470 |
+
|
| 471 |
+
# Deprecation also tested in
|
| 472 |
+
# test_deprecations.py::TestDeprecatedDTypeParenthesizedRepeatCount
|
| 473 |
+
# Left here to allow easy conversion to an exception check.
|
| 474 |
+
with pytest.warns(DeprecationWarning,
|
| 475 |
+
match="parenthesized single number"):
|
| 476 |
+
np.dtype("(2)f4,")
|
| 477 |
+
|
| 478 |
+
def test_comma_datetime(self):
|
| 479 |
+
dt = np.dtype('M8[D],datetime64[Y],i8')
|
| 480 |
+
assert_equal(dt, np.dtype([('f0', 'M8[D]'),
|
| 481 |
+
('f1', 'datetime64[Y]'),
|
| 482 |
+
('f2', 'i8')]))
|
| 483 |
+
|
| 484 |
+
def test_from_dictproxy(self):
|
| 485 |
+
# Tests for PR #5920
|
| 486 |
+
dt = np.dtype({'names': ['a', 'b'], 'formats': ['i4', 'f4']})
|
| 487 |
+
assert_dtype_equal(dt, np.dtype(dt.fields))
|
| 488 |
+
dt2 = np.dtype((np.void, dt.fields))
|
| 489 |
+
assert_equal(dt2.fields, dt.fields)
|
| 490 |
+
|
| 491 |
+
def test_from_dict_with_zero_width_field(self):
|
| 492 |
+
# Regression test for #6430 / #2196
|
| 493 |
+
dt = np.dtype([('val1', np.float32, (0,)), ('val2', int)])
|
| 494 |
+
dt2 = np.dtype({'names': ['val1', 'val2'],
|
| 495 |
+
'formats': [(np.float32, (0,)), int]})
|
| 496 |
+
|
| 497 |
+
assert_dtype_equal(dt, dt2)
|
| 498 |
+
assert_equal(dt.fields['val1'][0].itemsize, 0)
|
| 499 |
+
assert_equal(dt.itemsize, dt.fields['val2'][0].itemsize)
|
| 500 |
+
|
| 501 |
+
def test_bool_commastring(self):
|
| 502 |
+
d = np.dtype('?,?,?') # raises?
|
| 503 |
+
assert_equal(len(d.names), 3)
|
| 504 |
+
for n in d.names:
|
| 505 |
+
assert_equal(d.fields[n][0], np.dtype('?'))
|
| 506 |
+
|
| 507 |
+
def test_nonint_offsets(self):
|
| 508 |
+
# gh-8059
|
| 509 |
+
def make_dtype(off):
|
| 510 |
+
return np.dtype({'names': ['A'], 'formats': ['i4'],
|
| 511 |
+
'offsets': [off]})
|
| 512 |
+
|
| 513 |
+
assert_raises(TypeError, make_dtype, 'ASD')
|
| 514 |
+
assert_raises(OverflowError, make_dtype, 2**70)
|
| 515 |
+
assert_raises(TypeError, make_dtype, 2.3)
|
| 516 |
+
assert_raises(ValueError, make_dtype, -10)
|
| 517 |
+
|
| 518 |
+
# no errors here:
|
| 519 |
+
dt = make_dtype(np.uint32(0))
|
| 520 |
+
np.zeros(1, dtype=dt)[0].item()
|
| 521 |
+
|
| 522 |
+
def test_fields_by_index(self):
|
| 523 |
+
dt = np.dtype([('a', np.int8), ('b', np.float32, 3)])
|
| 524 |
+
assert_dtype_equal(dt[0], np.dtype(np.int8))
|
| 525 |
+
assert_dtype_equal(dt[1], np.dtype((np.float32, 3)))
|
| 526 |
+
assert_dtype_equal(dt[-1], dt[1])
|
| 527 |
+
assert_dtype_equal(dt[-2], dt[0])
|
| 528 |
+
assert_raises(IndexError, lambda: dt[-3])
|
| 529 |
+
|
| 530 |
+
assert_raises(TypeError, operator.getitem, dt, 3.0)
|
| 531 |
+
|
| 532 |
+
assert_equal(dt[1], dt[np.int8(1)])
|
| 533 |
+
|
| 534 |
+
@pytest.mark.parametrize('align_flag',[False, True])
|
| 535 |
+
def test_multifield_index(self, align_flag):
|
| 536 |
+
# indexing with a list produces subfields
|
| 537 |
+
# the align flag should be preserved
|
| 538 |
+
dt = np.dtype([
|
| 539 |
+
(('title', 'col1'), '<U20'), ('A', '<f8'), ('B', '<f8')
|
| 540 |
+
], align=align_flag)
|
| 541 |
+
|
| 542 |
+
dt_sub = dt[['B', 'col1']]
|
| 543 |
+
assert_equal(
|
| 544 |
+
dt_sub,
|
| 545 |
+
np.dtype({
|
| 546 |
+
'names': ['B', 'col1'],
|
| 547 |
+
'formats': ['<f8', '<U20'],
|
| 548 |
+
'offsets': [88, 0],
|
| 549 |
+
'titles': [None, 'title'],
|
| 550 |
+
'itemsize': 96
|
| 551 |
+
})
|
| 552 |
+
)
|
| 553 |
+
assert_equal(dt_sub.isalignedstruct, align_flag)
|
| 554 |
+
|
| 555 |
+
dt_sub = dt[['B']]
|
| 556 |
+
assert_equal(
|
| 557 |
+
dt_sub,
|
| 558 |
+
np.dtype({
|
| 559 |
+
'names': ['B'],
|
| 560 |
+
'formats': ['<f8'],
|
| 561 |
+
'offsets': [88],
|
| 562 |
+
'itemsize': 96
|
| 563 |
+
})
|
| 564 |
+
)
|
| 565 |
+
assert_equal(dt_sub.isalignedstruct, align_flag)
|
| 566 |
+
|
| 567 |
+
dt_sub = dt[[]]
|
| 568 |
+
assert_equal(
|
| 569 |
+
dt_sub,
|
| 570 |
+
np.dtype({
|
| 571 |
+
'names': [],
|
| 572 |
+
'formats': [],
|
| 573 |
+
'offsets': [],
|
| 574 |
+
'itemsize': 96
|
| 575 |
+
})
|
| 576 |
+
)
|
| 577 |
+
assert_equal(dt_sub.isalignedstruct, align_flag)
|
| 578 |
+
|
| 579 |
+
assert_raises(TypeError, operator.getitem, dt, ())
|
| 580 |
+
assert_raises(TypeError, operator.getitem, dt, [1, 2, 3])
|
| 581 |
+
assert_raises(TypeError, operator.getitem, dt, ['col1', 2])
|
| 582 |
+
assert_raises(KeyError, operator.getitem, dt, ['fake'])
|
| 583 |
+
assert_raises(KeyError, operator.getitem, dt, ['title'])
|
| 584 |
+
assert_raises(ValueError, operator.getitem, dt, ['col1', 'col1'])
|
| 585 |
+
|
| 586 |
+
def test_partial_dict(self):
|
| 587 |
+
# 'names' is missing
|
| 588 |
+
assert_raises(ValueError, np.dtype,
|
| 589 |
+
{'formats': ['i4', 'i4'], 'f0': ('i4', 0), 'f1':('i4', 4)})
|
| 590 |
+
|
| 591 |
+
def test_fieldless_views(self):
|
| 592 |
+
a = np.zeros(2, dtype={'names':[], 'formats':[], 'offsets':[],
|
| 593 |
+
'itemsize':8})
|
| 594 |
+
assert_raises(ValueError, a.view, np.dtype([]))
|
| 595 |
+
|
| 596 |
+
d = np.dtype((np.dtype([]), 10))
|
| 597 |
+
assert_equal(d.shape, (10,))
|
| 598 |
+
assert_equal(d.itemsize, 0)
|
| 599 |
+
assert_equal(d.base, np.dtype([]))
|
| 600 |
+
|
| 601 |
+
arr = np.fromiter((() for i in range(10)), [])
|
| 602 |
+
assert_equal(arr.dtype, np.dtype([]))
|
| 603 |
+
assert_raises(ValueError, np.frombuffer, b'', dtype=[])
|
| 604 |
+
assert_equal(np.frombuffer(b'', dtype=[], count=2),
|
| 605 |
+
np.empty(2, dtype=[]))
|
| 606 |
+
|
| 607 |
+
assert_raises(ValueError, np.dtype, ([], 'f8'))
|
| 608 |
+
assert_raises(ValueError, np.zeros(1, dtype='i4').view, [])
|
| 609 |
+
|
| 610 |
+
assert_equal(np.zeros(2, dtype=[]) == np.zeros(2, dtype=[]),
|
| 611 |
+
np.ones(2, dtype=bool))
|
| 612 |
+
|
| 613 |
+
assert_equal(np.zeros((1, 2), dtype=[]) == a,
|
| 614 |
+
np.ones((1, 2), dtype=bool))
|
| 615 |
+
|
| 616 |
+
def test_nonstructured_with_object(self):
|
| 617 |
+
# See gh-23277, the dtype here thinks it contain objects, if the
|
| 618 |
+
# assert about that fails, the test becomes meaningless (which is OK)
|
| 619 |
+
arr = np.recarray((0,), dtype="O")
|
| 620 |
+
assert arr.dtype.names is None # no fields
|
| 621 |
+
assert arr.dtype.hasobject # but claims to contain objects
|
| 622 |
+
del arr # the deletion failed previously.
|
| 623 |
+
|
| 624 |
+
|
| 625 |
+
class TestSubarray:
|
| 626 |
+
def test_single_subarray(self):
|
| 627 |
+
a = np.dtype((int, (2)))
|
| 628 |
+
b = np.dtype((int, (2,)))
|
| 629 |
+
assert_dtype_equal(a, b)
|
| 630 |
+
|
| 631 |
+
assert_equal(type(a.subdtype[1]), tuple)
|
| 632 |
+
assert_equal(type(b.subdtype[1]), tuple)
|
| 633 |
+
|
| 634 |
+
def test_equivalent_record(self):
|
| 635 |
+
"""Test whether equivalent subarray dtypes hash the same."""
|
| 636 |
+
a = np.dtype((int, (2, 3)))
|
| 637 |
+
b = np.dtype((int, (2, 3)))
|
| 638 |
+
assert_dtype_equal(a, b)
|
| 639 |
+
|
| 640 |
+
def test_nonequivalent_record(self):
|
| 641 |
+
"""Test whether different subarray dtypes hash differently."""
|
| 642 |
+
a = np.dtype((int, (2, 3)))
|
| 643 |
+
b = np.dtype((int, (3, 2)))
|
| 644 |
+
assert_dtype_not_equal(a, b)
|
| 645 |
+
|
| 646 |
+
a = np.dtype((int, (2, 3)))
|
| 647 |
+
b = np.dtype((int, (2, 2)))
|
| 648 |
+
assert_dtype_not_equal(a, b)
|
| 649 |
+
|
| 650 |
+
a = np.dtype((int, (1, 2, 3)))
|
| 651 |
+
b = np.dtype((int, (1, 2)))
|
| 652 |
+
assert_dtype_not_equal(a, b)
|
| 653 |
+
|
| 654 |
+
def test_shape_equal(self):
|
| 655 |
+
"""Test some data types that are equal"""
|
| 656 |
+
assert_dtype_equal(np.dtype('f8'), np.dtype(('f8', tuple())))
|
| 657 |
+
assert_dtype_equal(np.dtype('(1,)f8'), np.dtype(('f8', 1)))
|
| 658 |
+
assert np.dtype(('f8', 1)).shape == (1,)
|
| 659 |
+
assert_dtype_equal(np.dtype((int, 2)), np.dtype((int, (2,))))
|
| 660 |
+
assert_dtype_equal(np.dtype(('<f4', (3, 2))), np.dtype(('<f4', (3, 2))))
|
| 661 |
+
d = ([('a', 'f4', (1, 2)), ('b', 'f8', (3, 1))], (3, 2))
|
| 662 |
+
assert_dtype_equal(np.dtype(d), np.dtype(d))
|
| 663 |
+
|
| 664 |
+
def test_shape_simple(self):
|
| 665 |
+
"""Test some simple cases that shouldn't be equal"""
|
| 666 |
+
assert_dtype_not_equal(np.dtype('f8'), np.dtype(('f8', (1,))))
|
| 667 |
+
assert_dtype_not_equal(np.dtype(('f8', (1,))), np.dtype(('f8', (1, 1))))
|
| 668 |
+
assert_dtype_not_equal(np.dtype(('f4', (3, 2))), np.dtype(('f4', (2, 3))))
|
| 669 |
+
|
| 670 |
+
def test_shape_monster(self):
|
| 671 |
+
"""Test some more complicated cases that shouldn't be equal"""
|
| 672 |
+
assert_dtype_not_equal(
|
| 673 |
+
np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
|
| 674 |
+
np.dtype(([('a', 'f4', (1, 2)), ('b', 'f8', (1, 3))], (2, 2))))
|
| 675 |
+
assert_dtype_not_equal(
|
| 676 |
+
np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
|
| 677 |
+
np.dtype(([('a', 'f4', (2, 1)), ('b', 'i8', (1, 3))], (2, 2))))
|
| 678 |
+
assert_dtype_not_equal(
|
| 679 |
+
np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
|
| 680 |
+
np.dtype(([('e', 'f8', (1, 3)), ('d', 'f4', (2, 1))], (2, 2))))
|
| 681 |
+
assert_dtype_not_equal(
|
| 682 |
+
np.dtype(([('a', [('a', 'i4', 6)], (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
|
| 683 |
+
np.dtype(([('a', [('a', 'u4', 6)], (2, 1)), ('b', 'f8', (1, 3))], (2, 2))))
|
| 684 |
+
|
| 685 |
+
def test_shape_sequence(self):
|
| 686 |
+
# Any sequence of integers should work as shape, but the result
|
| 687 |
+
# should be a tuple (immutable) of base type integers.
|
| 688 |
+
a = np.array([1, 2, 3], dtype=np.int16)
|
| 689 |
+
l = [1, 2, 3]
|
| 690 |
+
# Array gets converted
|
| 691 |
+
dt = np.dtype([('a', 'f4', a)])
|
| 692 |
+
assert_(isinstance(dt['a'].shape, tuple))
|
| 693 |
+
assert_(isinstance(dt['a'].shape[0], int))
|
| 694 |
+
# List gets converted
|
| 695 |
+
dt = np.dtype([('a', 'f4', l)])
|
| 696 |
+
assert_(isinstance(dt['a'].shape, tuple))
|
| 697 |
+
#
|
| 698 |
+
|
| 699 |
+
class IntLike:
|
| 700 |
+
def __index__(self):
|
| 701 |
+
return 3
|
| 702 |
+
|
| 703 |
+
def __int__(self):
|
| 704 |
+
# (a PyNumber_Check fails without __int__)
|
| 705 |
+
return 3
|
| 706 |
+
|
| 707 |
+
dt = np.dtype([('a', 'f4', IntLike())])
|
| 708 |
+
assert_(isinstance(dt['a'].shape, tuple))
|
| 709 |
+
assert_(isinstance(dt['a'].shape[0], int))
|
| 710 |
+
dt = np.dtype([('a', 'f4', (IntLike(),))])
|
| 711 |
+
assert_(isinstance(dt['a'].shape, tuple))
|
| 712 |
+
assert_(isinstance(dt['a'].shape[0], int))
|
| 713 |
+
|
| 714 |
+
def test_shape_matches_ndim(self):
|
| 715 |
+
dt = np.dtype([('a', 'f4', ())])
|
| 716 |
+
assert_equal(dt['a'].shape, ())
|
| 717 |
+
assert_equal(dt['a'].ndim, 0)
|
| 718 |
+
|
| 719 |
+
dt = np.dtype([('a', 'f4')])
|
| 720 |
+
assert_equal(dt['a'].shape, ())
|
| 721 |
+
assert_equal(dt['a'].ndim, 0)
|
| 722 |
+
|
| 723 |
+
dt = np.dtype([('a', 'f4', 4)])
|
| 724 |
+
assert_equal(dt['a'].shape, (4,))
|
| 725 |
+
assert_equal(dt['a'].ndim, 1)
|
| 726 |
+
|
| 727 |
+
dt = np.dtype([('a', 'f4', (1, 2, 3))])
|
| 728 |
+
assert_equal(dt['a'].shape, (1, 2, 3))
|
| 729 |
+
assert_equal(dt['a'].ndim, 3)
|
| 730 |
+
|
| 731 |
+
def test_shape_invalid(self):
|
| 732 |
+
# Check that the shape is valid.
|
| 733 |
+
max_int = np.iinfo(np.intc).max
|
| 734 |
+
max_intp = np.iinfo(np.intp).max
|
| 735 |
+
# Too large values (the datatype is part of this)
|
| 736 |
+
assert_raises(ValueError, np.dtype, [('a', 'f4', max_int // 4 + 1)])
|
| 737 |
+
assert_raises(ValueError, np.dtype, [('a', 'f4', max_int + 1)])
|
| 738 |
+
assert_raises(ValueError, np.dtype, [('a', 'f4', (max_int, 2))])
|
| 739 |
+
# Takes a different code path (fails earlier:
|
| 740 |
+
assert_raises(ValueError, np.dtype, [('a', 'f4', max_intp + 1)])
|
| 741 |
+
# Negative values
|
| 742 |
+
assert_raises(ValueError, np.dtype, [('a', 'f4', -1)])
|
| 743 |
+
assert_raises(ValueError, np.dtype, [('a', 'f4', (-1, -1))])
|
| 744 |
+
|
| 745 |
+
def test_alignment(self):
|
| 746 |
+
#Check that subarrays are aligned
|
| 747 |
+
t1 = np.dtype('(1,)i4', align=True)
|
| 748 |
+
t2 = np.dtype('2i4', align=True)
|
| 749 |
+
assert_equal(t1.alignment, t2.alignment)
|
| 750 |
+
|
| 751 |
+
def test_aligned_empty(self):
|
| 752 |
+
# Mainly regression test for gh-19696: construction failed completely
|
| 753 |
+
dt = np.dtype([], align=True)
|
| 754 |
+
assert dt == np.dtype([])
|
| 755 |
+
dt = np.dtype({"names": [], "formats": [], "itemsize": 0}, align=True)
|
| 756 |
+
assert dt == np.dtype([])
|
| 757 |
+
|
| 758 |
+
def test_subarray_base_item(self):
|
| 759 |
+
arr = np.ones(3, dtype=[("f", "i", 3)])
|
| 760 |
+
# Extracting the field "absorbs" the subarray into a view:
|
| 761 |
+
assert arr["f"].base is arr
|
| 762 |
+
# Extract the structured item, and then check the tuple component:
|
| 763 |
+
item = arr.item(0)
|
| 764 |
+
assert type(item) is tuple and len(item) == 1
|
| 765 |
+
assert item[0].base is arr
|
| 766 |
+
|
| 767 |
+
def test_subarray_cast_copies(self):
|
| 768 |
+
# Older versions of NumPy did NOT copy, but they got the ownership
|
| 769 |
+
# wrong (not actually knowing the correct base!). Versions since 1.21
|
| 770 |
+
# (I think) crashed fairly reliable. This defines the correct behavior
|
| 771 |
+
# as a copy. Keeping the ownership would be possible (but harder)
|
| 772 |
+
arr = np.ones(3, dtype=[("f", "i", 3)])
|
| 773 |
+
cast = arr.astype(object)
|
| 774 |
+
for fields in cast:
|
| 775 |
+
assert type(fields) == tuple and len(fields) == 1
|
| 776 |
+
subarr = fields[0]
|
| 777 |
+
assert subarr.base is None
|
| 778 |
+
assert subarr.flags.owndata
|
| 779 |
+
|
| 780 |
+
|
| 781 |
+
def iter_struct_object_dtypes():
|
| 782 |
+
"""
|
| 783 |
+
Iterates over a few complex dtypes and object pattern which
|
| 784 |
+
fill the array with a given object (defaults to a singleton).
|
| 785 |
+
|
| 786 |
+
Yields
|
| 787 |
+
------
|
| 788 |
+
dtype : dtype
|
| 789 |
+
pattern : tuple
|
| 790 |
+
Structured tuple for use with `np.array`.
|
| 791 |
+
count : int
|
| 792 |
+
Number of objects stored in the dtype.
|
| 793 |
+
singleton : object
|
| 794 |
+
A singleton object. The returned pattern is constructed so that
|
| 795 |
+
all objects inside the datatype are set to the singleton.
|
| 796 |
+
"""
|
| 797 |
+
obj = object()
|
| 798 |
+
|
| 799 |
+
dt = np.dtype([('b', 'O', (2, 3))])
|
| 800 |
+
p = ([[obj] * 3] * 2,)
|
| 801 |
+
yield pytest.param(dt, p, 6, obj, id="<subarray>")
|
| 802 |
+
|
| 803 |
+
dt = np.dtype([('a', 'i4'), ('b', 'O', (2, 3))])
|
| 804 |
+
p = (0, [[obj] * 3] * 2)
|
| 805 |
+
yield pytest.param(dt, p, 6, obj, id="<subarray in field>")
|
| 806 |
+
|
| 807 |
+
dt = np.dtype([('a', 'i4'),
|
| 808 |
+
('b', [('ba', 'O'), ('bb', 'i1')], (2, 3))])
|
| 809 |
+
p = (0, [[(obj, 0)] * 3] * 2)
|
| 810 |
+
yield pytest.param(dt, p, 6, obj, id="<structured subarray 1>")
|
| 811 |
+
|
| 812 |
+
dt = np.dtype([('a', 'i4'),
|
| 813 |
+
('b', [('ba', 'O'), ('bb', 'O')], (2, 3))])
|
| 814 |
+
p = (0, [[(obj, obj)] * 3] * 2)
|
| 815 |
+
yield pytest.param(dt, p, 12, obj, id="<structured subarray 2>")
|
| 816 |
+
|
| 817 |
+
|
| 818 |
+
@pytest.mark.skipif(
|
| 819 |
+
sys.version_info >= (3, 12),
|
| 820 |
+
reason="Python 3.12 has immortal refcounts, this test will no longer "
|
| 821 |
+
"work. See gh-23986"
|
| 822 |
+
)
|
| 823 |
+
@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
|
| 824 |
+
class TestStructuredObjectRefcounting:
|
| 825 |
+
"""These tests cover various uses of complicated structured types which
|
| 826 |
+
include objects and thus require reference counting.
|
| 827 |
+
"""
|
| 828 |
+
@pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
|
| 829 |
+
iter_struct_object_dtypes())
|
| 830 |
+
@pytest.mark.parametrize(["creation_func", "creation_obj"], [
|
| 831 |
+
pytest.param(np.empty, None,
|
| 832 |
+
# None is probably used for too many things
|
| 833 |
+
marks=pytest.mark.skip("unreliable due to python's behaviour")),
|
| 834 |
+
(np.ones, 1),
|
| 835 |
+
(np.zeros, 0)])
|
| 836 |
+
def test_structured_object_create_delete(self, dt, pat, count, singleton,
|
| 837 |
+
creation_func, creation_obj):
|
| 838 |
+
"""Structured object reference counting in creation and deletion"""
|
| 839 |
+
# The test assumes that 0, 1, and None are singletons.
|
| 840 |
+
gc.collect()
|
| 841 |
+
before = sys.getrefcount(creation_obj)
|
| 842 |
+
arr = creation_func(3, dt)
|
| 843 |
+
|
| 844 |
+
now = sys.getrefcount(creation_obj)
|
| 845 |
+
assert now - before == count * 3
|
| 846 |
+
del arr
|
| 847 |
+
now = sys.getrefcount(creation_obj)
|
| 848 |
+
assert now == before
|
| 849 |
+
|
| 850 |
+
@pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
|
| 851 |
+
iter_struct_object_dtypes())
|
| 852 |
+
def test_structured_object_item_setting(self, dt, pat, count, singleton):
|
| 853 |
+
"""Structured object reference counting for simple item setting"""
|
| 854 |
+
one = 1
|
| 855 |
+
|
| 856 |
+
gc.collect()
|
| 857 |
+
before = sys.getrefcount(singleton)
|
| 858 |
+
arr = np.array([pat] * 3, dt)
|
| 859 |
+
assert sys.getrefcount(singleton) - before == count * 3
|
| 860 |
+
# Fill with `1` and check that it was replaced correctly:
|
| 861 |
+
before2 = sys.getrefcount(one)
|
| 862 |
+
arr[...] = one
|
| 863 |
+
after2 = sys.getrefcount(one)
|
| 864 |
+
assert after2 - before2 == count * 3
|
| 865 |
+
del arr
|
| 866 |
+
gc.collect()
|
| 867 |
+
assert sys.getrefcount(one) == before2
|
| 868 |
+
assert sys.getrefcount(singleton) == before
|
| 869 |
+
|
| 870 |
+
@pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
|
| 871 |
+
iter_struct_object_dtypes())
|
| 872 |
+
@pytest.mark.parametrize(
|
| 873 |
+
['shape', 'index', 'items_changed'],
|
| 874 |
+
[((3,), ([0, 2],), 2),
|
| 875 |
+
((3, 2), ([0, 2], slice(None)), 4),
|
| 876 |
+
((3, 2), ([0, 2], [1]), 2),
|
| 877 |
+
((3,), ([True, False, True]), 2)])
|
| 878 |
+
def test_structured_object_indexing(self, shape, index, items_changed,
|
| 879 |
+
dt, pat, count, singleton):
|
| 880 |
+
"""Structured object reference counting for advanced indexing."""
|
| 881 |
+
# Use two small negative values (should be singletons, but less likely
|
| 882 |
+
# to run into race-conditions). This failed in some threaded envs
|
| 883 |
+
# When using 0 and 1. If it fails again, should remove all explicit
|
| 884 |
+
# checks, and rely on `pytest-leaks` reference count checker only.
|
| 885 |
+
val0 = -4
|
| 886 |
+
val1 = -5
|
| 887 |
+
|
| 888 |
+
arr = np.full(shape, val0, dt)
|
| 889 |
+
|
| 890 |
+
gc.collect()
|
| 891 |
+
before_val0 = sys.getrefcount(val0)
|
| 892 |
+
before_val1 = sys.getrefcount(val1)
|
| 893 |
+
# Test item getting:
|
| 894 |
+
part = arr[index]
|
| 895 |
+
after_val0 = sys.getrefcount(val0)
|
| 896 |
+
assert after_val0 - before_val0 == count * items_changed
|
| 897 |
+
del part
|
| 898 |
+
# Test item setting:
|
| 899 |
+
arr[index] = val1
|
| 900 |
+
gc.collect()
|
| 901 |
+
after_val0 = sys.getrefcount(val0)
|
| 902 |
+
after_val1 = sys.getrefcount(val1)
|
| 903 |
+
assert before_val0 - after_val0 == count * items_changed
|
| 904 |
+
assert after_val1 - before_val1 == count * items_changed
|
| 905 |
+
|
| 906 |
+
@pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
|
| 907 |
+
iter_struct_object_dtypes())
|
| 908 |
+
def test_structured_object_take_and_repeat(self, dt, pat, count, singleton):
|
| 909 |
+
"""Structured object reference counting for specialized functions.
|
| 910 |
+
The older functions such as take and repeat use different code paths
|
| 911 |
+
then item setting (when writing this).
|
| 912 |
+
"""
|
| 913 |
+
indices = [0, 1]
|
| 914 |
+
|
| 915 |
+
arr = np.array([pat] * 3, dt)
|
| 916 |
+
gc.collect()
|
| 917 |
+
before = sys.getrefcount(singleton)
|
| 918 |
+
res = arr.take(indices)
|
| 919 |
+
after = sys.getrefcount(singleton)
|
| 920 |
+
assert after - before == count * 2
|
| 921 |
+
new = res.repeat(10)
|
| 922 |
+
gc.collect()
|
| 923 |
+
after_repeat = sys.getrefcount(singleton)
|
| 924 |
+
assert after_repeat - after == count * 2 * 10
|
| 925 |
+
|
| 926 |
+
|
| 927 |
+
class TestStructuredDtypeSparseFields:
|
| 928 |
+
"""Tests subarray fields which contain sparse dtypes so that
|
| 929 |
+
not all memory is used by the dtype work. Such dtype's should
|
| 930 |
+
leave the underlying memory unchanged.
|
| 931 |
+
"""
|
| 932 |
+
dtype = np.dtype([('a', {'names':['aa', 'ab'], 'formats':['f', 'f'],
|
| 933 |
+
'offsets':[0, 4]}, (2, 3))])
|
| 934 |
+
sparse_dtype = np.dtype([('a', {'names':['ab'], 'formats':['f'],
|
| 935 |
+
'offsets':[4]}, (2, 3))])
|
| 936 |
+
|
| 937 |
+
def test_sparse_field_assignment(self):
|
| 938 |
+
arr = np.zeros(3, self.dtype)
|
| 939 |
+
sparse_arr = arr.view(self.sparse_dtype)
|
| 940 |
+
|
| 941 |
+
sparse_arr[...] = np.finfo(np.float32).max
|
| 942 |
+
# dtype is reduced when accessing the field, so shape is (3, 2, 3):
|
| 943 |
+
assert_array_equal(arr["a"]["aa"], np.zeros((3, 2, 3)))
|
| 944 |
+
|
| 945 |
+
def test_sparse_field_assignment_fancy(self):
|
| 946 |
+
# Fancy assignment goes to the copyswap function for complex types:
|
| 947 |
+
arr = np.zeros(3, self.dtype)
|
| 948 |
+
sparse_arr = arr.view(self.sparse_dtype)
|
| 949 |
+
|
| 950 |
+
sparse_arr[[0, 1, 2]] = np.finfo(np.float32).max
|
| 951 |
+
# dtype is reduced when accessing the field, so shape is (3, 2, 3):
|
| 952 |
+
assert_array_equal(arr["a"]["aa"], np.zeros((3, 2, 3)))
|
| 953 |
+
|
| 954 |
+
|
| 955 |
+
class TestMonsterType:
|
| 956 |
+
"""Test deeply nested subtypes."""
|
| 957 |
+
|
| 958 |
+
def test1(self):
|
| 959 |
+
simple1 = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'],
|
| 960 |
+
'titles': ['Red pixel', 'Blue pixel']})
|
| 961 |
+
a = np.dtype([('yo', int), ('ye', simple1),
|
| 962 |
+
('yi', np.dtype((int, (3, 2))))])
|
| 963 |
+
b = np.dtype([('yo', int), ('ye', simple1),
|
| 964 |
+
('yi', np.dtype((int, (3, 2))))])
|
| 965 |
+
assert_dtype_equal(a, b)
|
| 966 |
+
|
| 967 |
+
c = np.dtype([('yo', int), ('ye', simple1),
|
| 968 |
+
('yi', np.dtype((a, (3, 2))))])
|
| 969 |
+
d = np.dtype([('yo', int), ('ye', simple1),
|
| 970 |
+
('yi', np.dtype((a, (3, 2))))])
|
| 971 |
+
assert_dtype_equal(c, d)
|
| 972 |
+
|
| 973 |
+
@pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking")
|
| 974 |
+
def test_list_recursion(self):
|
| 975 |
+
l = list()
|
| 976 |
+
l.append(('f', l))
|
| 977 |
+
with pytest.raises(RecursionError):
|
| 978 |
+
np.dtype(l)
|
| 979 |
+
|
| 980 |
+
@pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking")
|
| 981 |
+
def test_tuple_recursion(self):
|
| 982 |
+
d = np.int32
|
| 983 |
+
for i in range(100000):
|
| 984 |
+
d = (d, (1,))
|
| 985 |
+
with pytest.raises(RecursionError):
|
| 986 |
+
np.dtype(d)
|
| 987 |
+
|
| 988 |
+
@pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking")
|
| 989 |
+
def test_dict_recursion(self):
|
| 990 |
+
d = dict(names=['self'], formats=[None], offsets=[0])
|
| 991 |
+
d['formats'][0] = d
|
| 992 |
+
with pytest.raises(RecursionError):
|
| 993 |
+
np.dtype(d)
|
| 994 |
+
|
| 995 |
+
|
| 996 |
+
class TestMetadata:
|
| 997 |
+
def test_no_metadata(self):
|
| 998 |
+
d = np.dtype(int)
|
| 999 |
+
assert_(d.metadata is None)
|
| 1000 |
+
|
| 1001 |
+
def test_metadata_takes_dict(self):
|
| 1002 |
+
d = np.dtype(int, metadata={'datum': 1})
|
| 1003 |
+
assert_(d.metadata == {'datum': 1})
|
| 1004 |
+
|
| 1005 |
+
def test_metadata_rejects_nondict(self):
|
| 1006 |
+
assert_raises(TypeError, np.dtype, int, metadata='datum')
|
| 1007 |
+
assert_raises(TypeError, np.dtype, int, metadata=1)
|
| 1008 |
+
assert_raises(TypeError, np.dtype, int, metadata=None)
|
| 1009 |
+
|
| 1010 |
+
def test_nested_metadata(self):
|
| 1011 |
+
d = np.dtype([('a', np.dtype(int, metadata={'datum': 1}))])
|
| 1012 |
+
assert_(d['a'].metadata == {'datum': 1})
|
| 1013 |
+
|
| 1014 |
+
def test_base_metadata_copied(self):
|
| 1015 |
+
d = np.dtype((np.void, np.dtype('i4,i4', metadata={'datum': 1})))
|
| 1016 |
+
assert_(d.metadata == {'datum': 1})
|
| 1017 |
+
|
| 1018 |
+
class TestString:
|
| 1019 |
+
def test_complex_dtype_str(self):
|
| 1020 |
+
dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
|
| 1021 |
+
('rtile', '>f4', (64, 36))], (3,)),
|
| 1022 |
+
('bottom', [('bleft', ('>f4', (8, 64)), (1,)),
|
| 1023 |
+
('bright', '>f4', (8, 36))])])
|
| 1024 |
+
assert_equal(str(dt),
|
| 1025 |
+
"[('top', [('tiles', ('>f4', (64, 64)), (1,)), "
|
| 1026 |
+
"('rtile', '>f4', (64, 36))], (3,)), "
|
| 1027 |
+
"('bottom', [('bleft', ('>f4', (8, 64)), (1,)), "
|
| 1028 |
+
"('bright', '>f4', (8, 36))])]")
|
| 1029 |
+
|
| 1030 |
+
# If the sticky aligned flag is set to True, it makes the
|
| 1031 |
+
# str() function use a dict representation with an 'aligned' flag
|
| 1032 |
+
dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
|
| 1033 |
+
('rtile', '>f4', (64, 36))],
|
| 1034 |
+
(3,)),
|
| 1035 |
+
('bottom', [('bleft', ('>f4', (8, 64)), (1,)),
|
| 1036 |
+
('bright', '>f4', (8, 36))])],
|
| 1037 |
+
align=True)
|
| 1038 |
+
assert_equal(str(dt),
|
| 1039 |
+
"{'names': ['top', 'bottom'],"
|
| 1040 |
+
" 'formats': [([('tiles', ('>f4', (64, 64)), (1,)), "
|
| 1041 |
+
"('rtile', '>f4', (64, 36))], (3,)), "
|
| 1042 |
+
"[('bleft', ('>f4', (8, 64)), (1,)), "
|
| 1043 |
+
"('bright', '>f4', (8, 36))]],"
|
| 1044 |
+
" 'offsets': [0, 76800],"
|
| 1045 |
+
" 'itemsize': 80000,"
|
| 1046 |
+
" 'aligned': True}")
|
| 1047 |
+
with np.printoptions(legacy='1.21'):
|
| 1048 |
+
assert_equal(str(dt),
|
| 1049 |
+
"{'names':['top','bottom'], "
|
| 1050 |
+
"'formats':[([('tiles', ('>f4', (64, 64)), (1,)), "
|
| 1051 |
+
"('rtile', '>f4', (64, 36))], (3,)),"
|
| 1052 |
+
"[('bleft', ('>f4', (8, 64)), (1,)), "
|
| 1053 |
+
"('bright', '>f4', (8, 36))]], "
|
| 1054 |
+
"'offsets':[0,76800], "
|
| 1055 |
+
"'itemsize':80000, "
|
| 1056 |
+
"'aligned':True}")
|
| 1057 |
+
assert_equal(np.dtype(eval(str(dt))), dt)
|
| 1058 |
+
|
| 1059 |
+
dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'],
|
| 1060 |
+
'offsets': [0, 1, 2],
|
| 1061 |
+
'titles': ['Red pixel', 'Green pixel', 'Blue pixel']})
|
| 1062 |
+
assert_equal(str(dt),
|
| 1063 |
+
"[(('Red pixel', 'r'), 'u1'), "
|
| 1064 |
+
"(('Green pixel', 'g'), 'u1'), "
|
| 1065 |
+
"(('Blue pixel', 'b'), 'u1')]")
|
| 1066 |
+
|
| 1067 |
+
dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'],
|
| 1068 |
+
'formats': ['<u4', 'u1', 'u1', 'u1'],
|
| 1069 |
+
'offsets': [0, 0, 1, 2],
|
| 1070 |
+
'titles': ['Color', 'Red pixel',
|
| 1071 |
+
'Green pixel', 'Blue pixel']})
|
| 1072 |
+
assert_equal(str(dt),
|
| 1073 |
+
"{'names': ['rgba', 'r', 'g', 'b'],"
|
| 1074 |
+
" 'formats': ['<u4', 'u1', 'u1', 'u1'],"
|
| 1075 |
+
" 'offsets': [0, 0, 1, 2],"
|
| 1076 |
+
" 'titles': ['Color', 'Red pixel', "
|
| 1077 |
+
"'Green pixel', 'Blue pixel'],"
|
| 1078 |
+
" 'itemsize': 4}")
|
| 1079 |
+
|
| 1080 |
+
dt = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'],
|
| 1081 |
+
'offsets': [0, 2],
|
| 1082 |
+
'titles': ['Red pixel', 'Blue pixel']})
|
| 1083 |
+
assert_equal(str(dt),
|
| 1084 |
+
"{'names': ['r', 'b'],"
|
| 1085 |
+
" 'formats': ['u1', 'u1'],"
|
| 1086 |
+
" 'offsets': [0, 2],"
|
| 1087 |
+
" 'titles': ['Red pixel', 'Blue pixel'],"
|
| 1088 |
+
" 'itemsize': 3}")
|
| 1089 |
+
|
| 1090 |
+
dt = np.dtype([('a', '<m8[D]'), ('b', '<M8[us]')])
|
| 1091 |
+
assert_equal(str(dt),
|
| 1092 |
+
"[('a', '<m8[D]'), ('b', '<M8[us]')]")
|
| 1093 |
+
|
| 1094 |
+
def test_repr_structured(self):
|
| 1095 |
+
dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
|
| 1096 |
+
('rtile', '>f4', (64, 36))], (3,)),
|
| 1097 |
+
('bottom', [('bleft', ('>f4', (8, 64)), (1,)),
|
| 1098 |
+
('bright', '>f4', (8, 36))])])
|
| 1099 |
+
assert_equal(repr(dt),
|
| 1100 |
+
"dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)), "
|
| 1101 |
+
"('rtile', '>f4', (64, 36))], (3,)), "
|
| 1102 |
+
"('bottom', [('bleft', ('>f4', (8, 64)), (1,)), "
|
| 1103 |
+
"('bright', '>f4', (8, 36))])])")
|
| 1104 |
+
|
| 1105 |
+
dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'],
|
| 1106 |
+
'offsets': [0, 1, 2],
|
| 1107 |
+
'titles': ['Red pixel', 'Green pixel', 'Blue pixel']},
|
| 1108 |
+
align=True)
|
| 1109 |
+
assert_equal(repr(dt),
|
| 1110 |
+
"dtype([(('Red pixel', 'r'), 'u1'), "
|
| 1111 |
+
"(('Green pixel', 'g'), 'u1'), "
|
| 1112 |
+
"(('Blue pixel', 'b'), 'u1')], align=True)")
|
| 1113 |
+
|
| 1114 |
+
def test_repr_structured_not_packed(self):
|
| 1115 |
+
dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'],
|
| 1116 |
+
'formats': ['<u4', 'u1', 'u1', 'u1'],
|
| 1117 |
+
'offsets': [0, 0, 1, 2],
|
| 1118 |
+
'titles': ['Color', 'Red pixel',
|
| 1119 |
+
'Green pixel', 'Blue pixel']}, align=True)
|
| 1120 |
+
assert_equal(repr(dt),
|
| 1121 |
+
"dtype({'names': ['rgba', 'r', 'g', 'b'],"
|
| 1122 |
+
" 'formats': ['<u4', 'u1', 'u1', 'u1'],"
|
| 1123 |
+
" 'offsets': [0, 0, 1, 2],"
|
| 1124 |
+
" 'titles': ['Color', 'Red pixel', "
|
| 1125 |
+
"'Green pixel', 'Blue pixel'],"
|
| 1126 |
+
" 'itemsize': 4}, align=True)")
|
| 1127 |
+
|
| 1128 |
+
dt = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'],
|
| 1129 |
+
'offsets': [0, 2],
|
| 1130 |
+
'titles': ['Red pixel', 'Blue pixel'],
|
| 1131 |
+
'itemsize': 4})
|
| 1132 |
+
assert_equal(repr(dt),
|
| 1133 |
+
"dtype({'names': ['r', 'b'], "
|
| 1134 |
+
"'formats': ['u1', 'u1'], "
|
| 1135 |
+
"'offsets': [0, 2], "
|
| 1136 |
+
"'titles': ['Red pixel', 'Blue pixel'], "
|
| 1137 |
+
"'itemsize': 4})")
|
| 1138 |
+
|
| 1139 |
+
def test_repr_structured_datetime(self):
|
| 1140 |
+
dt = np.dtype([('a', '<M8[D]'), ('b', '<m8[us]')])
|
| 1141 |
+
assert_equal(repr(dt),
|
| 1142 |
+
"dtype([('a', '<M8[D]'), ('b', '<m8[us]')])")
|
| 1143 |
+
|
| 1144 |
+
def test_repr_str_subarray(self):
|
| 1145 |
+
dt = np.dtype(('<i2', (1,)))
|
| 1146 |
+
assert_equal(repr(dt), "dtype(('<i2', (1,)))")
|
| 1147 |
+
assert_equal(str(dt), "('<i2', (1,))")
|
| 1148 |
+
|
| 1149 |
+
def test_base_dtype_with_object_type(self):
|
| 1150 |
+
# Issue gh-2798, should not error.
|
| 1151 |
+
np.array(['a'], dtype="O").astype(("O", [("name", "O")]))
|
| 1152 |
+
|
| 1153 |
+
def test_empty_string_to_object(self):
|
| 1154 |
+
# Pull request #4722
|
| 1155 |
+
np.array(["", ""]).astype(object)
|
| 1156 |
+
|
| 1157 |
+
def test_void_subclass_unsized(self):
|
| 1158 |
+
dt = np.dtype(np.record)
|
| 1159 |
+
assert_equal(repr(dt), "dtype('V')")
|
| 1160 |
+
assert_equal(str(dt), '|V0')
|
| 1161 |
+
assert_equal(dt.name, 'record')
|
| 1162 |
+
|
| 1163 |
+
def test_void_subclass_sized(self):
|
| 1164 |
+
dt = np.dtype((np.record, 2))
|
| 1165 |
+
assert_equal(repr(dt), "dtype('V2')")
|
| 1166 |
+
assert_equal(str(dt), '|V2')
|
| 1167 |
+
assert_equal(dt.name, 'record16')
|
| 1168 |
+
|
| 1169 |
+
def test_void_subclass_fields(self):
|
| 1170 |
+
dt = np.dtype((np.record, [('a', '<u2')]))
|
| 1171 |
+
assert_equal(repr(dt), "dtype((numpy.record, [('a', '<u2')]))")
|
| 1172 |
+
assert_equal(str(dt), "(numpy.record, [('a', '<u2')])")
|
| 1173 |
+
assert_equal(dt.name, 'record16')
|
| 1174 |
+
|
| 1175 |
+
|
| 1176 |
+
class TestDtypeAttributeDeletion:
|
| 1177 |
+
|
| 1178 |
+
def test_dtype_non_writable_attributes_deletion(self):
|
| 1179 |
+
dt = np.dtype(np.double)
|
| 1180 |
+
attr = ["subdtype", "descr", "str", "name", "base", "shape",
|
| 1181 |
+
"isbuiltin", "isnative", "isalignedstruct", "fields",
|
| 1182 |
+
"metadata", "hasobject"]
|
| 1183 |
+
|
| 1184 |
+
for s in attr:
|
| 1185 |
+
assert_raises(AttributeError, delattr, dt, s)
|
| 1186 |
+
|
| 1187 |
+
def test_dtype_writable_attributes_deletion(self):
|
| 1188 |
+
dt = np.dtype(np.double)
|
| 1189 |
+
attr = ["names"]
|
| 1190 |
+
for s in attr:
|
| 1191 |
+
assert_raises(AttributeError, delattr, dt, s)
|
| 1192 |
+
|
| 1193 |
+
|
| 1194 |
+
class TestDtypeAttributes:
|
| 1195 |
+
def test_descr_has_trailing_void(self):
|
| 1196 |
+
# see gh-6359
|
| 1197 |
+
dtype = np.dtype({
|
| 1198 |
+
'names': ['A', 'B'],
|
| 1199 |
+
'formats': ['f4', 'f4'],
|
| 1200 |
+
'offsets': [0, 8],
|
| 1201 |
+
'itemsize': 16})
|
| 1202 |
+
new_dtype = np.dtype(dtype.descr)
|
| 1203 |
+
assert_equal(new_dtype.itemsize, 16)
|
| 1204 |
+
|
| 1205 |
+
def test_name_dtype_subclass(self):
|
| 1206 |
+
# Ticket #4357
|
| 1207 |
+
class user_def_subcls(np.void):
|
| 1208 |
+
pass
|
| 1209 |
+
assert_equal(np.dtype(user_def_subcls).name, 'user_def_subcls')
|
| 1210 |
+
|
| 1211 |
+
def test_zero_stride(self):
|
| 1212 |
+
arr = np.ones(1, dtype="i8")
|
| 1213 |
+
arr = np.broadcast_to(arr, 10)
|
| 1214 |
+
assert arr.strides == (0,)
|
| 1215 |
+
with pytest.raises(ValueError):
|
| 1216 |
+
arr.dtype = "i1"
|
| 1217 |
+
|
| 1218 |
+
class TestDTypeMakeCanonical:
|
| 1219 |
+
def check_canonical(self, dtype, canonical):
|
| 1220 |
+
"""
|
| 1221 |
+
Check most properties relevant to "canonical" versions of a dtype,
|
| 1222 |
+
which is mainly native byte order for datatypes supporting this.
|
| 1223 |
+
|
| 1224 |
+
The main work is checking structured dtypes with fields, where we
|
| 1225 |
+
reproduce most the actual logic used in the C-code.
|
| 1226 |
+
"""
|
| 1227 |
+
assert type(dtype) is type(canonical)
|
| 1228 |
+
|
| 1229 |
+
# a canonical DType should always have equivalent casting (both ways)
|
| 1230 |
+
assert np.can_cast(dtype, canonical, casting="equiv")
|
| 1231 |
+
assert np.can_cast(canonical, dtype, casting="equiv")
|
| 1232 |
+
# a canonical dtype (and its fields) is always native (checks fields):
|
| 1233 |
+
assert canonical.isnative
|
| 1234 |
+
|
| 1235 |
+
# Check that canonical of canonical is the same (no casting):
|
| 1236 |
+
assert np.result_type(canonical) == canonical
|
| 1237 |
+
|
| 1238 |
+
if not dtype.names:
|
| 1239 |
+
# The flags currently never change for unstructured dtypes
|
| 1240 |
+
assert dtype.flags == canonical.flags
|
| 1241 |
+
return
|
| 1242 |
+
|
| 1243 |
+
# Must have all the needs API flag set:
|
| 1244 |
+
assert dtype.flags & 0b10000
|
| 1245 |
+
|
| 1246 |
+
# Check that the fields are identical (including titles):
|
| 1247 |
+
assert dtype.fields.keys() == canonical.fields.keys()
|
| 1248 |
+
|
| 1249 |
+
def aligned_offset(offset, alignment):
|
| 1250 |
+
# round up offset:
|
| 1251 |
+
return - (-offset // alignment) * alignment
|
| 1252 |
+
|
| 1253 |
+
totalsize = 0
|
| 1254 |
+
max_alignment = 1
|
| 1255 |
+
for name in dtype.names:
|
| 1256 |
+
# each field is also canonical:
|
| 1257 |
+
new_field_descr = canonical.fields[name][0]
|
| 1258 |
+
self.check_canonical(dtype.fields[name][0], new_field_descr)
|
| 1259 |
+
|
| 1260 |
+
# Must have the "inherited" object related flags:
|
| 1261 |
+
expected = 0b11011 & new_field_descr.flags
|
| 1262 |
+
assert (canonical.flags & expected) == expected
|
| 1263 |
+
|
| 1264 |
+
if canonical.isalignedstruct:
|
| 1265 |
+
totalsize = aligned_offset(totalsize, new_field_descr.alignment)
|
| 1266 |
+
max_alignment = max(new_field_descr.alignment, max_alignment)
|
| 1267 |
+
|
| 1268 |
+
assert canonical.fields[name][1] == totalsize
|
| 1269 |
+
# if a title exists, they must match (otherwise empty tuple):
|
| 1270 |
+
assert dtype.fields[name][2:] == canonical.fields[name][2:]
|
| 1271 |
+
|
| 1272 |
+
totalsize += new_field_descr.itemsize
|
| 1273 |
+
|
| 1274 |
+
if canonical.isalignedstruct:
|
| 1275 |
+
totalsize = aligned_offset(totalsize, max_alignment)
|
| 1276 |
+
assert canonical.itemsize == totalsize
|
| 1277 |
+
assert canonical.alignment == max_alignment
|
| 1278 |
+
|
| 1279 |
+
def test_simple(self):
|
| 1280 |
+
dt = np.dtype(">i4")
|
| 1281 |
+
assert np.result_type(dt).isnative
|
| 1282 |
+
assert np.result_type(dt).num == dt.num
|
| 1283 |
+
|
| 1284 |
+
# dtype with empty space:
|
| 1285 |
+
struct_dt = np.dtype(">i4,<i1,i8,V3")[["f0", "f2"]]
|
| 1286 |
+
canonical = np.result_type(struct_dt)
|
| 1287 |
+
assert canonical.itemsize == 4+8
|
| 1288 |
+
assert canonical.isnative
|
| 1289 |
+
|
| 1290 |
+
# aligned struct dtype with empty space:
|
| 1291 |
+
struct_dt = np.dtype(">i1,<i4,i8,V3", align=True)[["f0", "f2"]]
|
| 1292 |
+
canonical = np.result_type(struct_dt)
|
| 1293 |
+
assert canonical.isalignedstruct
|
| 1294 |
+
assert canonical.itemsize == np.dtype("i8").alignment + 8
|
| 1295 |
+
assert canonical.isnative
|
| 1296 |
+
|
| 1297 |
+
def test_object_flag_not_inherited(self):
|
| 1298 |
+
# The following dtype still indicates "object", because its included
|
| 1299 |
+
# in the unaccessible space (maybe this could change at some point):
|
| 1300 |
+
arr = np.ones(3, "i,O,i")[["f0", "f2"]]
|
| 1301 |
+
assert arr.dtype.hasobject
|
| 1302 |
+
canonical_dt = np.result_type(arr.dtype)
|
| 1303 |
+
assert not canonical_dt.hasobject
|
| 1304 |
+
|
| 1305 |
+
@pytest.mark.slow
|
| 1306 |
+
@hypothesis.given(dtype=hynp.nested_dtypes())
|
| 1307 |
+
def test_make_canonical_hypothesis(self, dtype):
|
| 1308 |
+
canonical = np.result_type(dtype)
|
| 1309 |
+
self.check_canonical(dtype, canonical)
|
| 1310 |
+
# result_type with two arguments should always give identical results:
|
| 1311 |
+
two_arg_result = np.result_type(dtype, dtype)
|
| 1312 |
+
assert np.can_cast(two_arg_result, canonical, casting="no")
|
| 1313 |
+
|
| 1314 |
+
@pytest.mark.slow
|
| 1315 |
+
@hypothesis.given(
|
| 1316 |
+
dtype=hypothesis.extra.numpy.array_dtypes(
|
| 1317 |
+
subtype_strategy=hypothesis.extra.numpy.array_dtypes(),
|
| 1318 |
+
min_size=5, max_size=10, allow_subarrays=True))
|
| 1319 |
+
def test_structured(self, dtype):
|
| 1320 |
+
# Pick 4 of the fields at random. This will leave empty space in the
|
| 1321 |
+
# dtype (since we do not canonicalize it here).
|
| 1322 |
+
field_subset = random.sample(dtype.names, k=4)
|
| 1323 |
+
dtype_with_empty_space = dtype[field_subset]
|
| 1324 |
+
assert dtype_with_empty_space.itemsize == dtype.itemsize
|
| 1325 |
+
canonicalized = np.result_type(dtype_with_empty_space)
|
| 1326 |
+
self.check_canonical(dtype_with_empty_space, canonicalized)
|
| 1327 |
+
# promotion with two arguments should always give identical results:
|
| 1328 |
+
two_arg_result = np.promote_types(
|
| 1329 |
+
dtype_with_empty_space, dtype_with_empty_space)
|
| 1330 |
+
assert np.can_cast(two_arg_result, canonicalized, casting="no")
|
| 1331 |
+
|
| 1332 |
+
# Ensure that we also check aligned struct (check the opposite, in
|
| 1333 |
+
# case hypothesis grows support for `align`. Then repeat the test:
|
| 1334 |
+
dtype_aligned = np.dtype(dtype.descr, align=not dtype.isalignedstruct)
|
| 1335 |
+
dtype_with_empty_space = dtype_aligned[field_subset]
|
| 1336 |
+
assert dtype_with_empty_space.itemsize == dtype_aligned.itemsize
|
| 1337 |
+
canonicalized = np.result_type(dtype_with_empty_space)
|
| 1338 |
+
self.check_canonical(dtype_with_empty_space, canonicalized)
|
| 1339 |
+
# promotion with two arguments should always give identical results:
|
| 1340 |
+
two_arg_result = np.promote_types(
|
| 1341 |
+
dtype_with_empty_space, dtype_with_empty_space)
|
| 1342 |
+
assert np.can_cast(two_arg_result, canonicalized, casting="no")
|
| 1343 |
+
|
| 1344 |
+
|
| 1345 |
+
class TestPickling:
|
| 1346 |
+
|
| 1347 |
+
def check_pickling(self, dtype):
|
| 1348 |
+
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
|
| 1349 |
+
buf = pickle.dumps(dtype, proto)
|
| 1350 |
+
# The dtype pickling itself pickles `np.dtype` if it is pickled
|
| 1351 |
+
# as a singleton `dtype` should be stored in the buffer:
|
| 1352 |
+
assert b"_DType_reconstruct" not in buf
|
| 1353 |
+
assert b"dtype" in buf
|
| 1354 |
+
pickled = pickle.loads(buf)
|
| 1355 |
+
assert_equal(pickled, dtype)
|
| 1356 |
+
assert_equal(pickled.descr, dtype.descr)
|
| 1357 |
+
if dtype.metadata is not None:
|
| 1358 |
+
assert_equal(pickled.metadata, dtype.metadata)
|
| 1359 |
+
# Check the reconstructed dtype is functional
|
| 1360 |
+
x = np.zeros(3, dtype=dtype)
|
| 1361 |
+
y = np.zeros(3, dtype=pickled)
|
| 1362 |
+
assert_equal(x, y)
|
| 1363 |
+
assert_equal(x[0], y[0])
|
| 1364 |
+
|
| 1365 |
+
@pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object,
|
| 1366 |
+
bool])
|
| 1367 |
+
def test_builtin(self, t):
|
| 1368 |
+
self.check_pickling(np.dtype(t))
|
| 1369 |
+
|
| 1370 |
+
def test_structured(self):
|
| 1371 |
+
dt = np.dtype(([('a', '>f4', (2, 1)), ('b', '<f8', (1, 3))], (2, 2)))
|
| 1372 |
+
self.check_pickling(dt)
|
| 1373 |
+
|
| 1374 |
+
def test_structured_aligned(self):
|
| 1375 |
+
dt = np.dtype('i4, i1', align=True)
|
| 1376 |
+
self.check_pickling(dt)
|
| 1377 |
+
|
| 1378 |
+
def test_structured_unaligned(self):
|
| 1379 |
+
dt = np.dtype('i4, i1', align=False)
|
| 1380 |
+
self.check_pickling(dt)
|
| 1381 |
+
|
| 1382 |
+
def test_structured_padded(self):
|
| 1383 |
+
dt = np.dtype({
|
| 1384 |
+
'names': ['A', 'B'],
|
| 1385 |
+
'formats': ['f4', 'f4'],
|
| 1386 |
+
'offsets': [0, 8],
|
| 1387 |
+
'itemsize': 16})
|
| 1388 |
+
self.check_pickling(dt)
|
| 1389 |
+
|
| 1390 |
+
def test_structured_titles(self):
|
| 1391 |
+
dt = np.dtype({'names': ['r', 'b'],
|
| 1392 |
+
'formats': ['u1', 'u1'],
|
| 1393 |
+
'titles': ['Red pixel', 'Blue pixel']})
|
| 1394 |
+
self.check_pickling(dt)
|
| 1395 |
+
|
| 1396 |
+
@pytest.mark.parametrize('base', ['m8', 'M8'])
|
| 1397 |
+
@pytest.mark.parametrize('unit', ['', 'Y', 'M', 'W', 'D', 'h', 'm', 's',
|
| 1398 |
+
'ms', 'us', 'ns', 'ps', 'fs', 'as'])
|
| 1399 |
+
def test_datetime(self, base, unit):
|
| 1400 |
+
dt = np.dtype('%s[%s]' % (base, unit) if unit else base)
|
| 1401 |
+
self.check_pickling(dt)
|
| 1402 |
+
if unit:
|
| 1403 |
+
dt = np.dtype('%s[7%s]' % (base, unit))
|
| 1404 |
+
self.check_pickling(dt)
|
| 1405 |
+
|
| 1406 |
+
def test_metadata(self):
|
| 1407 |
+
dt = np.dtype(int, metadata={'datum': 1})
|
| 1408 |
+
self.check_pickling(dt)
|
| 1409 |
+
|
| 1410 |
+
@pytest.mark.parametrize("DType",
|
| 1411 |
+
[type(np.dtype(t)) for t in np.typecodes['All']] +
|
| 1412 |
+
[type(np.dtype(rational)), np.dtype])
|
| 1413 |
+
def test_pickle_dtype_class(self, DType):
|
| 1414 |
+
# Check that DTypes (the classes/types) roundtrip when pickling
|
| 1415 |
+
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
|
| 1416 |
+
roundtrip_DType = pickle.loads(pickle.dumps(DType, proto))
|
| 1417 |
+
assert roundtrip_DType is DType
|
| 1418 |
+
|
| 1419 |
+
@pytest.mark.parametrize("dt",
|
| 1420 |
+
[np.dtype(t) for t in np.typecodes['All']] +
|
| 1421 |
+
[np.dtype(rational)])
|
| 1422 |
+
def test_pickle_dtype(self, dt):
|
| 1423 |
+
# Check that dtype instances roundtrip when pickling and that pickling
|
| 1424 |
+
# doesn't change the hash value
|
| 1425 |
+
pre_pickle_hash = hash(dt)
|
| 1426 |
+
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
|
| 1427 |
+
roundtrip_dt = pickle.loads(pickle.dumps(dt, proto))
|
| 1428 |
+
assert roundtrip_dt == dt
|
| 1429 |
+
assert hash(dt) == pre_pickle_hash
|
| 1430 |
+
|
| 1431 |
+
|
| 1432 |
+
class TestPromotion:
|
| 1433 |
+
"""Test cases related to more complex DType promotions. Further promotion
|
| 1434 |
+
tests are defined in `test_numeric.py`
|
| 1435 |
+
"""
|
| 1436 |
+
@pytest.mark.parametrize(["other", "expected"],
|
| 1437 |
+
[(2**16-1, np.complex64),
|
| 1438 |
+
(2**32-1, np.complex64),
|
| 1439 |
+
(np.float16(2), np.complex64),
|
| 1440 |
+
(np.float32(2), np.complex64),
|
| 1441 |
+
(np.longdouble(2), np.clongdouble),
|
| 1442 |
+
# Base of the double value to sidestep any rounding issues:
|
| 1443 |
+
(np.longdouble(np.nextafter(1.7e308, 0.)), np.clongdouble),
|
| 1444 |
+
# Additionally use "nextafter" so the cast can't round down:
|
| 1445 |
+
(np.longdouble(np.nextafter(1.7e308, np.inf)), np.clongdouble),
|
| 1446 |
+
# repeat for complex scalars:
|
| 1447 |
+
(np.complex64(2), np.complex64),
|
| 1448 |
+
(np.clongdouble(2), np.clongdouble),
|
| 1449 |
+
# Base of the double value to sidestep any rounding issues:
|
| 1450 |
+
(np.clongdouble(np.nextafter(1.7e308, 0.) * 1j), np.clongdouble),
|
| 1451 |
+
# Additionally use "nextafter" so the cast can't round down:
|
| 1452 |
+
(np.clongdouble(np.nextafter(1.7e308, np.inf)), np.clongdouble),
|
| 1453 |
+
])
|
| 1454 |
+
def test_complex_other_value_based(self, other, expected):
|
| 1455 |
+
# This would change if we modify the value based promotion
|
| 1456 |
+
min_complex = np.dtype(np.complex64)
|
| 1457 |
+
|
| 1458 |
+
res = np.result_type(other, min_complex)
|
| 1459 |
+
assert res == expected
|
| 1460 |
+
# Check the same for a simple ufunc call that uses the same logic:
|
| 1461 |
+
res = np.minimum(other, np.ones(3, dtype=min_complex)).dtype
|
| 1462 |
+
assert res == expected
|
| 1463 |
+
|
| 1464 |
+
@pytest.mark.parametrize(["other", "expected"],
|
| 1465 |
+
[(np.bool, np.complex128),
|
| 1466 |
+
(np.int64, np.complex128),
|
| 1467 |
+
(np.float16, np.complex64),
|
| 1468 |
+
(np.float32, np.complex64),
|
| 1469 |
+
(np.float64, np.complex128),
|
| 1470 |
+
(np.longdouble, np.clongdouble),
|
| 1471 |
+
(np.complex64, np.complex64),
|
| 1472 |
+
(np.complex128, np.complex128),
|
| 1473 |
+
(np.clongdouble, np.clongdouble),
|
| 1474 |
+
])
|
| 1475 |
+
def test_complex_scalar_value_based(self, other, expected):
|
| 1476 |
+
# This would change if we modify the value based promotion
|
| 1477 |
+
complex_scalar = 1j
|
| 1478 |
+
|
| 1479 |
+
res = np.result_type(other, complex_scalar)
|
| 1480 |
+
assert res == expected
|
| 1481 |
+
# Check the same for a simple ufunc call that uses the same logic:
|
| 1482 |
+
res = np.minimum(np.ones(3, dtype=other), complex_scalar).dtype
|
| 1483 |
+
assert res == expected
|
| 1484 |
+
|
| 1485 |
+
def test_complex_pyscalar_promote_rational(self):
|
| 1486 |
+
with pytest.raises(TypeError,
|
| 1487 |
+
match=r".* no common DType exists for the given inputs"):
|
| 1488 |
+
np.result_type(1j, rational)
|
| 1489 |
+
|
| 1490 |
+
with pytest.raises(TypeError,
|
| 1491 |
+
match=r".* no common DType exists for the given inputs"):
|
| 1492 |
+
np.result_type(1j, rational(1, 2))
|
| 1493 |
+
|
| 1494 |
+
@pytest.mark.parametrize("val", [2, 2**32, 2**63, 2**64, 2*100])
|
| 1495 |
+
def test_python_integer_promotion(self, val):
|
| 1496 |
+
# If we only pass scalars (mainly python ones!), NEP 50 means
|
| 1497 |
+
# that we get the default integer
|
| 1498 |
+
expected_dtype = np.dtype(int) # the default integer
|
| 1499 |
+
assert np.result_type(val, 0) == expected_dtype
|
| 1500 |
+
# With NEP 50, the NumPy scalar wins though:
|
| 1501 |
+
assert np.result_type(val, np.int8(0)) == np.int8
|
| 1502 |
+
|
| 1503 |
+
@pytest.mark.parametrize(["other", "expected"],
|
| 1504 |
+
[(1, rational), (1., np.float64)])
|
| 1505 |
+
def test_float_int_pyscalar_promote_rational(self, other, expected):
|
| 1506 |
+
# Note that rationals are a bit awkward as they promote with float64
|
| 1507 |
+
# or default ints, but not float16 or uint8/int8 (which looks
|
| 1508 |
+
# inconsistent here). The new promotion fixed this (partially?)
|
| 1509 |
+
assert np.result_type(other, rational) == expected
|
| 1510 |
+
assert np.result_type(other, rational(1, 2)) == expected
|
| 1511 |
+
|
| 1512 |
+
@pytest.mark.parametrize(["dtypes", "expected"], [
|
| 1513 |
+
# These promotions are not associative/commutative:
|
| 1514 |
+
([np.uint16, np.int16, np.float16], np.float32),
|
| 1515 |
+
([np.uint16, np.int8, np.float16], np.float32),
|
| 1516 |
+
([np.uint8, np.int16, np.float16], np.float32),
|
| 1517 |
+
# The following promotions are not ambiguous, but cover code
|
| 1518 |
+
# paths of abstract promotion (no particular logic being tested)
|
| 1519 |
+
([1, 1, np.float64], np.float64),
|
| 1520 |
+
([1, 1., np.complex128], np.complex128),
|
| 1521 |
+
([1, 1j, np.float64], np.complex128),
|
| 1522 |
+
([1., 1., np.int64], np.float64),
|
| 1523 |
+
([1., 1j, np.float64], np.complex128),
|
| 1524 |
+
([1j, 1j, np.float64], np.complex128),
|
| 1525 |
+
([1, True, np.bool], np.int_),
|
| 1526 |
+
])
|
| 1527 |
+
def test_permutations_do_not_influence_result(self, dtypes, expected):
|
| 1528 |
+
# Tests that most permutations do not influence the result. In the
|
| 1529 |
+
# above some uint and int combinations promote to a larger integer
|
| 1530 |
+
# type, which would then promote to a larger than necessary float.
|
| 1531 |
+
for perm in permutations(dtypes):
|
| 1532 |
+
assert np.result_type(*perm) == expected
|
| 1533 |
+
|
| 1534 |
+
|
| 1535 |
+
def test_rational_dtype():
|
| 1536 |
+
# test for bug gh-5719
|
| 1537 |
+
a = np.array([1111], dtype=rational).astype
|
| 1538 |
+
assert_raises(OverflowError, a, 'int8')
|
| 1539 |
+
|
| 1540 |
+
# test that dtype detection finds user-defined types
|
| 1541 |
+
x = rational(1)
|
| 1542 |
+
assert_equal(np.array([x,x]).dtype, np.dtype(rational))
|
| 1543 |
+
|
| 1544 |
+
|
| 1545 |
+
def test_dtypes_are_true():
|
| 1546 |
+
# test for gh-6294
|
| 1547 |
+
assert bool(np.dtype('f8'))
|
| 1548 |
+
assert bool(np.dtype('i8'))
|
| 1549 |
+
assert bool(np.dtype([('a', 'i8'), ('b', 'f4')]))
|
| 1550 |
+
|
| 1551 |
+
|
| 1552 |
+
def test_invalid_dtype_string():
|
| 1553 |
+
# test for gh-10440
|
| 1554 |
+
assert_raises(TypeError, np.dtype, 'f8,i8,[f8,i8]')
|
| 1555 |
+
assert_raises(TypeError, np.dtype, 'Fl\xfcgel')
|
| 1556 |
+
|
| 1557 |
+
|
| 1558 |
+
def test_keyword_argument():
|
| 1559 |
+
# test for https://github.com/numpy/numpy/pull/16574#issuecomment-642660971
|
| 1560 |
+
assert np.dtype(dtype=np.float64) == np.dtype(np.float64)
|
| 1561 |
+
|
| 1562 |
+
|
| 1563 |
+
class TestFromDTypeAttribute:
|
| 1564 |
+
def test_simple(self):
|
| 1565 |
+
class dt:
|
| 1566 |
+
dtype = np.dtype("f8")
|
| 1567 |
+
|
| 1568 |
+
assert np.dtype(dt) == np.float64
|
| 1569 |
+
assert np.dtype(dt()) == np.float64
|
| 1570 |
+
|
| 1571 |
+
@pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking")
|
| 1572 |
+
def test_recursion(self):
|
| 1573 |
+
class dt:
|
| 1574 |
+
pass
|
| 1575 |
+
|
| 1576 |
+
dt.dtype = dt
|
| 1577 |
+
with pytest.raises(RecursionError):
|
| 1578 |
+
np.dtype(dt)
|
| 1579 |
+
|
| 1580 |
+
dt_instance = dt()
|
| 1581 |
+
dt_instance.dtype = dt
|
| 1582 |
+
with pytest.raises(RecursionError):
|
| 1583 |
+
np.dtype(dt_instance)
|
| 1584 |
+
|
| 1585 |
+
def test_void_subtype(self):
|
| 1586 |
+
class dt(np.void):
|
| 1587 |
+
# This code path is fully untested before, so it is unclear
|
| 1588 |
+
# what this should be useful for. Note that if np.void is used
|
| 1589 |
+
# numpy will think we are deallocating a base type [1.17, 2019-02].
|
| 1590 |
+
dtype = np.dtype("f,f")
|
| 1591 |
+
|
| 1592 |
+
np.dtype(dt)
|
| 1593 |
+
np.dtype(dt(1))
|
| 1594 |
+
|
| 1595 |
+
@pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking")
|
| 1596 |
+
def test_void_subtype_recursion(self):
|
| 1597 |
+
class vdt(np.void):
|
| 1598 |
+
pass
|
| 1599 |
+
|
| 1600 |
+
vdt.dtype = vdt
|
| 1601 |
+
|
| 1602 |
+
with pytest.raises(RecursionError):
|
| 1603 |
+
np.dtype(vdt)
|
| 1604 |
+
|
| 1605 |
+
with pytest.raises(RecursionError):
|
| 1606 |
+
np.dtype(vdt(1))
|
| 1607 |
+
|
| 1608 |
+
|
| 1609 |
+
class TestDTypeClasses:
|
| 1610 |
+
@pytest.mark.parametrize("dtype", list(np.typecodes['All']) + [rational])
|
| 1611 |
+
def test_basic_dtypes_subclass_properties(self, dtype):
|
| 1612 |
+
# Note: Except for the isinstance and type checks, these attributes
|
| 1613 |
+
# are considered currently private and may change.
|
| 1614 |
+
dtype = np.dtype(dtype)
|
| 1615 |
+
assert isinstance(dtype, np.dtype)
|
| 1616 |
+
assert type(dtype) is not np.dtype
|
| 1617 |
+
if dtype.type.__name__ != "rational":
|
| 1618 |
+
dt_name = type(dtype).__name__.lower().removesuffix("dtype")
|
| 1619 |
+
if dt_name == "uint" or dt_name == "int":
|
| 1620 |
+
# The scalar names has a `c` attached because "int" is Python
|
| 1621 |
+
# int and that is long...
|
| 1622 |
+
dt_name += "c"
|
| 1623 |
+
sc_name = dtype.type.__name__
|
| 1624 |
+
assert dt_name == sc_name.strip("_")
|
| 1625 |
+
assert type(dtype).__module__ == "numpy.dtypes"
|
| 1626 |
+
|
| 1627 |
+
assert getattr(numpy.dtypes, type(dtype).__name__) is type(dtype)
|
| 1628 |
+
else:
|
| 1629 |
+
assert type(dtype).__name__ == "dtype[rational]"
|
| 1630 |
+
assert type(dtype).__module__ == "numpy"
|
| 1631 |
+
|
| 1632 |
+
assert not type(dtype)._abstract
|
| 1633 |
+
|
| 1634 |
+
# the flexible dtypes and datetime/timedelta have additional parameters
|
| 1635 |
+
# which are more than just storage information, these would need to be
|
| 1636 |
+
# given when creating a dtype:
|
| 1637 |
+
parametric = (np.void, np.str_, np.bytes_, np.datetime64, np.timedelta64)
|
| 1638 |
+
if dtype.type not in parametric:
|
| 1639 |
+
assert not type(dtype)._parametric
|
| 1640 |
+
assert type(dtype)() is dtype
|
| 1641 |
+
else:
|
| 1642 |
+
assert type(dtype)._parametric
|
| 1643 |
+
with assert_raises(TypeError):
|
| 1644 |
+
type(dtype)()
|
| 1645 |
+
|
| 1646 |
+
def test_dtype_superclass(self):
|
| 1647 |
+
assert type(np.dtype) is not type
|
| 1648 |
+
assert isinstance(np.dtype, type)
|
| 1649 |
+
|
| 1650 |
+
assert type(np.dtype).__name__ == "_DTypeMeta"
|
| 1651 |
+
assert type(np.dtype).__module__ == "numpy"
|
| 1652 |
+
assert np.dtype._abstract
|
| 1653 |
+
|
| 1654 |
+
def test_is_numeric(self):
|
| 1655 |
+
all_codes = set(np.typecodes['All'])
|
| 1656 |
+
numeric_codes = set(np.typecodes['AllInteger'] +
|
| 1657 |
+
np.typecodes['AllFloat'] + '?')
|
| 1658 |
+
non_numeric_codes = all_codes - numeric_codes
|
| 1659 |
+
|
| 1660 |
+
for code in numeric_codes:
|
| 1661 |
+
assert type(np.dtype(code))._is_numeric
|
| 1662 |
+
|
| 1663 |
+
for code in non_numeric_codes:
|
| 1664 |
+
assert not type(np.dtype(code))._is_numeric
|
| 1665 |
+
|
| 1666 |
+
@pytest.mark.parametrize("int_", ["UInt", "Int"])
|
| 1667 |
+
@pytest.mark.parametrize("size", [8, 16, 32, 64])
|
| 1668 |
+
def test_integer_alias_names(self, int_, size):
|
| 1669 |
+
DType = getattr(numpy.dtypes, f"{int_}{size}DType")
|
| 1670 |
+
sctype = getattr(numpy, f"{int_.lower()}{size}")
|
| 1671 |
+
assert DType.type is sctype
|
| 1672 |
+
assert DType.__name__.lower().removesuffix("dtype") == sctype.__name__
|
| 1673 |
+
|
| 1674 |
+
@pytest.mark.parametrize("name",
|
| 1675 |
+
["Half", "Float", "Double", "CFloat", "CDouble"])
|
| 1676 |
+
def test_float_alias_names(self, name):
|
| 1677 |
+
with pytest.raises(AttributeError):
|
| 1678 |
+
getattr(numpy.dtypes, name + "DType") is numpy.dtypes.Float16DType
|
| 1679 |
+
|
| 1680 |
+
|
| 1681 |
+
class TestFromCTypes:
|
| 1682 |
+
|
| 1683 |
+
@staticmethod
|
| 1684 |
+
def check(ctype, dtype):
|
| 1685 |
+
dtype = np.dtype(dtype)
|
| 1686 |
+
assert np.dtype(ctype) == dtype
|
| 1687 |
+
assert np.dtype(ctype()) == dtype
|
| 1688 |
+
assert ctypes.sizeof(ctype) == dtype.itemsize
|
| 1689 |
+
|
| 1690 |
+
def test_array(self):
|
| 1691 |
+
c8 = ctypes.c_uint8
|
| 1692 |
+
self.check( 3 * c8, (np.uint8, (3,)))
|
| 1693 |
+
self.check( 1 * c8, (np.uint8, (1,)))
|
| 1694 |
+
self.check( 0 * c8, (np.uint8, (0,)))
|
| 1695 |
+
self.check(1 * (3 * c8), ((np.uint8, (3,)), (1,)))
|
| 1696 |
+
self.check(3 * (1 * c8), ((np.uint8, (1,)), (3,)))
|
| 1697 |
+
|
| 1698 |
+
def test_padded_structure(self):
|
| 1699 |
+
class PaddedStruct(ctypes.Structure):
|
| 1700 |
+
_fields_ = [
|
| 1701 |
+
('a', ctypes.c_uint8),
|
| 1702 |
+
('b', ctypes.c_uint16)
|
| 1703 |
+
]
|
| 1704 |
+
expected = np.dtype([
|
| 1705 |
+
('a', np.uint8),
|
| 1706 |
+
('b', np.uint16)
|
| 1707 |
+
], align=True)
|
| 1708 |
+
self.check(PaddedStruct, expected)
|
| 1709 |
+
|
| 1710 |
+
def test_bit_fields(self):
|
| 1711 |
+
class BitfieldStruct(ctypes.Structure):
|
| 1712 |
+
_fields_ = [
|
| 1713 |
+
('a', ctypes.c_uint8, 7),
|
| 1714 |
+
('b', ctypes.c_uint8, 1)
|
| 1715 |
+
]
|
| 1716 |
+
assert_raises(TypeError, np.dtype, BitfieldStruct)
|
| 1717 |
+
assert_raises(TypeError, np.dtype, BitfieldStruct())
|
| 1718 |
+
|
| 1719 |
+
def test_pointer(self):
|
| 1720 |
+
p_uint8 = ctypes.POINTER(ctypes.c_uint8)
|
| 1721 |
+
assert_raises(TypeError, np.dtype, p_uint8)
|
| 1722 |
+
|
| 1723 |
+
def test_size_t(self):
|
| 1724 |
+
assert np.dtype(np.uintp) is np.dtype("N")
|
| 1725 |
+
self.check(ctypes.c_size_t, np.uintp)
|
| 1726 |
+
|
| 1727 |
+
def test_void_pointer(self):
|
| 1728 |
+
self.check(ctypes.c_void_p, "P")
|
| 1729 |
+
|
| 1730 |
+
def test_union(self):
|
| 1731 |
+
class Union(ctypes.Union):
|
| 1732 |
+
_fields_ = [
|
| 1733 |
+
('a', ctypes.c_uint8),
|
| 1734 |
+
('b', ctypes.c_uint16),
|
| 1735 |
+
]
|
| 1736 |
+
expected = np.dtype(dict(
|
| 1737 |
+
names=['a', 'b'],
|
| 1738 |
+
formats=[np.uint8, np.uint16],
|
| 1739 |
+
offsets=[0, 0],
|
| 1740 |
+
itemsize=2
|
| 1741 |
+
))
|
| 1742 |
+
self.check(Union, expected)
|
| 1743 |
+
|
| 1744 |
+
def test_union_with_struct_packed(self):
|
| 1745 |
+
class Struct(ctypes.Structure):
|
| 1746 |
+
_pack_ = 1
|
| 1747 |
+
_fields_ = [
|
| 1748 |
+
('one', ctypes.c_uint8),
|
| 1749 |
+
('two', ctypes.c_uint32)
|
| 1750 |
+
]
|
| 1751 |
+
|
| 1752 |
+
class Union(ctypes.Union):
|
| 1753 |
+
_fields_ = [
|
| 1754 |
+
('a', ctypes.c_uint8),
|
| 1755 |
+
('b', ctypes.c_uint16),
|
| 1756 |
+
('c', ctypes.c_uint32),
|
| 1757 |
+
('d', Struct),
|
| 1758 |
+
]
|
| 1759 |
+
expected = np.dtype(dict(
|
| 1760 |
+
names=['a', 'b', 'c', 'd'],
|
| 1761 |
+
formats=['u1', np.uint16, np.uint32, [('one', 'u1'), ('two', np.uint32)]],
|
| 1762 |
+
offsets=[0, 0, 0, 0],
|
| 1763 |
+
itemsize=ctypes.sizeof(Union)
|
| 1764 |
+
))
|
| 1765 |
+
self.check(Union, expected)
|
| 1766 |
+
|
| 1767 |
+
def test_union_packed(self):
|
| 1768 |
+
class Struct(ctypes.Structure):
|
| 1769 |
+
_fields_ = [
|
| 1770 |
+
('one', ctypes.c_uint8),
|
| 1771 |
+
('two', ctypes.c_uint32)
|
| 1772 |
+
]
|
| 1773 |
+
_pack_ = 1
|
| 1774 |
+
class Union(ctypes.Union):
|
| 1775 |
+
_pack_ = 1
|
| 1776 |
+
_fields_ = [
|
| 1777 |
+
('a', ctypes.c_uint8),
|
| 1778 |
+
('b', ctypes.c_uint16),
|
| 1779 |
+
('c', ctypes.c_uint32),
|
| 1780 |
+
('d', Struct),
|
| 1781 |
+
]
|
| 1782 |
+
expected = np.dtype(dict(
|
| 1783 |
+
names=['a', 'b', 'c', 'd'],
|
| 1784 |
+
formats=['u1', np.uint16, np.uint32, [('one', 'u1'), ('two', np.uint32)]],
|
| 1785 |
+
offsets=[0, 0, 0, 0],
|
| 1786 |
+
itemsize=ctypes.sizeof(Union)
|
| 1787 |
+
))
|
| 1788 |
+
self.check(Union, expected)
|
| 1789 |
+
|
| 1790 |
+
def test_packed_structure(self):
|
| 1791 |
+
class PackedStructure(ctypes.Structure):
|
| 1792 |
+
_pack_ = 1
|
| 1793 |
+
_fields_ = [
|
| 1794 |
+
('a', ctypes.c_uint8),
|
| 1795 |
+
('b', ctypes.c_uint16)
|
| 1796 |
+
]
|
| 1797 |
+
expected = np.dtype([
|
| 1798 |
+
('a', np.uint8),
|
| 1799 |
+
('b', np.uint16)
|
| 1800 |
+
])
|
| 1801 |
+
self.check(PackedStructure, expected)
|
| 1802 |
+
|
| 1803 |
+
def test_large_packed_structure(self):
|
| 1804 |
+
class PackedStructure(ctypes.Structure):
|
| 1805 |
+
_pack_ = 2
|
| 1806 |
+
_fields_ = [
|
| 1807 |
+
('a', ctypes.c_uint8),
|
| 1808 |
+
('b', ctypes.c_uint16),
|
| 1809 |
+
('c', ctypes.c_uint8),
|
| 1810 |
+
('d', ctypes.c_uint16),
|
| 1811 |
+
('e', ctypes.c_uint32),
|
| 1812 |
+
('f', ctypes.c_uint32),
|
| 1813 |
+
('g', ctypes.c_uint8)
|
| 1814 |
+
]
|
| 1815 |
+
expected = np.dtype(dict(
|
| 1816 |
+
formats=[np.uint8, np.uint16, np.uint8, np.uint16, np.uint32, np.uint32, np.uint8 ],
|
| 1817 |
+
offsets=[0, 2, 4, 6, 8, 12, 16],
|
| 1818 |
+
names=['a', 'b', 'c', 'd', 'e', 'f', 'g'],
|
| 1819 |
+
itemsize=18))
|
| 1820 |
+
self.check(PackedStructure, expected)
|
| 1821 |
+
|
| 1822 |
+
def test_big_endian_structure_packed(self):
|
| 1823 |
+
class BigEndStruct(ctypes.BigEndianStructure):
|
| 1824 |
+
_fields_ = [
|
| 1825 |
+
('one', ctypes.c_uint8),
|
| 1826 |
+
('two', ctypes.c_uint32)
|
| 1827 |
+
]
|
| 1828 |
+
_pack_ = 1
|
| 1829 |
+
expected = np.dtype([('one', 'u1'), ('two', '>u4')])
|
| 1830 |
+
self.check(BigEndStruct, expected)
|
| 1831 |
+
|
| 1832 |
+
def test_little_endian_structure_packed(self):
|
| 1833 |
+
class LittleEndStruct(ctypes.LittleEndianStructure):
|
| 1834 |
+
_fields_ = [
|
| 1835 |
+
('one', ctypes.c_uint8),
|
| 1836 |
+
('two', ctypes.c_uint32)
|
| 1837 |
+
]
|
| 1838 |
+
_pack_ = 1
|
| 1839 |
+
expected = np.dtype([('one', 'u1'), ('two', '<u4')])
|
| 1840 |
+
self.check(LittleEndStruct, expected)
|
| 1841 |
+
|
| 1842 |
+
def test_little_endian_structure(self):
|
| 1843 |
+
class PaddedStruct(ctypes.LittleEndianStructure):
|
| 1844 |
+
_fields_ = [
|
| 1845 |
+
('a', ctypes.c_uint8),
|
| 1846 |
+
('b', ctypes.c_uint16)
|
| 1847 |
+
]
|
| 1848 |
+
expected = np.dtype([
|
| 1849 |
+
('a', '<B'),
|
| 1850 |
+
('b', '<H')
|
| 1851 |
+
], align=True)
|
| 1852 |
+
self.check(PaddedStruct, expected)
|
| 1853 |
+
|
| 1854 |
+
def test_big_endian_structure(self):
|
| 1855 |
+
class PaddedStruct(ctypes.BigEndianStructure):
|
| 1856 |
+
_fields_ = [
|
| 1857 |
+
('a', ctypes.c_uint8),
|
| 1858 |
+
('b', ctypes.c_uint16)
|
| 1859 |
+
]
|
| 1860 |
+
expected = np.dtype([
|
| 1861 |
+
('a', '>B'),
|
| 1862 |
+
('b', '>H')
|
| 1863 |
+
], align=True)
|
| 1864 |
+
self.check(PaddedStruct, expected)
|
| 1865 |
+
|
| 1866 |
+
def test_simple_endian_types(self):
|
| 1867 |
+
self.check(ctypes.c_uint16.__ctype_le__, np.dtype('<u2'))
|
| 1868 |
+
self.check(ctypes.c_uint16.__ctype_be__, np.dtype('>u2'))
|
| 1869 |
+
self.check(ctypes.c_uint8.__ctype_le__, np.dtype('u1'))
|
| 1870 |
+
self.check(ctypes.c_uint8.__ctype_be__, np.dtype('u1'))
|
| 1871 |
+
|
| 1872 |
+
all_types = set(np.typecodes['All'])
|
| 1873 |
+
all_pairs = permutations(all_types, 2)
|
| 1874 |
+
|
| 1875 |
+
@pytest.mark.parametrize("pair", all_pairs)
|
| 1876 |
+
def test_pairs(self, pair):
|
| 1877 |
+
"""
|
| 1878 |
+
Check that np.dtype('x,y') matches [np.dtype('x'), np.dtype('y')]
|
| 1879 |
+
Example: np.dtype('d,I') -> dtype([('f0', '<f8'), ('f1', '<u4')])
|
| 1880 |
+
"""
|
| 1881 |
+
# gh-5645: check that np.dtype('i,L') can be used
|
| 1882 |
+
pair_type = np.dtype('{},{}'.format(*pair))
|
| 1883 |
+
expected = np.dtype([('f0', pair[0]), ('f1', pair[1])])
|
| 1884 |
+
assert_equal(pair_type, expected)
|
| 1885 |
+
|
| 1886 |
+
|
| 1887 |
+
class TestUserDType:
|
| 1888 |
+
@pytest.mark.leaks_references(reason="dynamically creates custom dtype.")
|
| 1889 |
+
def test_custom_structured_dtype(self):
|
| 1890 |
+
class mytype:
|
| 1891 |
+
pass
|
| 1892 |
+
|
| 1893 |
+
blueprint = np.dtype([("field", object)])
|
| 1894 |
+
dt = create_custom_field_dtype(blueprint, mytype, 0)
|
| 1895 |
+
assert dt.type == mytype
|
| 1896 |
+
# We cannot (currently) *create* this dtype with `np.dtype` because
|
| 1897 |
+
# mytype does not inherit from `np.generic`. This seems like an
|
| 1898 |
+
# unnecessary restriction, but one that has been around forever:
|
| 1899 |
+
assert np.dtype(mytype) == np.dtype("O")
|
| 1900 |
+
|
| 1901 |
+
if HAS_REFCOUNT:
|
| 1902 |
+
# Create an array and test that memory gets cleaned up (gh-25949)
|
| 1903 |
+
o = object()
|
| 1904 |
+
a = np.array([o], dtype=dt)
|
| 1905 |
+
del a
|
| 1906 |
+
assert sys.getrefcount(o) == 2
|
| 1907 |
+
|
| 1908 |
+
def test_custom_structured_dtype_errors(self):
|
| 1909 |
+
class mytype:
|
| 1910 |
+
pass
|
| 1911 |
+
|
| 1912 |
+
blueprint = np.dtype([("field", object)])
|
| 1913 |
+
|
| 1914 |
+
with pytest.raises(ValueError):
|
| 1915 |
+
# Tests what happens if fields are unset during creation
|
| 1916 |
+
# which is currently rejected due to the containing object
|
| 1917 |
+
# (see PyArray_RegisterDataType).
|
| 1918 |
+
create_custom_field_dtype(blueprint, mytype, 1)
|
| 1919 |
+
|
| 1920 |
+
with pytest.raises(RuntimeError):
|
| 1921 |
+
# Tests that a dtype must have its type field set up to np.dtype
|
| 1922 |
+
# or in this case a builtin instance.
|
| 1923 |
+
create_custom_field_dtype(blueprint, mytype, 2)
|
| 1924 |
+
|
| 1925 |
+
|
| 1926 |
+
class TestClassGetItem:
|
| 1927 |
+
def test_dtype(self) -> None:
|
| 1928 |
+
alias = np.dtype[Any]
|
| 1929 |
+
assert isinstance(alias, types.GenericAlias)
|
| 1930 |
+
assert alias.__origin__ is np.dtype
|
| 1931 |
+
|
| 1932 |
+
@pytest.mark.parametrize("code", np.typecodes["All"])
|
| 1933 |
+
def test_dtype_subclass(self, code: str) -> None:
|
| 1934 |
+
cls = type(np.dtype(code))
|
| 1935 |
+
alias = cls[Any]
|
| 1936 |
+
assert isinstance(alias, types.GenericAlias)
|
| 1937 |
+
assert alias.__origin__ is cls
|
| 1938 |
+
|
| 1939 |
+
@pytest.mark.parametrize("arg_len", range(4))
|
| 1940 |
+
def test_subscript_tuple(self, arg_len: int) -> None:
|
| 1941 |
+
arg_tup = (Any,) * arg_len
|
| 1942 |
+
if arg_len == 1:
|
| 1943 |
+
assert np.dtype[arg_tup]
|
| 1944 |
+
else:
|
| 1945 |
+
with pytest.raises(TypeError):
|
| 1946 |
+
np.dtype[arg_tup]
|
| 1947 |
+
|
| 1948 |
+
def test_subscript_scalar(self) -> None:
|
| 1949 |
+
assert np.dtype[Any]
|
| 1950 |
+
|
| 1951 |
+
|
| 1952 |
+
def test_result_type_integers_and_unitless_timedelta64():
|
| 1953 |
+
# Regression test for gh-20077. The following call of `result_type`
|
| 1954 |
+
# would cause a seg. fault.
|
| 1955 |
+
td = np.timedelta64(4)
|
| 1956 |
+
result = np.result_type(0, td)
|
| 1957 |
+
assert_dtype_equal(result, td.dtype)
|
| 1958 |
+
|
| 1959 |
+
|
| 1960 |
+
def test_creating_dtype_with_dtype_class_errors():
|
| 1961 |
+
# Regression test for #25031, calling `np.dtype` with itself segfaulted.
|
| 1962 |
+
with pytest.raises(TypeError, match="Cannot convert np.dtype into a"):
|
| 1963 |
+
np.array(np.ones(10), dtype=np.dtype)
|
mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_einsum.py
ADDED
|
@@ -0,0 +1,1229 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import itertools
|
| 2 |
+
|
| 3 |
+
import pytest
|
| 4 |
+
|
| 5 |
+
import numpy as np
|
| 6 |
+
from numpy.testing import (
|
| 7 |
+
assert_, assert_equal, assert_array_equal, assert_almost_equal,
|
| 8 |
+
assert_raises, suppress_warnings, assert_raises_regex, assert_allclose
|
| 9 |
+
)
|
| 10 |
+
|
| 11 |
+
# Setup for optimize einsum
|
| 12 |
+
chars = 'abcdefghij'
|
| 13 |
+
sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3])
|
| 14 |
+
global_size_dict = dict(zip(chars, sizes))
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class TestEinsum:
|
| 18 |
+
@pytest.mark.parametrize("do_opt", [True, False])
|
| 19 |
+
@pytest.mark.parametrize("einsum_fn", [np.einsum, np.einsum_path])
|
| 20 |
+
def test_einsum_errors(self, do_opt, einsum_fn):
|
| 21 |
+
# Need enough arguments
|
| 22 |
+
assert_raises(ValueError, einsum_fn, optimize=do_opt)
|
| 23 |
+
assert_raises(ValueError, einsum_fn, "", optimize=do_opt)
|
| 24 |
+
|
| 25 |
+
# subscripts must be a string
|
| 26 |
+
assert_raises(TypeError, einsum_fn, 0, 0, optimize=do_opt)
|
| 27 |
+
|
| 28 |
+
# issue 4528 revealed a segfault with this call
|
| 29 |
+
assert_raises(TypeError, einsum_fn, *(None,)*63, optimize=do_opt)
|
| 30 |
+
|
| 31 |
+
# number of operands must match count in subscripts string
|
| 32 |
+
assert_raises(ValueError, einsum_fn, "", 0, 0, optimize=do_opt)
|
| 33 |
+
assert_raises(ValueError, einsum_fn, ",", 0, [0], [0],
|
| 34 |
+
optimize=do_opt)
|
| 35 |
+
assert_raises(ValueError, einsum_fn, ",", [0], optimize=do_opt)
|
| 36 |
+
|
| 37 |
+
# can't have more subscripts than dimensions in the operand
|
| 38 |
+
assert_raises(ValueError, einsum_fn, "i", 0, optimize=do_opt)
|
| 39 |
+
assert_raises(ValueError, einsum_fn, "ij", [0, 0], optimize=do_opt)
|
| 40 |
+
assert_raises(ValueError, einsum_fn, "...i", 0, optimize=do_opt)
|
| 41 |
+
assert_raises(ValueError, einsum_fn, "i...j", [0, 0], optimize=do_opt)
|
| 42 |
+
assert_raises(ValueError, einsum_fn, "i...", 0, optimize=do_opt)
|
| 43 |
+
assert_raises(ValueError, einsum_fn, "ij...", [0, 0], optimize=do_opt)
|
| 44 |
+
|
| 45 |
+
# invalid ellipsis
|
| 46 |
+
assert_raises(ValueError, einsum_fn, "i..", [0, 0], optimize=do_opt)
|
| 47 |
+
assert_raises(ValueError, einsum_fn, ".i...", [0, 0], optimize=do_opt)
|
| 48 |
+
assert_raises(ValueError, einsum_fn, "j->..j", [0, 0], optimize=do_opt)
|
| 49 |
+
assert_raises(ValueError, einsum_fn, "j->.j...", [0, 0],
|
| 50 |
+
optimize=do_opt)
|
| 51 |
+
|
| 52 |
+
# invalid subscript character
|
| 53 |
+
assert_raises(ValueError, einsum_fn, "i%...", [0, 0], optimize=do_opt)
|
| 54 |
+
assert_raises(ValueError, einsum_fn, "...j$", [0, 0], optimize=do_opt)
|
| 55 |
+
assert_raises(ValueError, einsum_fn, "i->&", [0, 0], optimize=do_opt)
|
| 56 |
+
|
| 57 |
+
# output subscripts must appear in input
|
| 58 |
+
assert_raises(ValueError, einsum_fn, "i->ij", [0, 0], optimize=do_opt)
|
| 59 |
+
|
| 60 |
+
# output subscripts may only be specified once
|
| 61 |
+
assert_raises(ValueError, einsum_fn, "ij->jij", [[0, 0], [0, 0]],
|
| 62 |
+
optimize=do_opt)
|
| 63 |
+
|
| 64 |
+
# dimensions must match when being collapsed
|
| 65 |
+
assert_raises(ValueError, einsum_fn, "ii",
|
| 66 |
+
np.arange(6).reshape(2, 3), optimize=do_opt)
|
| 67 |
+
assert_raises(ValueError, einsum_fn, "ii->i",
|
| 68 |
+
np.arange(6).reshape(2, 3), optimize=do_opt)
|
| 69 |
+
|
| 70 |
+
with assert_raises_regex(ValueError, "'b'"):
|
| 71 |
+
# gh-11221 - 'c' erroneously appeared in the error message
|
| 72 |
+
a = np.ones((3, 3, 4, 5, 6))
|
| 73 |
+
b = np.ones((3, 4, 5))
|
| 74 |
+
einsum_fn('aabcb,abc', a, b)
|
| 75 |
+
|
| 76 |
+
@pytest.mark.parametrize("do_opt", [True, False])
|
| 77 |
+
def test_einsum_specific_errors(self, do_opt):
|
| 78 |
+
# out parameter must be an array
|
| 79 |
+
assert_raises(TypeError, np.einsum, "", 0, out='test',
|
| 80 |
+
optimize=do_opt)
|
| 81 |
+
|
| 82 |
+
# order parameter must be a valid order
|
| 83 |
+
assert_raises(ValueError, np.einsum, "", 0, order='W',
|
| 84 |
+
optimize=do_opt)
|
| 85 |
+
|
| 86 |
+
# casting parameter must be a valid casting
|
| 87 |
+
assert_raises(ValueError, np.einsum, "", 0, casting='blah',
|
| 88 |
+
optimize=do_opt)
|
| 89 |
+
|
| 90 |
+
# dtype parameter must be a valid dtype
|
| 91 |
+
assert_raises(TypeError, np.einsum, "", 0, dtype='bad_data_type',
|
| 92 |
+
optimize=do_opt)
|
| 93 |
+
|
| 94 |
+
# other keyword arguments are rejected
|
| 95 |
+
assert_raises(TypeError, np.einsum, "", 0, bad_arg=0, optimize=do_opt)
|
| 96 |
+
|
| 97 |
+
# broadcasting to new dimensions must be enabled explicitly
|
| 98 |
+
assert_raises(ValueError, np.einsum, "i", np.arange(6).reshape(2, 3),
|
| 99 |
+
optimize=do_opt)
|
| 100 |
+
assert_raises(ValueError, np.einsum, "i->i", [[0, 1], [0, 1]],
|
| 101 |
+
out=np.arange(4).reshape(2, 2), optimize=do_opt)
|
| 102 |
+
|
| 103 |
+
# Check order kwarg, asanyarray allows 1d to pass through
|
| 104 |
+
assert_raises(ValueError, np.einsum, "i->i",
|
| 105 |
+
np.arange(6).reshape(-1, 1), optimize=do_opt, order='d')
|
| 106 |
+
|
| 107 |
+
def test_einsum_object_errors(self):
|
| 108 |
+
# Exceptions created by object arithmetic should
|
| 109 |
+
# successfully propagate
|
| 110 |
+
|
| 111 |
+
class CustomException(Exception):
|
| 112 |
+
pass
|
| 113 |
+
|
| 114 |
+
class DestructoBox:
|
| 115 |
+
|
| 116 |
+
def __init__(self, value, destruct):
|
| 117 |
+
self._val = value
|
| 118 |
+
self._destruct = destruct
|
| 119 |
+
|
| 120 |
+
def __add__(self, other):
|
| 121 |
+
tmp = self._val + other._val
|
| 122 |
+
if tmp >= self._destruct:
|
| 123 |
+
raise CustomException
|
| 124 |
+
else:
|
| 125 |
+
self._val = tmp
|
| 126 |
+
return self
|
| 127 |
+
|
| 128 |
+
def __radd__(self, other):
|
| 129 |
+
if other == 0:
|
| 130 |
+
return self
|
| 131 |
+
else:
|
| 132 |
+
return self.__add__(other)
|
| 133 |
+
|
| 134 |
+
def __mul__(self, other):
|
| 135 |
+
tmp = self._val * other._val
|
| 136 |
+
if tmp >= self._destruct:
|
| 137 |
+
raise CustomException
|
| 138 |
+
else:
|
| 139 |
+
self._val = tmp
|
| 140 |
+
return self
|
| 141 |
+
|
| 142 |
+
def __rmul__(self, other):
|
| 143 |
+
if other == 0:
|
| 144 |
+
return self
|
| 145 |
+
else:
|
| 146 |
+
return self.__mul__(other)
|
| 147 |
+
|
| 148 |
+
a = np.array([DestructoBox(i, 5) for i in range(1, 10)],
|
| 149 |
+
dtype='object').reshape(3, 3)
|
| 150 |
+
|
| 151 |
+
# raised from unbuffered_loop_nop1_ndim2
|
| 152 |
+
assert_raises(CustomException, np.einsum, "ij->i", a)
|
| 153 |
+
|
| 154 |
+
# raised from unbuffered_loop_nop1_ndim3
|
| 155 |
+
b = np.array([DestructoBox(i, 100) for i in range(0, 27)],
|
| 156 |
+
dtype='object').reshape(3, 3, 3)
|
| 157 |
+
assert_raises(CustomException, np.einsum, "i...k->...", b)
|
| 158 |
+
|
| 159 |
+
# raised from unbuffered_loop_nop2_ndim2
|
| 160 |
+
b = np.array([DestructoBox(i, 55) for i in range(1, 4)],
|
| 161 |
+
dtype='object')
|
| 162 |
+
assert_raises(CustomException, np.einsum, "ij, j", a, b)
|
| 163 |
+
|
| 164 |
+
# raised from unbuffered_loop_nop2_ndim3
|
| 165 |
+
assert_raises(CustomException, np.einsum, "ij, jh", a, a)
|
| 166 |
+
|
| 167 |
+
# raised from PyArray_EinsteinSum
|
| 168 |
+
assert_raises(CustomException, np.einsum, "ij->", a)
|
| 169 |
+
|
| 170 |
+
def test_einsum_views(self):
|
| 171 |
+
# pass-through
|
| 172 |
+
for do_opt in [True, False]:
|
| 173 |
+
a = np.arange(6)
|
| 174 |
+
a.shape = (2, 3)
|
| 175 |
+
|
| 176 |
+
b = np.einsum("...", a, optimize=do_opt)
|
| 177 |
+
assert_(b.base is a)
|
| 178 |
+
|
| 179 |
+
b = np.einsum(a, [Ellipsis], optimize=do_opt)
|
| 180 |
+
assert_(b.base is a)
|
| 181 |
+
|
| 182 |
+
b = np.einsum("ij", a, optimize=do_opt)
|
| 183 |
+
assert_(b.base is a)
|
| 184 |
+
assert_equal(b, a)
|
| 185 |
+
|
| 186 |
+
b = np.einsum(a, [0, 1], optimize=do_opt)
|
| 187 |
+
assert_(b.base is a)
|
| 188 |
+
assert_equal(b, a)
|
| 189 |
+
|
| 190 |
+
# output is writeable whenever input is writeable
|
| 191 |
+
b = np.einsum("...", a, optimize=do_opt)
|
| 192 |
+
assert_(b.flags['WRITEABLE'])
|
| 193 |
+
a.flags['WRITEABLE'] = False
|
| 194 |
+
b = np.einsum("...", a, optimize=do_opt)
|
| 195 |
+
assert_(not b.flags['WRITEABLE'])
|
| 196 |
+
|
| 197 |
+
# transpose
|
| 198 |
+
a = np.arange(6)
|
| 199 |
+
a.shape = (2, 3)
|
| 200 |
+
|
| 201 |
+
b = np.einsum("ji", a, optimize=do_opt)
|
| 202 |
+
assert_(b.base is a)
|
| 203 |
+
assert_equal(b, a.T)
|
| 204 |
+
|
| 205 |
+
b = np.einsum(a, [1, 0], optimize=do_opt)
|
| 206 |
+
assert_(b.base is a)
|
| 207 |
+
assert_equal(b, a.T)
|
| 208 |
+
|
| 209 |
+
# diagonal
|
| 210 |
+
a = np.arange(9)
|
| 211 |
+
a.shape = (3, 3)
|
| 212 |
+
|
| 213 |
+
b = np.einsum("ii->i", a, optimize=do_opt)
|
| 214 |
+
assert_(b.base is a)
|
| 215 |
+
assert_equal(b, [a[i, i] for i in range(3)])
|
| 216 |
+
|
| 217 |
+
b = np.einsum(a, [0, 0], [0], optimize=do_opt)
|
| 218 |
+
assert_(b.base is a)
|
| 219 |
+
assert_equal(b, [a[i, i] for i in range(3)])
|
| 220 |
+
|
| 221 |
+
# diagonal with various ways of broadcasting an additional dimension
|
| 222 |
+
a = np.arange(27)
|
| 223 |
+
a.shape = (3, 3, 3)
|
| 224 |
+
|
| 225 |
+
b = np.einsum("...ii->...i", a, optimize=do_opt)
|
| 226 |
+
assert_(b.base is a)
|
| 227 |
+
assert_equal(b, [[x[i, i] for i in range(3)] for x in a])
|
| 228 |
+
|
| 229 |
+
b = np.einsum(a, [Ellipsis, 0, 0], [Ellipsis, 0], optimize=do_opt)
|
| 230 |
+
assert_(b.base is a)
|
| 231 |
+
assert_equal(b, [[x[i, i] for i in range(3)] for x in a])
|
| 232 |
+
|
| 233 |
+
b = np.einsum("ii...->...i", a, optimize=do_opt)
|
| 234 |
+
assert_(b.base is a)
|
| 235 |
+
assert_equal(b, [[x[i, i] for i in range(3)]
|
| 236 |
+
for x in a.transpose(2, 0, 1)])
|
| 237 |
+
|
| 238 |
+
b = np.einsum(a, [0, 0, Ellipsis], [Ellipsis, 0], optimize=do_opt)
|
| 239 |
+
assert_(b.base is a)
|
| 240 |
+
assert_equal(b, [[x[i, i] for i in range(3)]
|
| 241 |
+
for x in a.transpose(2, 0, 1)])
|
| 242 |
+
|
| 243 |
+
b = np.einsum("...ii->i...", a, optimize=do_opt)
|
| 244 |
+
assert_(b.base is a)
|
| 245 |
+
assert_equal(b, [a[:, i, i] for i in range(3)])
|
| 246 |
+
|
| 247 |
+
b = np.einsum(a, [Ellipsis, 0, 0], [0, Ellipsis], optimize=do_opt)
|
| 248 |
+
assert_(b.base is a)
|
| 249 |
+
assert_equal(b, [a[:, i, i] for i in range(3)])
|
| 250 |
+
|
| 251 |
+
b = np.einsum("jii->ij", a, optimize=do_opt)
|
| 252 |
+
assert_(b.base is a)
|
| 253 |
+
assert_equal(b, [a[:, i, i] for i in range(3)])
|
| 254 |
+
|
| 255 |
+
b = np.einsum(a, [1, 0, 0], [0, 1], optimize=do_opt)
|
| 256 |
+
assert_(b.base is a)
|
| 257 |
+
assert_equal(b, [a[:, i, i] for i in range(3)])
|
| 258 |
+
|
| 259 |
+
b = np.einsum("ii...->i...", a, optimize=do_opt)
|
| 260 |
+
assert_(b.base is a)
|
| 261 |
+
assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)])
|
| 262 |
+
|
| 263 |
+
b = np.einsum(a, [0, 0, Ellipsis], [0, Ellipsis], optimize=do_opt)
|
| 264 |
+
assert_(b.base is a)
|
| 265 |
+
assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)])
|
| 266 |
+
|
| 267 |
+
b = np.einsum("i...i->i...", a, optimize=do_opt)
|
| 268 |
+
assert_(b.base is a)
|
| 269 |
+
assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)])
|
| 270 |
+
|
| 271 |
+
b = np.einsum(a, [0, Ellipsis, 0], [0, Ellipsis], optimize=do_opt)
|
| 272 |
+
assert_(b.base is a)
|
| 273 |
+
assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)])
|
| 274 |
+
|
| 275 |
+
b = np.einsum("i...i->...i", a, optimize=do_opt)
|
| 276 |
+
assert_(b.base is a)
|
| 277 |
+
assert_equal(b, [[x[i, i] for i in range(3)]
|
| 278 |
+
for x in a.transpose(1, 0, 2)])
|
| 279 |
+
|
| 280 |
+
b = np.einsum(a, [0, Ellipsis, 0], [Ellipsis, 0], optimize=do_opt)
|
| 281 |
+
assert_(b.base is a)
|
| 282 |
+
assert_equal(b, [[x[i, i] for i in range(3)]
|
| 283 |
+
for x in a.transpose(1, 0, 2)])
|
| 284 |
+
|
| 285 |
+
# triple diagonal
|
| 286 |
+
a = np.arange(27)
|
| 287 |
+
a.shape = (3, 3, 3)
|
| 288 |
+
|
| 289 |
+
b = np.einsum("iii->i", a, optimize=do_opt)
|
| 290 |
+
assert_(b.base is a)
|
| 291 |
+
assert_equal(b, [a[i, i, i] for i in range(3)])
|
| 292 |
+
|
| 293 |
+
b = np.einsum(a, [0, 0, 0], [0], optimize=do_opt)
|
| 294 |
+
assert_(b.base is a)
|
| 295 |
+
assert_equal(b, [a[i, i, i] for i in range(3)])
|
| 296 |
+
|
| 297 |
+
# swap axes
|
| 298 |
+
a = np.arange(24)
|
| 299 |
+
a.shape = (2, 3, 4)
|
| 300 |
+
|
| 301 |
+
b = np.einsum("ijk->jik", a, optimize=do_opt)
|
| 302 |
+
assert_(b.base is a)
|
| 303 |
+
assert_equal(b, a.swapaxes(0, 1))
|
| 304 |
+
|
| 305 |
+
b = np.einsum(a, [0, 1, 2], [1, 0, 2], optimize=do_opt)
|
| 306 |
+
assert_(b.base is a)
|
| 307 |
+
assert_equal(b, a.swapaxes(0, 1))
|
| 308 |
+
|
| 309 |
+
def check_einsum_sums(self, dtype, do_opt=False):
    """Run a battery of einsum reduction/product checks for ``dtype``.

    Parameters
    ----------
    dtype : dtype-like
        The dtype the operands are constructed with.
    do_opt : bool
        Forwarded as ``optimize=`` to most of the einsum calls so the
        same battery covers both the direct and the optimized paths.
    """
    dtype = np.dtype(dtype)
    # Check various sums.  Does many sizes to exercise unrolled loops.

    # sum(a, axis=-1)
    for n in range(1, 17):
        a = np.arange(n, dtype=dtype)
        b = np.sum(a, axis=-1)
        if hasattr(b, 'astype'):
            # np.sum may up-cast (e.g. small ints); compare in `dtype`.
            b = b.astype(dtype)
        assert_equal(np.einsum("i->", a, optimize=do_opt), b)
        assert_equal(np.einsum(a, [0], [], optimize=do_opt), b)

    for n in range(1, 17):
        a = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n)
        b = np.sum(a, axis=-1)
        if hasattr(b, 'astype'):
            b = b.astype(dtype)
        assert_equal(np.einsum("...i->...", a, optimize=do_opt), b)
        assert_equal(np.einsum(a, [Ellipsis, 0], [Ellipsis], optimize=do_opt), b)

    # sum(a, axis=0)
    for n in range(1, 17):
        a = np.arange(2*n, dtype=dtype).reshape(2, n)
        b = np.sum(a, axis=0)
        if hasattr(b, 'astype'):
            b = b.astype(dtype)
        assert_equal(np.einsum("i...->...", a, optimize=do_opt), b)
        assert_equal(np.einsum(a, [0, Ellipsis], [Ellipsis], optimize=do_opt), b)

    for n in range(1, 17):
        a = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n)
        b = np.sum(a, axis=0)
        if hasattr(b, 'astype'):
            b = b.astype(dtype)
        assert_equal(np.einsum("i...->...", a, optimize=do_opt), b)
        assert_equal(np.einsum(a, [0, Ellipsis], [Ellipsis], optimize=do_opt), b)

    # trace(a)
    for n in range(1, 17):
        a = np.arange(n*n, dtype=dtype).reshape(n, n)
        b = np.trace(a)
        if hasattr(b, 'astype'):
            b = b.astype(dtype)
        assert_equal(np.einsum("ii", a, optimize=do_opt), b)
        assert_equal(np.einsum(a, [0, 0], optimize=do_opt), b)

        # gh-15961: should accept numpy int64 type in subscript list
        np_array = np.asarray([0, 0])
        assert_equal(np.einsum(a, np_array, optimize=do_opt), b)
        assert_equal(np.einsum(a, list(np_array), optimize=do_opt), b)

    # multiply(a, b)
    assert_equal(np.einsum("..., ...", 3, 4), 12)  # scalar case
    for n in range(1, 17):
        a = np.arange(3 * n, dtype=dtype).reshape(3, n)
        b = np.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n)
        assert_equal(np.einsum("..., ...", a, b, optimize=do_opt),
                     np.multiply(a, b))
        assert_equal(np.einsum(a, [Ellipsis], b, [Ellipsis], optimize=do_opt),
                     np.multiply(a, b))

    # inner(a,b)
    for n in range(1, 17):
        a = np.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n)
        b = np.arange(n, dtype=dtype)
        assert_equal(np.einsum("...i, ...i", a, b, optimize=do_opt), np.inner(a, b))
        assert_equal(np.einsum(a, [Ellipsis, 0], b, [Ellipsis, 0], optimize=do_opt),
                     np.inner(a, b))

    for n in range(1, 11):
        a = np.arange(n * 3 * 2, dtype=dtype).reshape(n, 3, 2)
        b = np.arange(n, dtype=dtype)
        assert_equal(np.einsum("i..., i...", a, b, optimize=do_opt),
                     np.inner(a.T, b.T).T)
        assert_equal(np.einsum(a, [0, Ellipsis], b, [0, Ellipsis], optimize=do_opt),
                     np.inner(a.T, b.T).T)

    # outer(a,b)
    for n in range(1, 17):
        a = np.arange(3, dtype=dtype)+1
        b = np.arange(n, dtype=dtype)+1
        assert_equal(np.einsum("i,j", a, b, optimize=do_opt),
                     np.outer(a, b))
        assert_equal(np.einsum(a, [0], b, [1], optimize=do_opt),
                     np.outer(a, b))

    # Suppress the complex warnings for the 'as f8' tests
    with suppress_warnings() as sup:
        sup.filter(np.exceptions.ComplexWarning)

        # matvec(a,b) / a.dot(b) where a is matrix, b is vector
        for n in range(1, 17):
            a = np.arange(4*n, dtype=dtype).reshape(4, n)
            b = np.arange(n, dtype=dtype)
            assert_equal(np.einsum("ij, j", a, b, optimize=do_opt),
                         np.dot(a, b))
            assert_equal(np.einsum(a, [0, 1], b, [1], optimize=do_opt),
                         np.dot(a, b))

            # Same contraction routed through an f8 accumulator via out=.
            c = np.arange(4, dtype=dtype)
            np.einsum("ij,j", a, b, out=c,
                      dtype='f8', casting='unsafe', optimize=do_opt)
            assert_equal(c,
                         np.dot(a.astype('f8'),
                                b.astype('f8')).astype(dtype))
            c[...] = 0
            np.einsum(a, [0, 1], b, [1], out=c,
                      dtype='f8', casting='unsafe', optimize=do_opt)
            assert_equal(c,
                         np.dot(a.astype('f8'),
                                b.astype('f8')).astype(dtype))

        for n in range(1, 17):
            a = np.arange(4*n, dtype=dtype).reshape(4, n)
            b = np.arange(n, dtype=dtype)
            # Transposed operands exercise the non-contiguous strides.
            assert_equal(np.einsum("ji,j", a.T, b.T, optimize=do_opt),
                         np.dot(b.T, a.T))
            assert_equal(np.einsum(a.T, [1, 0], b.T, [1], optimize=do_opt),
                         np.dot(b.T, a.T))

            c = np.arange(4, dtype=dtype)
            np.einsum("ji,j", a.T, b.T, out=c,
                      dtype='f8', casting='unsafe', optimize=do_opt)
            assert_equal(c,
                         np.dot(b.T.astype('f8'),
                                a.T.astype('f8')).astype(dtype))
            c[...] = 0
            np.einsum(a.T, [1, 0], b.T, [1], out=c,
                      dtype='f8', casting='unsafe', optimize=do_opt)
            assert_equal(c,
                         np.dot(b.T.astype('f8'),
                                a.T.astype('f8')).astype(dtype))

        # matmat(a,b) / a.dot(b) where a is matrix, b is matrix
        for n in range(1, 17):
            # float16 overflows for the larger sizes, so skip those.
            if n < 8 or dtype != 'f2':
                a = np.arange(4*n, dtype=dtype).reshape(4, n)
                b = np.arange(n*6, dtype=dtype).reshape(n, 6)
                assert_equal(np.einsum("ij,jk", a, b, optimize=do_opt),
                             np.dot(a, b))
                assert_equal(np.einsum(a, [0, 1], b, [1, 2], optimize=do_opt),
                             np.dot(a, b))

        for n in range(1, 17):
            a = np.arange(4*n, dtype=dtype).reshape(4, n)
            b = np.arange(n*6, dtype=dtype).reshape(n, 6)
            c = np.arange(24, dtype=dtype).reshape(4, 6)
            np.einsum("ij,jk", a, b, out=c, dtype='f8', casting='unsafe',
                      optimize=do_opt)
            assert_equal(c,
                         np.dot(a.astype('f8'),
                                b.astype('f8')).astype(dtype))
            c[...] = 0
            np.einsum(a, [0, 1], b, [1, 2], out=c,
                      dtype='f8', casting='unsafe', optimize=do_opt)
            assert_equal(c,
                         np.dot(a.astype('f8'),
                                b.astype('f8')).astype(dtype))

        # matrix triple product (note this is not currently an efficient
        # way to multiply 3 matrices)
        a = np.arange(12, dtype=dtype).reshape(3, 4)
        b = np.arange(20, dtype=dtype).reshape(4, 5)
        c = np.arange(30, dtype=dtype).reshape(5, 6)
        if dtype != 'f2':
            assert_equal(np.einsum("ij,jk,kl", a, b, c, optimize=do_opt),
                         a.dot(b).dot(c))
            assert_equal(np.einsum(a, [0, 1], b, [1, 2], c, [2, 3],
                                   optimize=do_opt), a.dot(b).dot(c))

            d = np.arange(18, dtype=dtype).reshape(3, 6)
            np.einsum("ij,jk,kl", a, b, c, out=d,
                      dtype='f8', casting='unsafe', optimize=do_opt)
            tgt = a.astype('f8').dot(b.astype('f8'))
            tgt = tgt.dot(c.astype('f8')).astype(dtype)
            assert_equal(d, tgt)

            d[...] = 0
            np.einsum(a, [0, 1], b, [1, 2], c, [2, 3], out=d,
                      dtype='f8', casting='unsafe', optimize=do_opt)
            tgt = a.astype('f8').dot(b.astype('f8'))
            tgt = tgt.dot(c.astype('f8')).astype(dtype)
            assert_equal(d, tgt)

        # tensordot(a, b)
        if np.dtype(dtype) != np.dtype('f2'):
            a = np.arange(60, dtype=dtype).reshape(3, 4, 5)
            b = np.arange(24, dtype=dtype).reshape(4, 3, 2)
            assert_equal(np.einsum("ijk, jil -> kl", a, b),
                         np.tensordot(a, b, axes=([1, 0], [0, 1])))
            assert_equal(np.einsum(a, [0, 1, 2], b, [1, 0, 3], [2, 3]),
                         np.tensordot(a, b, axes=([1, 0], [0, 1])))

            c = np.arange(10, dtype=dtype).reshape(5, 2)
            np.einsum("ijk,jil->kl", a, b, out=c,
                      dtype='f8', casting='unsafe', optimize=do_opt)
            assert_equal(c, np.tensordot(a.astype('f8'), b.astype('f8'),
                         axes=([1, 0], [0, 1])).astype(dtype))
            c[...] = 0
            np.einsum(a, [0, 1, 2], b, [1, 0, 3], [2, 3], out=c,
                      dtype='f8', casting='unsafe', optimize=do_opt)
            assert_equal(c, np.tensordot(a.astype('f8'), b.astype('f8'),
                         axes=([1, 0], [0, 1])).astype(dtype))

    # logical_and(logical_and(a!=0, b!=0), c!=0)
    # Unsigned dtypes cannot hold -2; use a large value as the stand-in.
    neg_val = -2 if dtype.kind != "u" else np.iinfo(dtype).max - 1
    a = np.array([1, 3, neg_val, 0, 12, 13, 0, 1], dtype=dtype)
    b = np.array([0, 3.5, 0., neg_val, 0, 1, 3, 12], dtype=dtype)
    c = np.array([True, True, False, True, True, False, True, True])

    assert_equal(np.einsum("i,i,i->i", a, b, c,
                 dtype='?', casting='unsafe', optimize=do_opt),
                 np.logical_and(np.logical_and(a != 0, b != 0), c != 0))
    assert_equal(np.einsum(a, [0], b, [0], c, [0], [0],
                 dtype='?', casting='unsafe'),
                 np.logical_and(np.logical_and(a != 0, b != 0), c != 0))

    # Scalar operands mixed with an array operand.
    a = np.arange(9, dtype=dtype)
    assert_equal(np.einsum(",i->", 3, a), 3*np.sum(a))
    assert_equal(np.einsum(3, [], a, [0], []), 3*np.sum(a))
    assert_equal(np.einsum("i,->", a, 3), 3*np.sum(a))
    assert_equal(np.einsum(a, [0], 3, [], []), 3*np.sum(a))

    # Various stride0, contiguous, and SSE aligned variants
    for n in range(1, 25):
        a = np.arange(n, dtype=dtype)
        if np.dtype(dtype).itemsize > 1:
            assert_equal(np.einsum("...,...", a, a, optimize=do_opt),
                         np.multiply(a, a))
            assert_equal(np.einsum("i,i", a, a, optimize=do_opt), np.dot(a, a))
            assert_equal(np.einsum("i,->i", a, 2, optimize=do_opt), 2*a)
            assert_equal(np.einsum(",i->i", 2, a, optimize=do_opt), 2*a)
            assert_equal(np.einsum("i,->", a, 2, optimize=do_opt), 2*np.sum(a))
            assert_equal(np.einsum(",i->", 2, a, optimize=do_opt), 2*np.sum(a))

            # Offset slices shift the alignment of the data.
            assert_equal(np.einsum("...,...", a[1:], a[:-1], optimize=do_opt),
                         np.multiply(a[1:], a[:-1]))
            assert_equal(np.einsum("i,i", a[1:], a[:-1], optimize=do_opt),
                         np.dot(a[1:], a[:-1]))
            assert_equal(np.einsum("i,->i", a[1:], 2, optimize=do_opt), 2*a[1:])
            assert_equal(np.einsum(",i->i", 2, a[1:], optimize=do_opt), 2*a[1:])
            assert_equal(np.einsum("i,->", a[1:], 2, optimize=do_opt),
                         2*np.sum(a[1:]))
            assert_equal(np.einsum(",i->", 2, a[1:], optimize=do_opt),
                         2*np.sum(a[1:]))

    # An object array, summed as the data type
    a = np.arange(9, dtype=object)

    b = np.einsum("i->", a, dtype=dtype, casting='unsafe')
    assert_equal(b, np.sum(a))
    if hasattr(b, "dtype"):
        # Can be a python object when dtype is object
        assert_equal(b.dtype, np.dtype(dtype))

    b = np.einsum(a, [0], [], dtype=dtype, casting='unsafe')
    assert_equal(b, np.sum(a))
    if hasattr(b, "dtype"):
        # Can be a python object when dtype is object
        assert_equal(b.dtype, np.dtype(dtype))

    # A case which was failing (ticket #1885)
    p = np.arange(2) + 1
    q = np.arange(4).reshape(2, 2) + 3
    r = np.arange(4).reshape(2, 2) + 7
    assert_equal(np.einsum('z,mz,zm->', p, q, r), 253)

    # singleton dimensions broadcast (gh-10343)
    p = np.ones((10,2))
    q = np.ones((1,2))
    assert_array_equal(np.einsum('ij,ij->j', p, q, optimize=True),
                       np.einsum('ij,ij->j', p, q, optimize=False))
    assert_array_equal(np.einsum('ij,ij->j', p, q, optimize=True),
                       [10.] * 2)

    # a blas-compatible contraction broadcasting case which was failing
    # for optimize=True (ticket #10930)
    x = np.array([2., 3.])
    y = np.array([4.])
    assert_array_equal(np.einsum("i, i", x, y, optimize=False), 20.)
    assert_array_equal(np.einsum("i, i", x, y, optimize=True), 20.)

    # all-ones array was bypassing bug (ticket #10930)
    p = np.ones((1, 5)) / 2
    q = np.ones((5, 5)) / 2
    for optimize in (True, False):
        assert_array_equal(np.einsum("...ij,...jk->...ik", p, p,
                                     optimize=optimize),
                           np.einsum("...ij,...jk->...ik", p, q,
                                     optimize=optimize))
        assert_array_equal(np.einsum("...ij,...jk->...ik", p, q,
                                     optimize=optimize),
                           np.full((1, 5), 1.25))

    # Cases which were failing (gh-10899)
    # NOTE(review): `optimize` here is the leftover loop variable from
    # the loop above (False on exit) — presumably intentional, but worth
    # confirming it was not meant to be `do_opt`.
    x = np.eye(2, dtype=dtype)
    y = np.ones(2, dtype=dtype)
    assert_array_equal(np.einsum("ji,i->", x, y, optimize=optimize),
                       [2.])  # contig_contig_outstride0_two
    assert_array_equal(np.einsum("i,ij->", y, x, optimize=optimize),
                       [2.])  # stride0_contig_outstride0_two
    assert_array_equal(np.einsum("ij,i->", x, y, optimize=optimize),
                       [2.])  # contig_stride0_outstride0_two
|
| 613 |
+
|
| 614 |
+
def test_einsum_sums_int8(self):
    # Run the einsum sum battery for int8 ('i1').
    self.check_einsum_sums('i1')
|
| 616 |
+
|
| 617 |
+
def test_einsum_sums_uint8(self):
    # Run the einsum sum battery for uint8 ('u1').
    self.check_einsum_sums('u1')
|
| 619 |
+
|
| 620 |
+
def test_einsum_sums_int16(self):
    # Run the einsum sum battery for int16 ('i2').
    self.check_einsum_sums('i2')
|
| 622 |
+
|
| 623 |
+
def test_einsum_sums_uint16(self):
    # Run the einsum sum battery for uint16 ('u2').
    self.check_einsum_sums('u2')
|
| 625 |
+
|
| 626 |
+
def test_einsum_sums_int32(self):
    # int32 ('i4') covers both the default and the optimize=True paths.
    self.check_einsum_sums('i4')
    self.check_einsum_sums('i4', True)
|
| 629 |
+
|
| 630 |
+
def test_einsum_sums_uint32(self):
    # uint32 ('u4') covers both the default and the optimize=True paths.
    self.check_einsum_sums('u4')
    self.check_einsum_sums('u4', True)
|
| 633 |
+
|
| 634 |
+
def test_einsum_sums_int64(self):
    # Run the einsum sum battery for int64 ('i8').
    self.check_einsum_sums('i8')
|
| 636 |
+
|
| 637 |
+
def test_einsum_sums_uint64(self):
    # Run the einsum sum battery for uint64 ('u8').
    self.check_einsum_sums('u8')
|
| 639 |
+
|
| 640 |
+
def test_einsum_sums_float16(self):
    # Run the einsum sum battery for float16 ('f2').
    self.check_einsum_sums('f2')
|
| 642 |
+
|
| 643 |
+
def test_einsum_sums_float32(self):
    # Run the einsum sum battery for float32 ('f4').
    self.check_einsum_sums('f4')
|
| 645 |
+
|
| 646 |
+
def test_einsum_sums_float64(self):
    # float64 ('f8') covers both the default and the optimize=True paths.
    self.check_einsum_sums('f8')
    self.check_einsum_sums('f8', True)
|
| 649 |
+
|
| 650 |
+
def test_einsum_sums_longdouble(self):
    # Run the einsum sum battery for the platform longdouble type.
    self.check_einsum_sums(np.longdouble)
|
| 652 |
+
|
| 653 |
+
def test_einsum_sums_cfloat64(self):
    # complex64 ('c8') covers both the default and the optimize=True paths.
    self.check_einsum_sums('c8')
    self.check_einsum_sums('c8', True)
|
| 656 |
+
|
| 657 |
+
def test_einsum_sums_cfloat128(self):
    # Run the einsum sum battery for complex128 ('c16').
    self.check_einsum_sums('c16')
|
| 659 |
+
|
| 660 |
+
def test_einsum_sums_clongdouble(self):
    # Run the einsum sum battery for the platform complex longdouble type.
    self.check_einsum_sums(np.clongdouble)
|
| 662 |
+
|
| 663 |
+
def test_einsum_sums_object(self):
    # object dtype covers both the default and the optimize=True paths.
    self.check_einsum_sums('object')
    self.check_einsum_sums('object', True)
|
| 666 |
+
|
| 667 |
+
def test_einsum_misc(self):
    """Assorted einsum regression cases collected from old bug reports."""
    # This call used to crash because of a bug in
    # PyArray_AssignZero
    a = np.ones((1, 2))
    b = np.ones((2, 2, 1))
    assert_equal(np.einsum('ij...,j...->i...', a, b), [[[2], [2]]])
    assert_equal(np.einsum('ij...,j...->i...', a, b, optimize=True), [[[2], [2]]])

    # Regression test for issue #10369 (test unicode inputs with Python 2)
    assert_equal(np.einsum('ij...,j...->i...', a, b), [[[2], [2]]])
    assert_equal(np.einsum('...i,...i', [1, 2, 3], [2, 3, 4]), 20)
    assert_equal(np.einsum('...i,...i', [1, 2, 3], [2, 3, 4],
                           optimize='greedy'), 20)

    # The iterator had an issue with buffering this reduction
    a = np.ones((5, 12, 4, 2, 3), np.int64)
    b = np.ones((5, 12, 11), np.int64)
    # Multiplying an all-ones operand in twice must not change the sum.
    assert_equal(np.einsum('ijklm,ijn,ijn->', a, b, b),
                 np.einsum('ijklm,ijn->', a, b))
    assert_equal(np.einsum('ijklm,ijn,ijn->', a, b, b, optimize=True),
                 np.einsum('ijklm,ijn->', a, b, optimize=True))

    # Issue #2027, was a problem in the contiguous 3-argument
    # inner loop implementation
    a = np.arange(1, 3)
    b = np.arange(1, 5).reshape(2, 2)
    c = np.arange(1, 9).reshape(4, 2)
    assert_equal(np.einsum('x,yx,zx->xzy', a, b, c),
                 [[[1, 3], [3, 9], [5, 15], [7, 21]],
                  [[8, 16], [16, 32], [24, 48], [32, 64]]])
    assert_equal(np.einsum('x,yx,zx->xzy', a, b, c, optimize=True),
                 [[[1, 3], [3, 9], [5, 15], [7, 21]],
                  [[8, 16], [16, 32], [24, 48], [32, 64]]])

    # Ensure explicitly setting out=None does not cause an error
    # see issue gh-15776 and issue gh-15256
    assert_equal(np.einsum('i,j', [1], [2], out=None), [[2]])
|
| 704 |
+
|
| 705 |
+
def test_object_loop(self):
    """The object-dtype inner loop must use ``__mul__`` and surface errors."""

    class Mult:
        # Multiplying two Mult instances always yields 42.
        def __mul__(self, other):
            return 42

    objMult = np.array([Mult()])
    # Object array whose single slot is a NULL pointer (all-zero buffer,
    # no PyObject ever written); einsum must raise, not crash.
    objNULL = np.ndarray(buffer = b'\0' * np.intp(0).itemsize, shape=1, dtype=object)

    with pytest.raises(TypeError):
        np.einsum("i,j", [1], objNULL)
    with pytest.raises(TypeError):
        np.einsum("i,j", objNULL, [1])
    assert np.einsum("i,j", objMult, objMult) == 42
|
| 719 |
+
|
| 720 |
+
def test_subscript_range(self):
    # Issue #7741, make sure that all letters of Latin alphabet (both uppercase & lowercase) can be used
    # when creating a subscript from arrays
    a = np.ones((2, 3))
    b = np.ones((3, 4))
    # Integer labels 0..51 map onto the 52 letters; all must be accepted.
    np.einsum(a, [0, 20], b, [20, 2], [0, 2], optimize=False)
    np.einsum(a, [0, 27], b, [27, 2], [0, 2], optimize=False)
    np.einsum(a, [0, 51], b, [51, 2], [0, 2], optimize=False)
    # 52 is one past the last letter, and negative labels are invalid.
    assert_raises(ValueError, lambda: np.einsum(a, [0, 52], b, [52, 2], [0, 2], optimize=False))
    assert_raises(ValueError, lambda: np.einsum(a, [-1, 5], b, [5, 2], [-1, 2], optimize=False))
|
| 730 |
+
|
| 731 |
+
def test_einsum_broadcast(self):
    # Issue #2455 change in handling ellipsis
    # remove the 'middle broadcast' error
    # only use the 'RIGHT' iteration in prepare_op_axes
    # adds auto broadcast on left where it belongs
    # broadcast on right has to be explicit
    # We need to test the optimized parsing as well

    A = np.arange(2 * 3 * 4).reshape(2, 3, 4)
    B = np.arange(3)
    ref = np.einsum('ijk,j->ijk', A, B, optimize=False)
    for opt in [True, False]:
        assert_equal(np.einsum('ij...,j...->ij...', A, B, optimize=opt), ref)
        assert_equal(np.einsum('ij...,...j->ij...', A, B, optimize=opt), ref)
        assert_equal(np.einsum('ij...,j->ij...', A, B, optimize=opt), ref)  # used to raise error

    A = np.arange(12).reshape((4, 3))
    B = np.arange(6).reshape((3, 2))
    ref = np.einsum('ik,kj->ij', A, B, optimize=False)
    for opt in [True, False]:
        assert_equal(np.einsum('ik...,k...->i...', A, B, optimize=opt), ref)
        assert_equal(np.einsum('ik...,...kj->i...j', A, B, optimize=opt), ref)
        assert_equal(np.einsum('...k,kj', A, B, optimize=opt), ref)  # used to raise error
        assert_equal(np.einsum('ik,k...->i...', A, B, optimize=opt), ref)  # used to raise error

    dims = [2, 3, 4, 5]
    a = np.arange(np.prod(dims)).reshape(dims)
    v = np.arange(dims[2])
    ref = np.einsum('ijkl,k->ijl', a, v, optimize=False)
    for opt in [True, False]:
        assert_equal(np.einsum('ijkl,k', a, v, optimize=opt), ref)
        assert_equal(np.einsum('...kl,k', a, v, optimize=opt), ref)  # used to raise error
        assert_equal(np.einsum('...kl,k...', a, v, optimize=opt), ref)

    J, K, M = 160, 160, 120
    A = np.arange(J * K * M).reshape(1, 1, 1, J, K, M)
    B = np.arange(J * K * M * 3).reshape(J, K, M, 3)
    ref = np.einsum('...lmn,...lmno->...o', A, B, optimize=False)
    for opt in [True, False]:
        assert_equal(np.einsum('...lmn,lmno->...o', A, B,
                               optimize=opt), ref)  # used to raise error
|
| 772 |
+
|
| 773 |
+
def test_einsum_fixedstridebug(self):
    # Issue #4485 obscure einsum bug
    # This case revealed a bug in nditer where it reported a stride
    # as 'fixed' (0) when it was in fact not fixed during processing
    # (0 or 4).  The reason for the bug was that the check for a fixed
    # stride was using the information from the 2D inner loop reuse
    # to restrict the iteration dimensions it had to validate to be
    # the same, but that 2D inner loop reuse logic is only triggered
    # during the buffer copying step, and hence it was invalid to
    # rely on those values.  The fix is to check all the dimensions
    # of the stride in question, which in the test case reveals that
    # the stride is not fixed.
    #
    # NOTE: This test is triggered by the fact that the default buffersize,
    #       used by einsum, is 8192, and 3*2731 = 8193, is larger than that
    #       and results in a mismatch between the buffering and the
    #       striding for operand A.
    A = np.arange(2 * 3).reshape(2, 3).astype(np.float32)
    B = np.arange(2 * 3 * 2731).reshape(2, 3, 2731).astype(np.int16)
    es = np.einsum('cl, cpx->lpx', A, B)
    tp = np.tensordot(A, B, axes=(0, 0))
    assert_equal(es, tp)
    # The following is the original test case from the bug report,
    # made repeatable by changing random arrays to aranges.
    A = np.arange(3 * 3).reshape(3, 3).astype(np.float64)
    B = np.arange(3 * 3 * 64 * 64).reshape(3, 3, 64, 64).astype(np.float32)
    es = np.einsum('cl, cpxy->lpxy', A, B)
    tp = np.tensordot(A, B, axes=(0, 0))
    assert_equal(es, tp)
|
| 802 |
+
|
| 803 |
+
def test_einsum_fixed_collapsingbug(self):
    # Issue #5147.
    # The bug only occurred when the output argument of einsum was used.
    x = np.random.normal(0, 1, (5, 5, 5, 5))
    y1 = np.zeros((5, 5))
    np.einsum('aabb->ab', x, out=y1)
    # Build the expected double-diagonal extraction with fancy indexing.
    idx = np.arange(5)
    y2 = x[idx[:, None], idx[:, None], idx, idx]
    assert_equal(y1, y2)
|
| 812 |
+
|
| 813 |
+
def test_einsum_failed_on_p9_and_s390x(self):
    # Issues gh-14692 and gh-12689
    # Bug with signed vs unsigned char errored on power9 and s390x Linux
    tensor = np.random.random_sample((10, 10, 10, 10))
    x = np.einsum('ijij->', tensor)
    # 'ijij->' is a double trace over the (0,2) and (1,3) axis pairs.
    y = tensor.trace(axis1=0, axis2=2).trace()
    assert_allclose(x, y)
|
| 820 |
+
|
| 821 |
+
def test_einsum_all_contig_non_contig_output(self):
    # Issue gh-5907, tests that the all contiguous special case
    # actually checks the contiguity of the output
    x = np.ones((5, 5))
    out = np.ones(10)[::2]  # non-contiguous output view
    correct_base = np.ones(10)
    correct_base[::2] = 5
    # Always worked (inner iteration is done with 0-stride):
    np.einsum('mi,mi,mi->m', x, x, x, out=out)
    # Checking out.base verifies the skipped elements were untouched.
    assert_array_equal(out.base, correct_base)
    # Example 1:
    out = np.ones(10)[::2]
    np.einsum('im,im,im->m', x, x, x, out=out)
    assert_array_equal(out.base, correct_base)
    # Example 2, buffering causes x to be contiguous but
    # special cases do not catch the operation before:
    out = np.ones((2, 2, 2))[..., 0]
    correct_base = np.ones((2, 2, 2))
    correct_base[..., 0] = 2
    x = np.ones((2, 2), np.float32)
    np.einsum('ij,jk->ik', x, x, out=out)
    assert_array_equal(out.base, correct_base)
|
| 843 |
+
|
| 844 |
+
@pytest.mark.parametrize("dtype",
         np.typecodes["AllFloat"] + np.typecodes["AllInteger"])
def test_different_paths(self, dtype):
    # Test originally added to cover broken float16 path: gh-20305
    # Likely most are covered elsewhere, at least partially.
    dtype = np.dtype(dtype)
    # Simple test, designed to exercise most specialized code paths,
    # note the +0.5 for floats.  This makes sure we use a float value
    # where the results must be exact.
    arr = (np.arange(7) + 0.5).astype(dtype)
    scalar = np.array(2, dtype=dtype)

    # contig -> scalar:
    res = np.einsum('i->', arr)
    assert res == arr.sum()
    # contig, contig -> contig:
    res = np.einsum('i,i->i', arr, arr)
    assert_array_equal(res, arr * arr)
    # noncontig, noncontig -> contig:
    res = np.einsum('i,i->i', arr.repeat(2)[::2], arr.repeat(2)[::2])
    assert_array_equal(res, arr * arr)
    # contig + contig -> scalar
    assert np.einsum('i,i->', arr, arr) == (arr * arr).sum()
    # contig + scalar -> contig (with out)
    out = np.ones(7, dtype=dtype)
    res = np.einsum('i,->i', arr, dtype.type(2), out=out)
    assert_array_equal(res, arr * dtype.type(2))
    # scalar + contig -> contig (with out)
    # NOTE(review): comment says "with out" but no out= is passed on
    # this call — confirm whether out=out was intended here.
    res = np.einsum(',i->i', scalar, arr)
    assert_array_equal(res, arr * dtype.type(2))
    # scalar + contig -> scalar
    res = np.einsum(',i->', scalar, arr)
    # Use einsum to compare to not have difference due to sum round-offs:
    assert res == np.einsum('i->', scalar * arr)
    # contig + scalar -> scalar
    res = np.einsum('i,->', arr, scalar)
    # Use einsum to compare to not have difference due to sum round-offs:
    assert res == np.einsum('i->', scalar * arr)
    # contig + contig + contig -> scalar
    arr = np.array([0.5, 0.5, 0.25, 4.5, 3.], dtype=dtype)
    res = np.einsum('i,i,i->', arr, arr, arr)
    assert_array_equal(res, (arr * arr * arr).sum())
    # four arrays:
    res = np.einsum('i,i,i,i->', arr, arr, arr, arr)
    assert_array_equal(res, (arr * arr * arr * arr).sum())
|
| 889 |
+
|
| 890 |
+
def test_small_boolean_arrays(self):
    # See gh-5946.
    # Use array of True embedded in False.
    # Slicing the larger zero array produces operands/outputs that are
    # views, not freshly-allocated contiguous arrays.
    a = np.zeros((16, 1, 1), dtype=np.bool)[:2]
    a[...] = True
    out = np.zeros((16, 1, 1), dtype=np.bool)[:2]
    tgt = np.ones((2, 1, 1), dtype=np.bool)
    res = np.einsum('...ij,...jk->...ik', a, a, out=out)
    assert_equal(res, tgt)
|
| 899 |
+
|
| 900 |
+
def test_out_is_res(self):
|
| 901 |
+
a = np.arange(9).reshape(3, 3)
|
| 902 |
+
res = np.einsum('...ij,...jk->...ik', a, a, out=a)
|
| 903 |
+
assert res is a
|
| 904 |
+
|
| 905 |
+
def optimize_compare(self, subscripts, operands=None):
    """Check that every optimization strategy matches plain einsum.

    When *operands* is omitted, random operands are generated whose
    axis lengths are looked up in the module-level ``global_size_dict``.
    """
    if operands is None:
        call_args = [subscripts]
        input_terms = subscripts.split('->')[0].split(',')
        for spec in input_terms:
            shape = [global_size_dict[axis] for axis in spec]
            call_args.append(np.random.rand(*shape))
    else:
        call_args = [subscripts, *operands]

    reference = np.einsum(*call_args, optimize=False)
    for strategy in ('greedy', 'optimal'):
        assert_almost_equal(np.einsum(*call_args, optimize=strategy),
                            reference)
|
| 922 |
+
|
| 923 |
+
def test_hadamard_like_products(self):
    # Hadamard outer products
    self.optimize_compare('a,ab,abc->abc')
    self.optimize_compare('a,b,ab->ab')
|
| 927 |
+
|
| 928 |
+
def test_index_transformations(self):
    # Simple index transformation cases; the same contraction with the
    # operands listed in different orders.
    self.optimize_compare('ea,fb,gc,hd,abcd->efgh')
    self.optimize_compare('ea,fb,abcd,gc,hd->efgh')
    self.optimize_compare('abcd,ea,fb,gc,hd->efgh')
|
| 933 |
+
|
| 934 |
+
def test_complex(self):
    """Long, many-operand contractions that stress the path optimizer."""
    # NOTE: an exact duplicate of the first contraction was removed;
    # running the identical case twice added runtime with no coverage.
    self.optimize_compare('acdf,jbje,gihb,hfac,gfac,gifabc,hfac')
    self.optimize_compare('cd,bdhe,aidb,hgca,gc,hgibcd,hgac')
    self.optimize_compare('abhe,hidj,jgba,hiab,gab')
    self.optimize_compare('bde,cdh,agdb,hica,ibd,hgicd,hiac')
    self.optimize_compare('chd,bde,agbc,hiad,hgc,hgi,hiad')
    self.optimize_compare('chd,bde,agbc,hiad,bdi,cgh,agdb')
    self.optimize_compare('bdhe,acad,hiab,agac,hibd')
|
| 944 |
+
|
| 945 |
+
def test_collapse(self):
    # Inner products that collapse most or all output dimensions.
    self.optimize_compare('ab,ab,c->')
    self.optimize_compare('ab,ab,c->c')
    self.optimize_compare('ab,ab,cd,cd->')
    self.optimize_compare('ab,ab,cd,cd->ac')
    self.optimize_compare('ab,ab,cd,cd->cd')
    self.optimize_compare('ab,ab,cd,cd,ef,ef->')
|
| 953 |
+
|
| 954 |
+
def test_expand(self):
    # Outer products that expand the output dimensions.
    self.optimize_compare('ab,cd,ef->abcdef')
    self.optimize_compare('ab,cd,ef->acdf')
    self.optimize_compare('ab,cd,de->abcde')
    self.optimize_compare('ab,cd,de->be')
    self.optimize_compare('ab,bcd,cd->abcd')
    self.optimize_compare('ab,bcd,cd->abd')
|
| 962 |
+
|
| 963 |
+
def test_edge_cases(self):
    # Difficult edge cases for optimization (repeated labels, diagonals,
    # and partially-overlapping contractions).
    self.optimize_compare('eb,cb,fb->cef')
    self.optimize_compare('dd,fb,be,cdb->cef')
    self.optimize_compare('bca,cdb,dbf,afc->')
    self.optimize_compare('dcc,fce,ea,dbf->ab')
    self.optimize_compare('fdf,cdd,ccd,afe->ae')
    self.optimize_compare('abcd,ad')
    self.optimize_compare('ed,fcd,ff,bcf->be')
    self.optimize_compare('baa,dcf,af,cde->be')
    self.optimize_compare('bd,db,eac->ace')
    self.optimize_compare('fff,fae,bef,def->abd')
    self.optimize_compare('efc,dbc,acf,fd->abe')
    self.optimize_compare('ba,ac,da->bcd')
|
| 977 |
+
|
| 978 |
+
def test_inner_product(self):
    # Inner products over matching and permuted axis orders.
    self.optimize_compare('ab,ab')
    self.optimize_compare('ab,ba')
    self.optimize_compare('abc,abc')
    self.optimize_compare('abc,bac')
    self.optimize_compare('abc,cba')
|
| 985 |
+
|
| 986 |
+
def test_random_cases(self):
|
| 987 |
+
# Randomly built test cases
|
| 988 |
+
self.optimize_compare('aab,fa,df,ecc->bde')
|
| 989 |
+
self.optimize_compare('ecb,fef,bad,ed->ac')
|
| 990 |
+
self.optimize_compare('bcf,bbb,fbf,fc->')
|
| 991 |
+
self.optimize_compare('bb,ff,be->e')
|
| 992 |
+
self.optimize_compare('bcb,bb,fc,fff->')
|
| 993 |
+
self.optimize_compare('fbb,dfd,fc,fc->')
|
| 994 |
+
self.optimize_compare('afd,ba,cc,dc->bf')
|
| 995 |
+
self.optimize_compare('adb,bc,fa,cfc->d')
|
| 996 |
+
self.optimize_compare('bbd,bda,fc,db->acf')
|
| 997 |
+
self.optimize_compare('dba,ead,cad->bce')
|
| 998 |
+
self.optimize_compare('aef,fbc,dca->bde')
|
| 999 |
+
|
| 1000 |
+
def test_combined_views_mapping(self):
|
| 1001 |
+
# gh-10792
|
| 1002 |
+
a = np.arange(9).reshape(1, 1, 3, 1, 3)
|
| 1003 |
+
b = np.einsum('bbcdc->d', a)
|
| 1004 |
+
assert_equal(b, [12])
|
| 1005 |
+
|
| 1006 |
+
def test_broadcasting_dot_cases(self):
|
| 1007 |
+
# Ensures broadcasting cases are not mistaken for GEMM
|
| 1008 |
+
|
| 1009 |
+
a = np.random.rand(1, 5, 4)
|
| 1010 |
+
b = np.random.rand(4, 6)
|
| 1011 |
+
c = np.random.rand(5, 6)
|
| 1012 |
+
d = np.random.rand(10)
|
| 1013 |
+
|
| 1014 |
+
self.optimize_compare('ijk,kl,jl', operands=[a, b, c])
|
| 1015 |
+
self.optimize_compare('ijk,kl,jl,i->i', operands=[a, b, c, d])
|
| 1016 |
+
|
| 1017 |
+
e = np.random.rand(1, 1, 5, 4)
|
| 1018 |
+
f = np.random.rand(7, 7)
|
| 1019 |
+
self.optimize_compare('abjk,kl,jl', operands=[e, b, c])
|
| 1020 |
+
self.optimize_compare('abjk,kl,jl,ab->ab', operands=[e, b, c, f])
|
| 1021 |
+
|
| 1022 |
+
# Edge case found in gh-11308
|
| 1023 |
+
g = np.arange(64).reshape(2, 4, 8)
|
| 1024 |
+
self.optimize_compare('obk,ijk->ioj', operands=[g, g])
|
| 1025 |
+
|
| 1026 |
+
def test_output_order(self):
|
| 1027 |
+
# Ensure output order is respected for optimize cases, the below
|
| 1028 |
+
# contraction should yield a reshaped tensor view
|
| 1029 |
+
# gh-16415
|
| 1030 |
+
|
| 1031 |
+
a = np.ones((2, 3, 5), order='F')
|
| 1032 |
+
b = np.ones((4, 3), order='F')
|
| 1033 |
+
|
| 1034 |
+
for opt in [True, False]:
|
| 1035 |
+
tmp = np.einsum('...ft,mf->...mt', a, b, order='a', optimize=opt)
|
| 1036 |
+
assert_(tmp.flags.f_contiguous)
|
| 1037 |
+
|
| 1038 |
+
tmp = np.einsum('...ft,mf->...mt', a, b, order='f', optimize=opt)
|
| 1039 |
+
assert_(tmp.flags.f_contiguous)
|
| 1040 |
+
|
| 1041 |
+
tmp = np.einsum('...ft,mf->...mt', a, b, order='c', optimize=opt)
|
| 1042 |
+
assert_(tmp.flags.c_contiguous)
|
| 1043 |
+
|
| 1044 |
+
tmp = np.einsum('...ft,mf->...mt', a, b, order='k', optimize=opt)
|
| 1045 |
+
assert_(tmp.flags.c_contiguous is False)
|
| 1046 |
+
assert_(tmp.flags.f_contiguous is False)
|
| 1047 |
+
|
| 1048 |
+
tmp = np.einsum('...ft,mf->...mt', a, b, optimize=opt)
|
| 1049 |
+
assert_(tmp.flags.c_contiguous is False)
|
| 1050 |
+
assert_(tmp.flags.f_contiguous is False)
|
| 1051 |
+
|
| 1052 |
+
c = np.ones((4, 3), order='C')
|
| 1053 |
+
for opt in [True, False]:
|
| 1054 |
+
tmp = np.einsum('...ft,mf->...mt', a, c, order='a', optimize=opt)
|
| 1055 |
+
assert_(tmp.flags.c_contiguous)
|
| 1056 |
+
|
| 1057 |
+
d = np.ones((2, 3, 5), order='C')
|
| 1058 |
+
for opt in [True, False]:
|
| 1059 |
+
tmp = np.einsum('...ft,mf->...mt', d, c, order='a', optimize=opt)
|
| 1060 |
+
assert_(tmp.flags.c_contiguous)
|
| 1061 |
+
|
| 1062 |
+
class TestEinsumPath:
|
| 1063 |
+
def build_operands(self, string, size_dict=global_size_dict):
|
| 1064 |
+
|
| 1065 |
+
# Builds views based off initial operands
|
| 1066 |
+
operands = [string]
|
| 1067 |
+
terms = string.split('->')[0].split(',')
|
| 1068 |
+
for term in terms:
|
| 1069 |
+
dims = [size_dict[x] for x in term]
|
| 1070 |
+
operands.append(np.random.rand(*dims))
|
| 1071 |
+
|
| 1072 |
+
return operands
|
| 1073 |
+
|
| 1074 |
+
def assert_path_equal(self, comp, benchmark):
|
| 1075 |
+
# Checks if list of tuples are equivalent
|
| 1076 |
+
ret = (len(comp) == len(benchmark))
|
| 1077 |
+
assert_(ret)
|
| 1078 |
+
for pos in range(len(comp) - 1):
|
| 1079 |
+
ret &= isinstance(comp[pos + 1], tuple)
|
| 1080 |
+
ret &= (comp[pos + 1] == benchmark[pos + 1])
|
| 1081 |
+
assert_(ret)
|
| 1082 |
+
|
| 1083 |
+
def test_memory_contraints(self):
|
| 1084 |
+
# Ensure memory constraints are satisfied
|
| 1085 |
+
|
| 1086 |
+
outer_test = self.build_operands('a,b,c->abc')
|
| 1087 |
+
|
| 1088 |
+
path, path_str = np.einsum_path(*outer_test, optimize=('greedy', 0))
|
| 1089 |
+
self.assert_path_equal(path, ['einsum_path', (0, 1, 2)])
|
| 1090 |
+
|
| 1091 |
+
path, path_str = np.einsum_path(*outer_test, optimize=('optimal', 0))
|
| 1092 |
+
self.assert_path_equal(path, ['einsum_path', (0, 1, 2)])
|
| 1093 |
+
|
| 1094 |
+
long_test = self.build_operands('acdf,jbje,gihb,hfac')
|
| 1095 |
+
path, path_str = np.einsum_path(*long_test, optimize=('greedy', 0))
|
| 1096 |
+
self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)])
|
| 1097 |
+
|
| 1098 |
+
path, path_str = np.einsum_path(*long_test, optimize=('optimal', 0))
|
| 1099 |
+
self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)])
|
| 1100 |
+
|
| 1101 |
+
def test_long_paths(self):
|
| 1102 |
+
# Long complex cases
|
| 1103 |
+
|
| 1104 |
+
# Long test 1
|
| 1105 |
+
long_test1 = self.build_operands('acdf,jbje,gihb,hfac,gfac,gifabc,hfac')
|
| 1106 |
+
path, path_str = np.einsum_path(*long_test1, optimize='greedy')
|
| 1107 |
+
self.assert_path_equal(path, ['einsum_path',
|
| 1108 |
+
(3, 6), (3, 4), (2, 4), (2, 3), (0, 2), (0, 1)])
|
| 1109 |
+
|
| 1110 |
+
path, path_str = np.einsum_path(*long_test1, optimize='optimal')
|
| 1111 |
+
self.assert_path_equal(path, ['einsum_path',
|
| 1112 |
+
(3, 6), (3, 4), (2, 4), (2, 3), (0, 2), (0, 1)])
|
| 1113 |
+
|
| 1114 |
+
# Long test 2
|
| 1115 |
+
long_test2 = self.build_operands('chd,bde,agbc,hiad,bdi,cgh,agdb')
|
| 1116 |
+
path, path_str = np.einsum_path(*long_test2, optimize='greedy')
|
| 1117 |
+
self.assert_path_equal(path, ['einsum_path',
|
| 1118 |
+
(3, 4), (0, 3), (3, 4), (1, 3), (1, 2), (0, 1)])
|
| 1119 |
+
|
| 1120 |
+
path, path_str = np.einsum_path(*long_test2, optimize='optimal')
|
| 1121 |
+
self.assert_path_equal(path, ['einsum_path',
|
| 1122 |
+
(0, 5), (1, 4), (3, 4), (1, 3), (1, 2), (0, 1)])
|
| 1123 |
+
|
| 1124 |
+
def test_edge_paths(self):
|
| 1125 |
+
# Difficult edge cases
|
| 1126 |
+
|
| 1127 |
+
# Edge test1
|
| 1128 |
+
edge_test1 = self.build_operands('eb,cb,fb->cef')
|
| 1129 |
+
path, path_str = np.einsum_path(*edge_test1, optimize='greedy')
|
| 1130 |
+
self.assert_path_equal(path, ['einsum_path', (0, 2), (0, 1)])
|
| 1131 |
+
|
| 1132 |
+
path, path_str = np.einsum_path(*edge_test1, optimize='optimal')
|
| 1133 |
+
self.assert_path_equal(path, ['einsum_path', (0, 2), (0, 1)])
|
| 1134 |
+
|
| 1135 |
+
# Edge test2
|
| 1136 |
+
edge_test2 = self.build_operands('dd,fb,be,cdb->cef')
|
| 1137 |
+
path, path_str = np.einsum_path(*edge_test2, optimize='greedy')
|
| 1138 |
+
self.assert_path_equal(path, ['einsum_path', (0, 3), (0, 1), (0, 1)])
|
| 1139 |
+
|
| 1140 |
+
path, path_str = np.einsum_path(*edge_test2, optimize='optimal')
|
| 1141 |
+
self.assert_path_equal(path, ['einsum_path', (0, 3), (0, 1), (0, 1)])
|
| 1142 |
+
|
| 1143 |
+
# Edge test3
|
| 1144 |
+
edge_test3 = self.build_operands('bca,cdb,dbf,afc->')
|
| 1145 |
+
path, path_str = np.einsum_path(*edge_test3, optimize='greedy')
|
| 1146 |
+
self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)])
|
| 1147 |
+
|
| 1148 |
+
path, path_str = np.einsum_path(*edge_test3, optimize='optimal')
|
| 1149 |
+
self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)])
|
| 1150 |
+
|
| 1151 |
+
# Edge test4
|
| 1152 |
+
edge_test4 = self.build_operands('dcc,fce,ea,dbf->ab')
|
| 1153 |
+
path, path_str = np.einsum_path(*edge_test4, optimize='greedy')
|
| 1154 |
+
self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 1), (0, 1)])
|
| 1155 |
+
|
| 1156 |
+
path, path_str = np.einsum_path(*edge_test4, optimize='optimal')
|
| 1157 |
+
self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)])
|
| 1158 |
+
|
| 1159 |
+
# Edge test5
|
| 1160 |
+
edge_test4 = self.build_operands('a,ac,ab,ad,cd,bd,bc->',
|
| 1161 |
+
size_dict={"a": 20, "b": 20, "c": 20, "d": 20})
|
| 1162 |
+
path, path_str = np.einsum_path(*edge_test4, optimize='greedy')
|
| 1163 |
+
self.assert_path_equal(path, ['einsum_path', (0, 1), (0, 1, 2, 3, 4, 5)])
|
| 1164 |
+
|
| 1165 |
+
path, path_str = np.einsum_path(*edge_test4, optimize='optimal')
|
| 1166 |
+
self.assert_path_equal(path, ['einsum_path', (0, 1), (0, 1, 2, 3, 4, 5)])
|
| 1167 |
+
|
| 1168 |
+
def test_path_type_input(self):
|
| 1169 |
+
# Test explicit path handling
|
| 1170 |
+
path_test = self.build_operands('dcc,fce,ea,dbf->ab')
|
| 1171 |
+
|
| 1172 |
+
path, path_str = np.einsum_path(*path_test, optimize=False)
|
| 1173 |
+
self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)])
|
| 1174 |
+
|
| 1175 |
+
path, path_str = np.einsum_path(*path_test, optimize=True)
|
| 1176 |
+
self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 1), (0, 1)])
|
| 1177 |
+
|
| 1178 |
+
exp_path = ['einsum_path', (0, 2), (0, 2), (0, 1)]
|
| 1179 |
+
path, path_str = np.einsum_path(*path_test, optimize=exp_path)
|
| 1180 |
+
self.assert_path_equal(path, exp_path)
|
| 1181 |
+
|
| 1182 |
+
# Double check einsum works on the input path
|
| 1183 |
+
noopt = np.einsum(*path_test, optimize=False)
|
| 1184 |
+
opt = np.einsum(*path_test, optimize=exp_path)
|
| 1185 |
+
assert_almost_equal(noopt, opt)
|
| 1186 |
+
|
| 1187 |
+
def test_path_type_input_internal_trace(self):
|
| 1188 |
+
#gh-20962
|
| 1189 |
+
path_test = self.build_operands('cab,cdd->ab')
|
| 1190 |
+
exp_path = ['einsum_path', (1,), (0, 1)]
|
| 1191 |
+
|
| 1192 |
+
path, path_str = np.einsum_path(*path_test, optimize=exp_path)
|
| 1193 |
+
self.assert_path_equal(path, exp_path)
|
| 1194 |
+
|
| 1195 |
+
# Double check einsum works on the input path
|
| 1196 |
+
noopt = np.einsum(*path_test, optimize=False)
|
| 1197 |
+
opt = np.einsum(*path_test, optimize=exp_path)
|
| 1198 |
+
assert_almost_equal(noopt, opt)
|
| 1199 |
+
|
| 1200 |
+
def test_path_type_input_invalid(self):
|
| 1201 |
+
path_test = self.build_operands('ab,bc,cd,de->ae')
|
| 1202 |
+
exp_path = ['einsum_path', (2, 3), (0, 1)]
|
| 1203 |
+
assert_raises(RuntimeError, np.einsum, *path_test, optimize=exp_path)
|
| 1204 |
+
assert_raises(
|
| 1205 |
+
RuntimeError, np.einsum_path, *path_test, optimize=exp_path)
|
| 1206 |
+
|
| 1207 |
+
path_test = self.build_operands('a,a,a->a')
|
| 1208 |
+
exp_path = ['einsum_path', (1,), (0, 1)]
|
| 1209 |
+
assert_raises(RuntimeError, np.einsum, *path_test, optimize=exp_path)
|
| 1210 |
+
assert_raises(
|
| 1211 |
+
RuntimeError, np.einsum_path, *path_test, optimize=exp_path)
|
| 1212 |
+
|
| 1213 |
+
def test_spaces(self):
|
| 1214 |
+
#gh-10794
|
| 1215 |
+
arr = np.array([[1]])
|
| 1216 |
+
for sp in itertools.product(['', ' '], repeat=4):
|
| 1217 |
+
# no error for any spacing
|
| 1218 |
+
np.einsum('{}...a{}->{}...a{}'.format(*sp), arr)
|
| 1219 |
+
|
| 1220 |
+
def test_overlap():
|
| 1221 |
+
a = np.arange(9, dtype=int).reshape(3, 3)
|
| 1222 |
+
b = np.arange(9, dtype=int).reshape(3, 3)
|
| 1223 |
+
d = np.dot(a, b)
|
| 1224 |
+
# sanity check
|
| 1225 |
+
c = np.einsum('ij,jk->ik', a, b)
|
| 1226 |
+
assert_equal(c, d)
|
| 1227 |
+
#gh-10080, out overlaps one of the operands
|
| 1228 |
+
c = np.einsum('ij,jk->ik', a, b, out=b)
|
| 1229 |
+
assert_equal(c, d)
|
mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_getlimits.py
ADDED
|
@@ -0,0 +1,203 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
""" Test functions for limits module.
|
| 2 |
+
|
| 3 |
+
"""
|
| 4 |
+
import types
|
| 5 |
+
import warnings
|
| 6 |
+
import numpy as np
|
| 7 |
+
import pytest
|
| 8 |
+
from numpy._core import finfo, iinfo
|
| 9 |
+
from numpy import half, single, double, longdouble
|
| 10 |
+
from numpy.testing import assert_equal, assert_, assert_raises
|
| 11 |
+
from numpy._core.getlimits import _discovered_machar, _float_ma
|
| 12 |
+
|
| 13 |
+
##################################################
|
| 14 |
+
|
| 15 |
+
class TestPythonFloat:
|
| 16 |
+
def test_singleton(self):
|
| 17 |
+
ftype = finfo(float)
|
| 18 |
+
ftype2 = finfo(float)
|
| 19 |
+
assert_equal(id(ftype), id(ftype2))
|
| 20 |
+
|
| 21 |
+
class TestHalf:
|
| 22 |
+
def test_singleton(self):
|
| 23 |
+
ftype = finfo(half)
|
| 24 |
+
ftype2 = finfo(half)
|
| 25 |
+
assert_equal(id(ftype), id(ftype2))
|
| 26 |
+
|
| 27 |
+
class TestSingle:
|
| 28 |
+
def test_singleton(self):
|
| 29 |
+
ftype = finfo(single)
|
| 30 |
+
ftype2 = finfo(single)
|
| 31 |
+
assert_equal(id(ftype), id(ftype2))
|
| 32 |
+
|
| 33 |
+
class TestDouble:
|
| 34 |
+
def test_singleton(self):
|
| 35 |
+
ftype = finfo(double)
|
| 36 |
+
ftype2 = finfo(double)
|
| 37 |
+
assert_equal(id(ftype), id(ftype2))
|
| 38 |
+
|
| 39 |
+
class TestLongdouble:
|
| 40 |
+
def test_singleton(self):
|
| 41 |
+
ftype = finfo(longdouble)
|
| 42 |
+
ftype2 = finfo(longdouble)
|
| 43 |
+
assert_equal(id(ftype), id(ftype2))
|
| 44 |
+
|
| 45 |
+
def assert_finfo_equal(f1, f2):
|
| 46 |
+
# assert two finfo instances have the same attributes
|
| 47 |
+
for attr in ('bits', 'eps', 'epsneg', 'iexp', 'machep',
|
| 48 |
+
'max', 'maxexp', 'min', 'minexp', 'negep', 'nexp',
|
| 49 |
+
'nmant', 'precision', 'resolution', 'tiny',
|
| 50 |
+
'smallest_normal', 'smallest_subnormal'):
|
| 51 |
+
assert_equal(getattr(f1, attr), getattr(f2, attr),
|
| 52 |
+
f'finfo instances {f1} and {f2} differ on {attr}')
|
| 53 |
+
|
| 54 |
+
def assert_iinfo_equal(i1, i2):
|
| 55 |
+
# assert two iinfo instances have the same attributes
|
| 56 |
+
for attr in ('bits', 'min', 'max'):
|
| 57 |
+
assert_equal(getattr(i1, attr), getattr(i2, attr),
|
| 58 |
+
f'iinfo instances {i1} and {i2} differ on {attr}')
|
| 59 |
+
|
| 60 |
+
class TestFinfo:
|
| 61 |
+
def test_basic(self):
|
| 62 |
+
dts = list(zip(['f2', 'f4', 'f8', 'c8', 'c16'],
|
| 63 |
+
[np.float16, np.float32, np.float64, np.complex64,
|
| 64 |
+
np.complex128]))
|
| 65 |
+
for dt1, dt2 in dts:
|
| 66 |
+
assert_finfo_equal(finfo(dt1), finfo(dt2))
|
| 67 |
+
|
| 68 |
+
assert_raises(ValueError, finfo, 'i4')
|
| 69 |
+
|
| 70 |
+
def test_regression_gh23108(self):
|
| 71 |
+
# np.float32(1.0) and np.float64(1.0) have the same hash and are
|
| 72 |
+
# equal under the == operator
|
| 73 |
+
f1 = np.finfo(np.float32(1.0))
|
| 74 |
+
f2 = np.finfo(np.float64(1.0))
|
| 75 |
+
assert f1 != f2
|
| 76 |
+
|
| 77 |
+
def test_regression_gh23867(self):
|
| 78 |
+
class NonHashableWithDtype:
|
| 79 |
+
__hash__ = None
|
| 80 |
+
dtype = np.dtype('float32')
|
| 81 |
+
|
| 82 |
+
x = NonHashableWithDtype()
|
| 83 |
+
assert np.finfo(x) == np.finfo(x.dtype)
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
class TestIinfo:
|
| 87 |
+
def test_basic(self):
|
| 88 |
+
dts = list(zip(['i1', 'i2', 'i4', 'i8',
|
| 89 |
+
'u1', 'u2', 'u4', 'u8'],
|
| 90 |
+
[np.int8, np.int16, np.int32, np.int64,
|
| 91 |
+
np.uint8, np.uint16, np.uint32, np.uint64]))
|
| 92 |
+
for dt1, dt2 in dts:
|
| 93 |
+
assert_iinfo_equal(iinfo(dt1), iinfo(dt2))
|
| 94 |
+
|
| 95 |
+
assert_raises(ValueError, iinfo, 'f4')
|
| 96 |
+
|
| 97 |
+
def test_unsigned_max(self):
|
| 98 |
+
types = np._core.sctypes['uint']
|
| 99 |
+
for T in types:
|
| 100 |
+
with np.errstate(over="ignore"):
|
| 101 |
+
max_calculated = T(0) - T(1)
|
| 102 |
+
assert_equal(iinfo(T).max, max_calculated)
|
| 103 |
+
|
| 104 |
+
class TestRepr:
|
| 105 |
+
def test_iinfo_repr(self):
|
| 106 |
+
expected = "iinfo(min=-32768, max=32767, dtype=int16)"
|
| 107 |
+
assert_equal(repr(np.iinfo(np.int16)), expected)
|
| 108 |
+
|
| 109 |
+
def test_finfo_repr(self):
|
| 110 |
+
expected = "finfo(resolution=1e-06, min=-3.4028235e+38," + \
|
| 111 |
+
" max=3.4028235e+38, dtype=float32)"
|
| 112 |
+
assert_equal(repr(np.finfo(np.float32)), expected)
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
def test_instances():
|
| 116 |
+
# Test the finfo and iinfo results on numeric instances agree with
|
| 117 |
+
# the results on the corresponding types
|
| 118 |
+
|
| 119 |
+
for c in [int, np.int16, np.int32, np.int64]:
|
| 120 |
+
class_iinfo = iinfo(c)
|
| 121 |
+
instance_iinfo = iinfo(c(12))
|
| 122 |
+
|
| 123 |
+
assert_iinfo_equal(class_iinfo, instance_iinfo)
|
| 124 |
+
|
| 125 |
+
for c in [float, np.float16, np.float32, np.float64]:
|
| 126 |
+
class_finfo = finfo(c)
|
| 127 |
+
instance_finfo = finfo(c(1.2))
|
| 128 |
+
assert_finfo_equal(class_finfo, instance_finfo)
|
| 129 |
+
|
| 130 |
+
with pytest.raises(ValueError):
|
| 131 |
+
iinfo(10.)
|
| 132 |
+
|
| 133 |
+
with pytest.raises(ValueError):
|
| 134 |
+
iinfo('hi')
|
| 135 |
+
|
| 136 |
+
with pytest.raises(ValueError):
|
| 137 |
+
finfo(np.int64(1))
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
def assert_ma_equal(discovered, ma_like):
|
| 141 |
+
# Check MachAr-like objects same as calculated MachAr instances
|
| 142 |
+
for key, value in discovered.__dict__.items():
|
| 143 |
+
assert_equal(value, getattr(ma_like, key))
|
| 144 |
+
if hasattr(value, 'shape'):
|
| 145 |
+
assert_equal(value.shape, getattr(ma_like, key).shape)
|
| 146 |
+
assert_equal(value.dtype, getattr(ma_like, key).dtype)
|
| 147 |
+
|
| 148 |
+
|
| 149 |
+
def test_known_types():
|
| 150 |
+
# Test we are correctly compiling parameters for known types
|
| 151 |
+
for ftype, ma_like in ((np.float16, _float_ma[16]),
|
| 152 |
+
(np.float32, _float_ma[32]),
|
| 153 |
+
(np.float64, _float_ma[64])):
|
| 154 |
+
assert_ma_equal(_discovered_machar(ftype), ma_like)
|
| 155 |
+
# Suppress warning for broken discovery of double double on PPC
|
| 156 |
+
with np.errstate(all='ignore'):
|
| 157 |
+
ld_ma = _discovered_machar(np.longdouble)
|
| 158 |
+
bytes = np.dtype(np.longdouble).itemsize
|
| 159 |
+
if (ld_ma.it, ld_ma.maxexp) == (63, 16384) and bytes in (12, 16):
|
| 160 |
+
# 80-bit extended precision
|
| 161 |
+
assert_ma_equal(ld_ma, _float_ma[80])
|
| 162 |
+
elif (ld_ma.it, ld_ma.maxexp) == (112, 16384) and bytes == 16:
|
| 163 |
+
# IEE 754 128-bit
|
| 164 |
+
assert_ma_equal(ld_ma, _float_ma[128])
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
def test_subnormal_warning():
|
| 168 |
+
"""Test that the subnormal is zero warning is not being raised."""
|
| 169 |
+
with np.errstate(all='ignore'):
|
| 170 |
+
ld_ma = _discovered_machar(np.longdouble)
|
| 171 |
+
bytes = np.dtype(np.longdouble).itemsize
|
| 172 |
+
with warnings.catch_warnings(record=True) as w:
|
| 173 |
+
warnings.simplefilter('always')
|
| 174 |
+
if (ld_ma.it, ld_ma.maxexp) == (63, 16384) and bytes in (12, 16):
|
| 175 |
+
# 80-bit extended precision
|
| 176 |
+
ld_ma.smallest_subnormal
|
| 177 |
+
assert len(w) == 0
|
| 178 |
+
elif (ld_ma.it, ld_ma.maxexp) == (112, 16384) and bytes == 16:
|
| 179 |
+
# IEE 754 128-bit
|
| 180 |
+
ld_ma.smallest_subnormal
|
| 181 |
+
assert len(w) == 0
|
| 182 |
+
else:
|
| 183 |
+
# Double double
|
| 184 |
+
ld_ma.smallest_subnormal
|
| 185 |
+
# This test may fail on some platforms
|
| 186 |
+
assert len(w) == 0
|
| 187 |
+
|
| 188 |
+
|
| 189 |
+
def test_plausible_finfo():
|
| 190 |
+
# Assert that finfo returns reasonable results for all types
|
| 191 |
+
for ftype in np._core.sctypes['float'] + np._core.sctypes['complex']:
|
| 192 |
+
info = np.finfo(ftype)
|
| 193 |
+
assert_(info.nmant > 1)
|
| 194 |
+
assert_(info.minexp < -1)
|
| 195 |
+
assert_(info.maxexp > 1)
|
| 196 |
+
|
| 197 |
+
|
| 198 |
+
class TestRuntimeSubscriptable:
|
| 199 |
+
def test_finfo_generic(self):
|
| 200 |
+
assert isinstance(np.finfo[np.float64], types.GenericAlias)
|
| 201 |
+
|
| 202 |
+
def test_iinfo_generic(self):
|
| 203 |
+
assert isinstance(np.iinfo[np.int_], types.GenericAlias)
|
mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_indexing.py
ADDED
|
@@ -0,0 +1,1444 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sys
|
| 2 |
+
import warnings
|
| 3 |
+
import functools
|
| 4 |
+
import operator
|
| 5 |
+
|
| 6 |
+
import pytest
|
| 7 |
+
|
| 8 |
+
import numpy as np
|
| 9 |
+
from numpy._core._multiarray_tests import array_indexing
|
| 10 |
+
from itertools import product
|
| 11 |
+
from numpy.exceptions import ComplexWarning, VisibleDeprecationWarning
|
| 12 |
+
from numpy.testing import (
|
| 13 |
+
assert_, assert_equal, assert_raises, assert_raises_regex,
|
| 14 |
+
assert_array_equal, assert_warns, HAS_REFCOUNT, IS_WASM
|
| 15 |
+
)
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class TestIndexing:
    # Core behavioral tests for ndarray.__getitem__ / __setitem__:
    # rejection of float indices, boolean and fancy (integer-array)
    # indexing, view-vs-copy semantics, and assorted regression tests.
    def test_index_no_floats(self):
        a = np.array([[[5]]])

        assert_raises(IndexError, lambda: a[0.0])
        assert_raises(IndexError, lambda: a[0, 0.0])
        assert_raises(IndexError, lambda: a[0.0, 0])
        assert_raises(IndexError, lambda: a[0.0,:])
        assert_raises(IndexError, lambda: a[:, 0.0])
        assert_raises(IndexError, lambda: a[:, 0.0,:])
        assert_raises(IndexError, lambda: a[0.0,:,:])
        assert_raises(IndexError, lambda: a[0, 0, 0.0])
        assert_raises(IndexError, lambda: a[0.0, 0, 0])
        assert_raises(IndexError, lambda: a[0, 0.0, 0])
        assert_raises(IndexError, lambda: a[-1.4])
        assert_raises(IndexError, lambda: a[0, -1.4])
        assert_raises(IndexError, lambda: a[-1.4, 0])
        assert_raises(IndexError, lambda: a[-1.4,:])
        assert_raises(IndexError, lambda: a[:, -1.4])
        assert_raises(IndexError, lambda: a[:, -1.4,:])
        assert_raises(IndexError, lambda: a[-1.4,:,:])
        assert_raises(IndexError, lambda: a[0, 0, -1.4])
        assert_raises(IndexError, lambda: a[-1.4, 0, 0])
        assert_raises(IndexError, lambda: a[0, -1.4, 0])
        assert_raises(IndexError, lambda: a[0.0:, 0.0])
        assert_raises(IndexError, lambda: a[0.0:, 0.0,:])

    def test_slicing_no_floats(self):
        a = np.array([[5]])

        # start as float.
        assert_raises(TypeError, lambda: a[0.0:])
        assert_raises(TypeError, lambda: a[0:, 0.0:2])
        assert_raises(TypeError, lambda: a[0.0::2, :0])
        assert_raises(TypeError, lambda: a[0.0:1:2,:])
        assert_raises(TypeError, lambda: a[:, 0.0:])
        # stop as float.
        assert_raises(TypeError, lambda: a[:0.0])
        assert_raises(TypeError, lambda: a[:0, 1:2.0])
        assert_raises(TypeError, lambda: a[:0.0:2, :0])
        assert_raises(TypeError, lambda: a[:0.0,:])
        assert_raises(TypeError, lambda: a[:, 0:4.0:2])
        # step as float.
        assert_raises(TypeError, lambda: a[::1.0])
        assert_raises(TypeError, lambda: a[0:, :2:2.0])
        assert_raises(TypeError, lambda: a[1::4.0, :0])
        assert_raises(TypeError, lambda: a[::5.0,:])
        assert_raises(TypeError, lambda: a[:, 0:4:2.0])
        # mixed.
        assert_raises(TypeError, lambda: a[1.0:2:2.0])
        assert_raises(TypeError, lambda: a[1.0::2.0])
        assert_raises(TypeError, lambda: a[0:, :2.0:2.0])
        assert_raises(TypeError, lambda: a[1.0:1:4.0, :0])
        assert_raises(TypeError, lambda: a[1.0:5.0:5.0,:])
        assert_raises(TypeError, lambda: a[:, 0.4:4.0:2.0])
        # should still get the DeprecationWarning if step = 0.
        assert_raises(TypeError, lambda: a[::0.0])

    def test_index_no_array_to_index(self):
        # No non-scalar arrays.
        a = np.array([[[1]]])

        assert_raises(TypeError, lambda: a[a:a:a])

    def test_none_index(self):
        # `None` index adds newaxis
        a = np.array([1, 2, 3])
        assert_equal(a[None], a[np.newaxis])
        assert_equal(a[None].ndim, a.ndim + 1)

    def test_empty_tuple_index(self):
        # Empty tuple index creates a view
        a = np.array([1, 2, 3])
        assert_equal(a[()], a)
        assert_(a[()].base is a)
        a = np.array(0)
        assert_(isinstance(a[()], np.int_))

    def test_void_scalar_empty_tuple(self):
        s = np.zeros((), dtype='V4')
        assert_equal(s[()].dtype, s.dtype)
        assert_equal(s[()], s)
        assert_equal(type(s[...]), np.ndarray)

    def test_same_kind_index_casting(self):
        # Indexes should be cast with same-kind and not safe, even if that
        # is somewhat unsafe. So test various different code paths.
        index = np.arange(5)
        u_index = index.astype(np.uintp)
        arr = np.arange(10)

        assert_array_equal(arr[index], arr[u_index])
        arr[u_index] = np.arange(5)
        assert_array_equal(arr, np.arange(10))

        arr = np.arange(10).reshape(5, 2)
        assert_array_equal(arr[index], arr[u_index])

        arr[u_index] = np.arange(5)[:,None]
        assert_array_equal(arr, np.arange(5)[:,None].repeat(2, axis=1))

        arr = np.arange(25).reshape(5, 5)
        assert_array_equal(arr[u_index, u_index], arr[index, index])

    def test_empty_fancy_index(self):
        # Empty list index creates an empty array
        # with the same dtype (but with weird shape)
        a = np.array([1, 2, 3])
        assert_equal(a[[]], [])
        assert_equal(a[[]].dtype, a.dtype)

        b = np.array([], dtype=np.intp)
        assert_equal(a[[]], [])
        assert_equal(a[[]].dtype, a.dtype)

        b = np.array([])
        assert_raises(IndexError, a.__getitem__, b)

    def test_gh_26542(self):
        a = np.array([0, 1, 2])
        idx = np.array([2, 1, 0])
        a[idx] = a
        expected = np.array([2, 1, 0])
        assert_equal(a, expected)

    def test_gh_26542_2d(self):
        a = np.array([[0, 1, 2]])
        idx_row = np.zeros(3, dtype=int)
        idx_col = np.array([2, 1, 0])
        a[idx_row, idx_col] = a
        expected = np.array([[2, 1, 0]])
        assert_equal(a, expected)

    def test_gh_26542_index_overlap(self):
        arr = np.arange(100)
        expected_vals = np.copy(arr[:-10])
        arr[10:] = arr[:-10]
        actual_vals = arr[10:]
        assert_equal(actual_vals, expected_vals)

    def test_ellipsis_index(self):
        a = np.array([[1, 2, 3],
                      [4, 5, 6],
                      [7, 8, 9]])
        assert_(a[...] is not a)
        assert_equal(a[...], a)
        # `a[...]` was `a` in numpy <1.9.
        assert_(a[...].base is a)

        # Slicing with ellipsis can skip an
        # arbitrary number of dimensions
        assert_equal(a[0, ...], a[0])
        assert_equal(a[0, ...], a[0,:])
        assert_equal(a[..., 0], a[:, 0])

        # Slicing with ellipsis always results
        # in an array, not a scalar
        assert_equal(a[0, ..., 1], np.array(2))

        # Assignment with `(Ellipsis,)` on 0-d arrays
        b = np.array(1)
        b[(Ellipsis,)] = 2
        assert_equal(b, 2)

    def test_single_int_index(self):
        # Single integer index selects one row
        a = np.array([[1, 2, 3],
                      [4, 5, 6],
                      [7, 8, 9]])

        assert_equal(a[0], [1, 2, 3])
        assert_equal(a[-1], [7, 8, 9])

        # Index out of bounds produces IndexError
        assert_raises(IndexError, a.__getitem__, 1 << 30)
        # Index overflow produces IndexError
        assert_raises(IndexError, a.__getitem__, 1 << 64)

    def test_single_bool_index(self):
        # Single boolean index
        a = np.array([[1, 2, 3],
                      [4, 5, 6],
                      [7, 8, 9]])

        assert_equal(a[np.array(True)], a[None])
        assert_equal(a[np.array(False)], a[None][0:0])

    def test_boolean_shape_mismatch(self):
        arr = np.ones((5, 4, 3))

        index = np.array([True])
        assert_raises(IndexError, arr.__getitem__, index)

        index = np.array([False] * 6)
        assert_raises(IndexError, arr.__getitem__, index)

        index = np.zeros((4, 4), dtype=bool)
        assert_raises(IndexError, arr.__getitem__, index)

        assert_raises(IndexError, arr.__getitem__, (slice(None), index))

    def test_boolean_indexing_onedim(self):
        # Indexing a 2-dimensional array with
        # boolean array of length one
        a = np.array([[ 0.,  0.,  0.]])
        b = np.array([ True], dtype=bool)
        assert_equal(a[b], a)
        # boolean assignment
        a[b] = 1.
        assert_equal(a, [[1., 1., 1.]])

    def test_boolean_assignment_value_mismatch(self):
        # A boolean assignment should fail when the shape of the values
        # cannot be broadcast to the subscription. (see also gh-3458)
        a = np.arange(4)

        def f(a, v):
            a[a > -1] = v

        assert_raises(ValueError, f, a, [])
        assert_raises(ValueError, f, a, [1, 2, 3])
        assert_raises(ValueError, f, a[:1], [1, 2, 3])

    def test_boolean_assignment_needs_api(self):
        # See also gh-7666
        # This caused a segfault on Python 2 due to the GIL not being
        # held when the iterator does not need it, but the transfer function
        # does
        arr = np.zeros(1000)
        indx = np.zeros(1000, dtype=bool)
        indx[:100] = True
        arr[indx] = np.ones(100, dtype=object)

        expected = np.zeros(1000)
        expected[:100] = 1
        assert_array_equal(arr, expected)

    def test_boolean_indexing_twodim(self):
        # Indexing a 2-dimensional array with
        # 2-dimensional boolean array
        a = np.array([[1, 2, 3],
                      [4, 5, 6],
                      [7, 8, 9]])
        b = np.array([[ True, False,  True],
                      [False,  True, False],
                      [ True, False,  True]])
        assert_equal(a[b], [1, 3, 5, 7, 9])
        assert_equal(a[b[1]], [[4, 5, 6]])
        assert_equal(a[b[0]], a[b[2]])

        # boolean assignment
        a[b] = 0
        assert_equal(a, [[0, 2, 0],
                         [4, 0, 6],
                         [0, 8, 0]])

    def test_boolean_indexing_list(self):
        # Regression test for #13715. It's a use-after-free bug which the
        # test won't directly catch, but it will show up in valgrind.
        a = np.array([1, 2, 3])
        b = [True, False, True]
        # Two variants of the test because the first takes a fast path
        assert_equal(a[b], [1, 3])
        assert_equal(a[None, b], [[1, 3]])

    def test_reverse_strides_and_subspace_bufferinit(self):
        # This tests that the strides are not reversed for simple and
        # subspace fancy indexing.
        a = np.ones(5)
        b = np.zeros(5, dtype=np.intp)[::-1]
        c = np.arange(5)[::-1]

        a[b] = c
        # If the strides are not reversed, the 0 in the arange comes last.
        assert_equal(a[0], 0)

        # This also tests that the subspace buffer is initialized:
        a = np.ones((5, 2))
        c = np.arange(10).reshape(5, 2)[::-1]
        a[b, :] = c
        assert_equal(a[0], [0, 1])

    def test_reversed_strides_result_allocation(self):
        # Test a bug when calculating the output strides for a result array
        # when the subspace size was 1 (and test other cases as well)
        a = np.arange(10)[:, None]
        i = np.arange(10)[::-1]
        assert_array_equal(a[i], a[i.copy('C')])

        a = np.arange(20).reshape(-1, 2)

    def test_uncontiguous_subspace_assignment(self):
        # During development there was a bug activating a skip logic
        # based on ndim instead of size.
        a = np.full((3, 4, 2), -1)
        b = np.full((3, 4, 2), -1)

        a[[0, 1]] = np.arange(2 * 4 * 2).reshape(2, 4, 2).T
        b[[0, 1]] = np.arange(2 * 4 * 2).reshape(2, 4, 2).T.copy()

        assert_equal(a, b)

    def test_too_many_fancy_indices_special_case(self):
        # Just documents behaviour, this is a small limitation.
        a = np.ones((1,) * 64)  # 64 is NPY_MAXDIMS
        assert_raises(IndexError, a.__getitem__, (np.array([0]),) * 64)

    def test_scalar_array_bool(self):
        # NumPy bools can be used as boolean index (python ones as of yet not)
        a = np.array(1)
        assert_equal(a[np.bool(True)], a[np.array(True)])
        assert_equal(a[np.bool(False)], a[np.array(False)])

        # After deprecating bools as integers:
        #a = np.array([0,1,2])
        #assert_equal(a[True, :], a[None, :])
        #assert_equal(a[:, True], a[:, None])
        #
        #assert_(not np.may_share_memory(a, a[True, :]))

    def test_everything_returns_views(self):
        # Before `...` would return a itself.
        a = np.arange(5)

        assert_(a is not a[()])
        assert_(a is not a[...])
        assert_(a is not a[:])

    def test_broaderrors_indexing(self):
        a = np.zeros((5, 5))
        assert_raises(IndexError, a.__getitem__, ([0, 1], [0, 1, 2]))
        assert_raises(IndexError, a.__setitem__, ([0, 1], [0, 1, 2]), 0)

    def test_trivial_fancy_out_of_bounds(self):
        a = np.zeros(5)
        ind = np.ones(20, dtype=np.intp)
        ind[-1] = 10
        assert_raises(IndexError, a.__getitem__, ind)
        assert_raises(IndexError, a.__setitem__, ind, 0)
        ind = np.ones(20, dtype=np.intp)
        ind[0] = 11
        assert_raises(IndexError, a.__getitem__, ind)
        assert_raises(IndexError, a.__setitem__, ind, 0)

    def test_trivial_fancy_not_possible(self):
        # Test that the fast path for trivial assignment is not incorrectly
        # used when the index is not contiguous or 1D, see also gh-11467.
        a = np.arange(6)
        idx = np.arange(6, dtype=np.intp).reshape(2, 1, 3)[:, :, 0]
        assert_array_equal(a[idx], idx)

        # this case must not go into the fast path, note that idx is
        # a non-contiguous non-1D array here.
        a[idx] = -1
        res = np.arange(6)
        res[0] = -1
        res[3] = -1
        assert_array_equal(a, res)

    def test_nonbaseclass_values(self):
        class SubClass(np.ndarray):
            def __array_finalize__(self, old):
                # Have array finalize do funny things
                self.fill(99)

        a = np.zeros((5, 5))
        s = a.copy().view(type=SubClass)
        s.fill(1)

        a[[0, 1, 2, 3, 4], :] = s
        assert_((a == 1).all())

        # Subspace is last, so transposing might want to finalize
        a[:, [0, 1, 2, 3, 4]] = s
        assert_((a == 1).all())

        a.fill(0)
        a[...] = s
        assert_((a == 1).all())

    def test_array_like_values(self):
        # Similar to the above test, but use a memoryview instead
        a = np.zeros((5, 5))
        s = np.arange(25, dtype=np.float64).reshape(5, 5)

        a[[0, 1, 2, 3, 4], :] = memoryview(s)
        assert_array_equal(a, s)

        a[:, [0, 1, 2, 3, 4]] = memoryview(s)
        assert_array_equal(a, s)

        a[...] = memoryview(s)
        assert_array_equal(a, s)

    @pytest.mark.parametrize("writeable", [True, False])
    def test_subclass_writeable(self, writeable):
        d = np.rec.array([('NGC1001', 11), ('NGC1002', 1.), ('NGC1003', 1.)],
                         dtype=[('target', 'S20'), ('V_mag', '>f4')])
        d.flags.writeable = writeable
        # Advanced indexing results are always writeable:
        ind = np.array([False, True, True], dtype=bool)
        assert d[ind].flags.writeable
        ind = np.array([0, 1])
        assert d[ind].flags.writeable
        # Views should be writeable if the original array is:
        assert d[...].flags.writeable == writeable
        assert d[0].flags.writeable == writeable

    def test_memory_order(self):
        # This is not necessary to preserve. Memory layouts for
        # more complex indices are not as simple.
        a = np.arange(10)
        b = np.arange(10).reshape(5,2).T
        assert_(a[b].flags.f_contiguous)

        # Takes a different implementation branch:
        a = a.reshape(-1, 1)
        assert_(a[b, 0].flags.f_contiguous)

    def test_scalar_return_type(self):
        # Full scalar indices should return scalars and object
        # arrays should not call PyArray_Return on their items
        class Zero:
            # The most basic valid indexing
            def __index__(self):
                return 0

        z = Zero()

        class ArrayLike:
            # Simple array, should behave like the array
            def __array__(self, dtype=None, copy=None):
                return np.array(0)

        a = np.zeros(())
        assert_(isinstance(a[()], np.float64))
        a = np.zeros(1)
        assert_(isinstance(a[z], np.float64))
        a = np.zeros((1, 1))
        assert_(isinstance(a[z, np.array(0)], np.float64))
        assert_(isinstance(a[z, ArrayLike()], np.float64))

        # And object arrays do not call it too often:
        b = np.array(0)
        a = np.array(0, dtype=object)
        a[()] = b
        assert_(isinstance(a[()], np.ndarray))
        a = np.array([b, None])
        assert_(isinstance(a[z], np.ndarray))
        a = np.array([[b, None]])
        assert_(isinstance(a[z, np.array(0)], np.ndarray))
        assert_(isinstance(a[z, ArrayLike()], np.ndarray))

    def test_small_regressions(self):
        # Reference count of intp for index checks
        a = np.array([0])
        if HAS_REFCOUNT:
            refcount = sys.getrefcount(np.dtype(np.intp))
        # item setting always checks indices in separate function:
        a[np.array([0], dtype=np.intp)] = 1
        a[np.array([0], dtype=np.uint8)] = 1
        assert_raises(IndexError, a.__setitem__,
                      np.array([1], dtype=np.intp), 1)
        assert_raises(IndexError, a.__setitem__,
                      np.array([1], dtype=np.uint8), 1)

        if HAS_REFCOUNT:
            assert_equal(sys.getrefcount(np.dtype(np.intp)), refcount)

    def test_unaligned(self):
        v = (np.zeros(64, dtype=np.int8) + ord('a'))[1:-7]
        d = v.view(np.dtype("S8"))
        # unaligned source
        x = (np.zeros(16, dtype=np.int8) + ord('a'))[1:-7]
        x = x.view(np.dtype("S8"))
        x[...] = np.array("b" * 8, dtype="S")
        b = np.arange(d.size)
        #trivial
        assert_equal(d[b], d)
        d[b] = x
        # nontrivial
        # unaligned index array
        b = np.zeros(d.size + 1).view(np.int8)[1:-(np.intp(0).itemsize - 1)]
        b = b.view(np.intp)[:d.size]
        b[...] = np.arange(d.size)
        assert_equal(d[b.astype(np.int16)], d)
        d[b.astype(np.int16)] = x
        # boolean
        d[b % 2 == 0]
        d[b % 2 == 0] = x[::2]

    def test_tuple_subclass(self):
        arr = np.ones((5, 5))

        # A tuple subclass should also be an nd-index
        class TupleSubclass(tuple):
            pass
        index = ([1], [1])
        index = TupleSubclass(index)
        assert_(arr[index].shape == (1,))
        # Unlike the non nd-index:
        assert_(arr[index,].shape != (1,))

    def test_broken_sequence_not_nd_index(self):
        # See gh-5063:
        # If we have an object which claims to be a sequence, but fails
        # on item getting, this should not be converted to an nd-index (tuple)
        # If this object happens to be a valid index otherwise, it should work
        # This object here is very dubious and probably bad though:
        class SequenceLike:
            def __index__(self):
                return 0

            def __len__(self):
                return 1

            def __getitem__(self, item):
                raise IndexError('Not possible')

        arr = np.arange(10)
        assert_array_equal(arr[SequenceLike()], arr[SequenceLike(),])

        # also test that field indexing does not segfault
        # for a similar reason, by indexing a structured array
        arr = np.zeros((1,), dtype=[('f1', 'i8'), ('f2', 'i8')])
        assert_array_equal(arr[SequenceLike()], arr[SequenceLike(),])

    def test_indexing_array_weird_strides(self):
        # See also gh-6221
        # the shapes used here come from the issue and create the correct
        # size for the iterator buffering size.
        x = np.ones(10)
        x2 = np.ones((10, 2))
        ind = np.arange(10)[:, None, None, None]
        ind = np.broadcast_to(ind, (10, 55, 4, 4))

        # single advanced index case
        assert_array_equal(x[ind], x[ind.copy()])
        # higher dimensional advanced index
        zind = np.zeros(4, dtype=np.intp)
        assert_array_equal(x2[ind, zind], x2[ind.copy(), zind])

    def test_indexing_array_negative_strides(self):
        # From gh-8264,
        # core dumps if negative strides are used in iteration
        arro = np.zeros((4, 4))
        arr = arro[::-1, ::-1]

        slices = (slice(None), [0, 1, 2, 3])
        arr[slices] = 10
        assert_array_equal(arr, 10.)

    def test_character_assignment(self):
        # This is an example a function going through CopyObject which
        # used to have an untested special path for scalars
        # (the character special dtype case, should be deprecated probably)
        arr = np.zeros((1, 5), dtype="c")
        arr[0] = np.str_("asdfg")  # must assign as a sequence
        assert_array_equal(arr[0], np.array("asdfg", dtype="c"))
        assert arr[0, 1] == b"s"  # make sure not all were set to "a" for both

    @pytest.mark.parametrize("index",
            [True, False, np.array([0])])
    @pytest.mark.parametrize("num", [64, 80])
    @pytest.mark.parametrize("original_ndim", [1, 64])
    def test_too_many_advanced_indices(self, index, num, original_ndim):
        # These are limitations based on the number of arguments we can process.
        # For `num=32` (and all boolean cases), the result is actually defined;
        # but the use of NpyIter (NPY_MAXARGS) limits it for technical reasons.
        arr = np.ones((1,) * original_ndim)
        with pytest.raises(IndexError):
            arr[(index,) * num]
        with pytest.raises(IndexError):
            arr[(index,) * num] = 1.

    @pytest.mark.skipif(IS_WASM, reason="no threading")
    def test_structured_advanced_indexing(self):
        # Test that copyswap(n) used by integer array indexing is threadsafe
        # for structured datatypes, see gh-15387. This test can behave randomly.
        from concurrent.futures import ThreadPoolExecutor

        # Create a deeply nested dtype to make a failure more likely:
        dt = np.dtype([("", "f8")])
        dt = np.dtype([("", dt)] * 2)
        dt = np.dtype([("", dt)] * 2)
        # The array should be large enough to likely run into threading issues
        arr = np.random.uniform(size=(6000, 8)).view(dt)[:, 0]

        rng = np.random.default_rng()
        def func(arr):
            indx = rng.integers(0, len(arr), size=6000, dtype=np.intp)
            arr[indx]

        tpe = ThreadPoolExecutor(max_workers=8)
        futures = [tpe.submit(func, arr) for _ in range(10)]
        for f in futures:
            f.result()

        assert arr.dtype is dt

    def test_nontuple_ndindex(self):
        a = np.arange(25).reshape((5, 5))
        assert_equal(a[[0, 1]], np.array([a[0], a[1]]))
        assert_equal(a[[0, 1], [0, 1]], np.array([0, 6]))
        assert_raises(IndexError, a.__getitem__, [slice(None)])
|
| 623 |
+
|
| 624 |
+
|
| 625 |
+
class TestFieldIndexing:
    def test_scalar_return_type(self):
        """Field access on a 0-d structured array must yield ndarrays.

        Both single-field (``a['a']``) and multi-field (``a[['a']]``)
        indexing are covered.
        """
        record = np.zeros((), [('a','f8')])
        single_field = record['a']
        multi_field = record[['a']]
        assert_(isinstance(single_field, np.ndarray))
        assert_(isinstance(multi_field, np.ndarray))
|
| 632 |
+
|
| 633 |
+
|
| 634 |
+
class TestBroadcastedAssignments:
    # Tests for how values broadcast against fancy/slice subscriptions on
    # assignment, including the errors raised on shape mismatch.
    def assign(self, a, ind, val):
        # Helper: perform `a[ind] = val` (used as a callable for
        # assert_raises) and return the mutated array.
        a[ind] = val
        return a

    def test_prepending_ones(self):
        a = np.zeros((3, 2))

        a[...] = np.ones((1, 3, 2))
        # Fancy with subspace with and without transpose
        a[[0, 1, 2], :] = np.ones((1, 3, 2))
        a[:, [0, 1]] = np.ones((1, 3, 2))
        # Fancy without subspace (with broadcasting)
        a[[[0], [1], [2]], [0, 1]] = np.ones((1, 3, 2))

    def test_prepend_not_one(self):
        assign = self.assign
        s_ = np.s_
        a = np.zeros(5)

        # Too large and not only ones.
        assert_raises(ValueError, assign, a, s_[...],  np.ones((2, 1)))
        assert_raises(ValueError, assign, a, s_[[1, 2, 3],], np.ones((2, 1)))
        assert_raises(ValueError, assign, a, s_[[[1], [2]],], np.ones((2,2,1)))

    def test_simple_broadcasting_errors(self):
        assign = self.assign
        s_ = np.s_
        a = np.zeros((5, 1))

        assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 2)))
        assert_raises(ValueError, assign, a, s_[...], np.zeros((5, 0)))
        assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 2)))
        assert_raises(ValueError, assign, a, s_[:, [0]], np.zeros((5, 0)))
        assert_raises(ValueError, assign, a, s_[[0], :], np.zeros((2, 1)))

    @pytest.mark.parametrize("index", [
            (..., [1, 2], slice(None)),
            ([0, 1], ..., 0),
            (..., [1, 2], [1, 2])])
    def test_broadcast_error_reports_correct_shape(self, index):
        values = np.zeros((100, 100))  # will never broadcast below

        arr = np.zeros((3, 4, 5, 6, 7))
        # We currently report without any spaces (could be changed)
        shape_str = str(arr[index].shape).replace(" ", "")

        with pytest.raises(ValueError) as e:
            arr[index] = values

        assert str(e.value).endswith(shape_str)

    def test_index_is_larger(self):
        # Simple case of fancy index broadcasting of the index.
        a = np.zeros((5, 5))
        a[[[0], [1], [2]], [0, 1, 2]] = [2, 3, 4]

        assert_((a[:3, :3] == [2, 3, 4]).all())

    def test_broadcast_subspace(self):
        a = np.zeros((100, 100))
        v = np.arange(100)[:,None]
        b = np.arange(100)[::-1]
        a[b] = v
        assert_((a[::-1] == v).all())
|
| 699 |
+
|
| 700 |
+
|
| 701 |
+
class TestSubclasses:
    # Tests that indexing preserves ndarray-subclass type and sets up the
    # `base` attribute and `__array_finalize__` behavior correctly.
    def test_basic(self):
        # Test that indexing in various ways produces SubClass instances,
        # and that the base is set up correctly: the original subclass
        # instance for views, and a new ndarray for advanced/boolean indexing
        # where a copy was made (latter a regression test for gh-11983).
        class SubClass(np.ndarray):
            pass

        a = np.arange(5)
        s = a.view(SubClass)
        s_slice = s[:3]
        assert_(type(s_slice) is SubClass)
        assert_(s_slice.base is s)
        assert_array_equal(s_slice, a[:3])

        s_fancy = s[[0, 1, 2]]
        assert_(type(s_fancy) is SubClass)
        assert_(s_fancy.base is not s)
        assert_(type(s_fancy.base) is np.ndarray)
        assert_array_equal(s_fancy, a[[0, 1, 2]])
        assert_array_equal(s_fancy.base, a[[0, 1, 2]])

        s_bool = s[s > 0]
        assert_(type(s_bool) is SubClass)
        assert_(s_bool.base is not s)
        assert_(type(s_bool.base) is np.ndarray)
        assert_array_equal(s_bool, a[a > 0])
        assert_array_equal(s_bool.base, a[a > 0])

    def test_fancy_on_read_only(self):
        # Test that fancy indexing on read-only SubClass does not make a
        # read-only copy (gh-14132)
        class SubClass(np.ndarray):
            pass

        a = np.arange(5)
        s = a.view(SubClass)
        s.flags.writeable = False
        s_fancy = s[[0, 1, 2]]
        assert_(s_fancy.flags.writeable)


    def test_finalize_gets_full_info(self):
        # Array finalize should be called on the filled array.
        class SubClass(np.ndarray):
            def __array_finalize__(self, old):
                self.finalize_status = np.array(self)
                self.old = old

        s = np.arange(10).view(SubClass)
        new_s = s[:3]
        assert_array_equal(new_s.finalize_status, new_s)
        assert_array_equal(new_s.old, s)

        new_s = s[[0,1,2,3]]
        assert_array_equal(new_s.finalize_status, new_s)
        assert_array_equal(new_s.old, s)

        new_s = s[s > 0]
        assert_array_equal(new_s.finalize_status, new_s)
        assert_array_equal(new_s.old, s)
|
| 763 |
+
|
| 764 |
+
|
| 765 |
+
class TestFancyIndexingCast:
    # Tests casting behavior of assignments through boolean and fancy
    # indices (e.g. complex -> float triggers ComplexWarning).
    def test_boolean_index_cast_assign(self):
        # Setup the boolean index and float arrays.
        shape = (8, 63)
        bool_index = np.zeros(shape).astype(bool)
        bool_index[0, 1] = True
        zero_array = np.zeros(shape)

        # Assigning float is fine.
        zero_array[bool_index] = np.array([1])
        assert_equal(zero_array[0, 1], 1)

        # Fancy indexing works, although we get a cast warning.
        assert_warns(ComplexWarning,
                     zero_array.__setitem__, ([0], [1]), np.array([2 + 1j]))
        assert_equal(zero_array[0, 1], 2)  # No complex part

        # Cast complex to float, throwing away the imaginary portion.
        assert_warns(ComplexWarning,
                     zero_array.__setitem__, bool_index, np.array([1j]))
        assert_equal(zero_array[0, 1], 0)
|
| 786 |
+
|
| 787 |
+
class TestFancyIndexingEquivalence:
    # Tests that fancy-index assignment is equivalent to the corresponding
    # slice assignment, including the object-dtype copyto special case.
    def test_object_assign(self):
        # Check that the field and object special case using copyto is active.
        # The right hand side cannot be converted to an array here.
        a = np.arange(5, dtype=object)
        b = a.copy()
        a[:3] = [1, (1,2), 3]
        b[[0, 1, 2]] = [1, (1,2), 3]
        assert_array_equal(a, b)

        # test same for subspace fancy indexing
        b = np.arange(5, dtype=object)[None, :]
        b[[0], :3] = [[1, (1,2), 3]]
        assert_array_equal(a, b[0])

        # Check that swapping of axes works.
        # There was a bug that made the later assignment throw a ValueError
        # due to an incorrectly transposed temporary right hand side (gh-5714)
        b = b.T
        b[:3, [0]] = [[1], [(1,2)], [3]]
        assert_array_equal(a, b[:, 0])

        # Another test for the memory order of the subspace
        arr = np.ones((3, 4, 5), dtype=object)
        # Equivalent slicing assignment for comparison
        cmp_arr = arr.copy()
        cmp_arr[:1, ...] = [[[1], [2], [3], [4]]]
        arr[[0], ...] = [[[1], [2], [3], [4]]]
        assert_array_equal(arr, cmp_arr)
        arr = arr.copy('F')
        arr[[0], ...] = [[[1], [2], [3], [4]]]
        assert_array_equal(arr, cmp_arr)

    def test_cast_equivalence(self):
        # Yes, normal slicing uses unsafe casting.
        a = np.arange(5)
        b = a.copy()

        a[:3] = np.array(['2', '-3', '-1'])
        b[[0, 2, 1]] = np.array(['2', '-1', '-3'])
        assert_array_equal(a, b)

        # test the same for subspace fancy indexing
        b = np.arange(5)[None, :]
        b[[0], :3] = np.array([['2', '-3', '-1']])
        assert_array_equal(a, b[0])
|
| 833 |
+
|
| 834 |
+
|
| 835 |
+
class TestMultiIndexingAutomated:
|
| 836 |
+
"""
|
| 837 |
+
These tests use code to mimic the C-Code indexing for selection.
|
| 838 |
+
|
| 839 |
+
NOTE:
|
| 840 |
+
|
| 841 |
+
* This still lacks tests for complex item setting.
|
| 842 |
+
* If you change behavior of indexing, you might want to modify
|
| 843 |
+
these tests to try more combinations.
|
| 844 |
+
* Behavior was written to match numpy version 1.8. (though a
|
| 845 |
+
first version matched 1.7.)
|
| 846 |
+
* Only tuple indices are supported by the mimicking code.
|
| 847 |
+
(and tested as of writing this)
|
| 848 |
+
* Error types should match most of the time as long as there
|
| 849 |
+
is only one error. For multiple errors, what gets raised
|
| 850 |
+
will usually not be the same one. They are *not* tested.
|
| 851 |
+
|
| 852 |
+
Update 2016-11-30: It is probably not worth maintaining this test
|
| 853 |
+
indefinitely and it can be dropped if maintenance becomes a burden.
|
| 854 |
+
|
| 855 |
+
"""
|
| 856 |
+
|
| 857 |
+
def setup_method(self):
|
| 858 |
+
self.a = np.arange(np.prod([3, 1, 5, 6])).reshape(3, 1, 5, 6)
|
| 859 |
+
self.b = np.empty((3, 0, 5, 6))
|
| 860 |
+
self.complex_indices = ['skip', Ellipsis,
|
| 861 |
+
0,
|
| 862 |
+
# Boolean indices, up to 3-d for some special cases of eating up
|
| 863 |
+
# dimensions, also need to test all False
|
| 864 |
+
np.array([True, False, False]),
|
| 865 |
+
np.array([[True, False], [False, True]]),
|
| 866 |
+
np.array([[[False, False], [False, False]]]),
|
| 867 |
+
# Some slices:
|
| 868 |
+
slice(-5, 5, 2),
|
| 869 |
+
slice(1, 1, 100),
|
| 870 |
+
slice(4, -1, -2),
|
| 871 |
+
slice(None, None, -3),
|
| 872 |
+
# Some Fancy indexes:
|
| 873 |
+
np.empty((0, 1, 1), dtype=np.intp), # empty and can be broadcast
|
| 874 |
+
np.array([0, 1, -2]),
|
| 875 |
+
np.array([[2], [0], [1]]),
|
| 876 |
+
np.array([[0, -1], [0, 1]], dtype=np.dtype('intp').newbyteorder()),
|
| 877 |
+
np.array([2, -1], dtype=np.int8),
|
| 878 |
+
np.zeros([1]*31, dtype=int), # trigger too large array.
|
| 879 |
+
np.array([0., 1.])] # invalid datatype
|
| 880 |
+
# Some simpler indices that still cover a bit more
|
| 881 |
+
self.simple_indices = [Ellipsis, None, -1, [1], np.array([True]),
|
| 882 |
+
'skip']
|
| 883 |
+
# Very simple ones to fill the rest:
|
| 884 |
+
self.fill_indices = [slice(None, None), 0]
|
| 885 |
+
|
| 886 |
+
def _get_multi_index(self, arr, indices):
|
| 887 |
+
"""Mimic multi dimensional indexing.
|
| 888 |
+
|
| 889 |
+
Parameters
|
| 890 |
+
----------
|
| 891 |
+
arr : ndarray
|
| 892 |
+
Array to be indexed.
|
| 893 |
+
indices : tuple of index objects
|
| 894 |
+
|
| 895 |
+
Returns
|
| 896 |
+
-------
|
| 897 |
+
out : ndarray
|
| 898 |
+
An array equivalent to the indexing operation (but always a copy).
|
| 899 |
+
`arr[indices]` should be identical.
|
| 900 |
+
no_copy : bool
|
| 901 |
+
Whether the indexing operation requires a copy. If this is `True`,
|
| 902 |
+
`np.may_share_memory(arr, arr[indices])` should be `True` (with
|
| 903 |
+
some exceptions for scalars and possibly 0-d arrays).
|
| 904 |
+
|
| 905 |
+
Notes
|
| 906 |
+
-----
|
| 907 |
+
While the function may mostly match the errors of normal indexing this
|
| 908 |
+
is generally not the case.
|
| 909 |
+
"""
|
| 910 |
+
in_indices = list(indices)
|
| 911 |
+
indices = []
|
| 912 |
+
# if False, this is a fancy or boolean index
|
| 913 |
+
no_copy = True
|
| 914 |
+
# number of fancy/scalar indexes that are not consecutive
|
| 915 |
+
num_fancy = 0
|
| 916 |
+
# number of dimensions indexed by a "fancy" index
|
| 917 |
+
fancy_dim = 0
|
| 918 |
+
# NOTE: This is a funny twist (and probably OK to change).
|
| 919 |
+
# The boolean array has illegal indexes, but this is
|
| 920 |
+
# allowed if the broadcast fancy-indices are 0-sized.
|
| 921 |
+
# This variable is to catch that case.
|
| 922 |
+
error_unless_broadcast_to_empty = False
|
| 923 |
+
|
| 924 |
+
# We need to handle Ellipsis and make arrays from indices, also
|
| 925 |
+
# check if this is fancy indexing (set no_copy).
|
| 926 |
+
ndim = 0
|
| 927 |
+
ellipsis_pos = None # define here mostly to replace all but first.
|
| 928 |
+
for i, indx in enumerate(in_indices):
|
| 929 |
+
if indx is None:
|
| 930 |
+
continue
|
| 931 |
+
if isinstance(indx, np.ndarray) and indx.dtype == bool:
|
| 932 |
+
no_copy = False
|
| 933 |
+
if indx.ndim == 0:
|
| 934 |
+
raise IndexError
|
| 935 |
+
# boolean indices can have higher dimensions
|
| 936 |
+
ndim += indx.ndim
|
| 937 |
+
fancy_dim += indx.ndim
|
| 938 |
+
continue
|
| 939 |
+
if indx is Ellipsis:
|
| 940 |
+
if ellipsis_pos is None:
|
| 941 |
+
ellipsis_pos = i
|
| 942 |
+
continue # do not increment ndim counter
|
| 943 |
+
raise IndexError
|
| 944 |
+
if isinstance(indx, slice):
|
| 945 |
+
ndim += 1
|
| 946 |
+
continue
|
| 947 |
+
if not isinstance(indx, np.ndarray):
|
| 948 |
+
# This could be open for changes in numpy.
|
| 949 |
+
# numpy should maybe raise an error if casting to intp
|
| 950 |
+
# is not safe. It rejects np.array([1., 2.]) but not
|
| 951 |
+
# [1., 2.] as index (same for ie. np.take).
|
| 952 |
+
# (Note the importance of empty lists if changing this here)
|
| 953 |
+
try:
|
| 954 |
+
indx = np.array(indx, dtype=np.intp)
|
| 955 |
+
except ValueError:
|
| 956 |
+
raise IndexError
|
| 957 |
+
in_indices[i] = indx
|
| 958 |
+
elif indx.dtype.kind != 'b' and indx.dtype.kind != 'i':
|
| 959 |
+
raise IndexError('arrays used as indices must be of '
|
| 960 |
+
'integer (or boolean) type')
|
| 961 |
+
if indx.ndim != 0:
|
| 962 |
+
no_copy = False
|
| 963 |
+
ndim += 1
|
| 964 |
+
fancy_dim += 1
|
| 965 |
+
|
| 966 |
+
if arr.ndim - ndim < 0:
|
| 967 |
+
# we can't take more dimensions then we have, not even for 0-d
|
| 968 |
+
# arrays. since a[()] makes sense, but not a[(),]. We will
|
| 969 |
+
# raise an error later on, unless a broadcasting error occurs
|
| 970 |
+
# first.
|
| 971 |
+
raise IndexError
|
| 972 |
+
|
| 973 |
+
if ndim == 0 and None not in in_indices:
|
| 974 |
+
# Well we have no indexes or one Ellipsis. This is legal.
|
| 975 |
+
return arr.copy(), no_copy
|
| 976 |
+
|
| 977 |
+
if ellipsis_pos is not None:
|
| 978 |
+
in_indices[ellipsis_pos:ellipsis_pos+1] = ([slice(None, None)] *
|
| 979 |
+
(arr.ndim - ndim))
|
| 980 |
+
|
| 981 |
+
for ax, indx in enumerate(in_indices):
|
| 982 |
+
if isinstance(indx, slice):
|
| 983 |
+
# convert to an index array
|
| 984 |
+
indx = np.arange(*indx.indices(arr.shape[ax]))
|
| 985 |
+
indices.append(['s', indx])
|
| 986 |
+
continue
|
| 987 |
+
elif indx is None:
|
| 988 |
+
# this is like taking a slice with one element from a new axis:
|
| 989 |
+
indices.append(['n', np.array([0], dtype=np.intp)])
|
| 990 |
+
arr = arr.reshape(arr.shape[:ax] + (1,) + arr.shape[ax:])
|
| 991 |
+
continue
|
| 992 |
+
if isinstance(indx, np.ndarray) and indx.dtype == bool:
|
| 993 |
+
if indx.shape != arr.shape[ax:ax+indx.ndim]:
|
| 994 |
+
raise IndexError
|
| 995 |
+
|
| 996 |
+
try:
|
| 997 |
+
flat_indx = np.ravel_multi_index(np.nonzero(indx),
|
| 998 |
+
arr.shape[ax:ax+indx.ndim], mode='raise')
|
| 999 |
+
except Exception:
|
| 1000 |
+
error_unless_broadcast_to_empty = True
|
| 1001 |
+
# fill with 0s instead, and raise error later
|
| 1002 |
+
flat_indx = np.array([0]*indx.sum(), dtype=np.intp)
|
| 1003 |
+
# concatenate axis into a single one:
|
| 1004 |
+
if indx.ndim != 0:
|
| 1005 |
+
arr = arr.reshape(arr.shape[:ax]
|
| 1006 |
+
+ (np.prod(arr.shape[ax:ax+indx.ndim]),)
|
| 1007 |
+
+ arr.shape[ax+indx.ndim:])
|
| 1008 |
+
indx = flat_indx
|
| 1009 |
+
else:
|
| 1010 |
+
# This could be changed, a 0-d boolean index can
|
| 1011 |
+
# make sense (even outside the 0-d indexed array case)
|
| 1012 |
+
# Note that originally this is could be interpreted as
|
| 1013 |
+
# integer in the full integer special case.
|
| 1014 |
+
raise IndexError
|
| 1015 |
+
else:
|
| 1016 |
+
# If the index is a singleton, the bounds check is done
|
| 1017 |
+
# before the broadcasting. This used to be different in <1.9
|
| 1018 |
+
if indx.ndim == 0:
|
| 1019 |
+
if indx >= arr.shape[ax] or indx < -arr.shape[ax]:
|
| 1020 |
+
raise IndexError
|
| 1021 |
+
if indx.ndim == 0:
|
| 1022 |
+
# The index is a scalar. This used to be two fold, but if
|
| 1023 |
+
# fancy indexing was active, the check was done later,
|
| 1024 |
+
# possibly after broadcasting it away (1.7. or earlier).
|
| 1025 |
+
# Now it is always done.
|
| 1026 |
+
if indx >= arr.shape[ax] or indx < - arr.shape[ax]:
|
| 1027 |
+
raise IndexError
|
| 1028 |
+
if (len(indices) > 0 and
|
| 1029 |
+
indices[-1][0] == 'f' and
|
| 1030 |
+
ax != ellipsis_pos):
|
| 1031 |
+
# NOTE: There could still have been a 0-sized Ellipsis
|
| 1032 |
+
# between them. Checked that with ellipsis_pos.
|
| 1033 |
+
indices[-1].append(indx)
|
| 1034 |
+
else:
|
| 1035 |
+
# We have a fancy index that is not after an existing one.
|
| 1036 |
+
# NOTE: A 0-d array triggers this as well, while one may
|
| 1037 |
+
# expect it to not trigger it, since a scalar would not be
|
| 1038 |
+
# considered fancy indexing.
|
| 1039 |
+
num_fancy += 1
|
| 1040 |
+
indices.append(['f', indx])
|
| 1041 |
+
|
| 1042 |
+
if num_fancy > 1 and not no_copy:
|
| 1043 |
+
# We have to flush the fancy indexes left
|
| 1044 |
+
new_indices = indices[:]
|
| 1045 |
+
axes = list(range(arr.ndim))
|
| 1046 |
+
fancy_axes = []
|
| 1047 |
+
new_indices.insert(0, ['f'])
|
| 1048 |
+
ni = 0
|
| 1049 |
+
ai = 0
|
| 1050 |
+
for indx in indices:
|
| 1051 |
+
ni += 1
|
| 1052 |
+
if indx[0] == 'f':
|
| 1053 |
+
new_indices[0].extend(indx[1:])
|
| 1054 |
+
del new_indices[ni]
|
| 1055 |
+
ni -= 1
|
| 1056 |
+
for ax in range(ai, ai + len(indx[1:])):
|
| 1057 |
+
fancy_axes.append(ax)
|
| 1058 |
+
axes.remove(ax)
|
| 1059 |
+
ai += len(indx) - 1 # axis we are at
|
| 1060 |
+
indices = new_indices
|
| 1061 |
+
# and now we need to transpose arr:
|
| 1062 |
+
arr = arr.transpose(*(fancy_axes + axes))
|
| 1063 |
+
|
| 1064 |
+
# We only have one 'f' index now and arr is transposed accordingly.
|
| 1065 |
+
# Now handle newaxis by reshaping...
|
| 1066 |
+
ax = 0
|
| 1067 |
+
for indx in indices:
|
| 1068 |
+
if indx[0] == 'f':
|
| 1069 |
+
if len(indx) == 1:
|
| 1070 |
+
continue
|
| 1071 |
+
# First of all, reshape arr to combine fancy axes into one:
|
| 1072 |
+
orig_shape = arr.shape
|
| 1073 |
+
orig_slice = orig_shape[ax:ax + len(indx[1:])]
|
| 1074 |
+
arr = arr.reshape(arr.shape[:ax]
|
| 1075 |
+
+ (np.prod(orig_slice).astype(int),)
|
| 1076 |
+
+ arr.shape[ax + len(indx[1:]):])
|
| 1077 |
+
|
| 1078 |
+
# Check if broadcasting works
|
| 1079 |
+
res = np.broadcast(*indx[1:])
|
| 1080 |
+
# unfortunately the indices might be out of bounds. So check
|
| 1081 |
+
# that first, and use mode='wrap' then. However only if
|
| 1082 |
+
# there are any indices...
|
| 1083 |
+
if res.size != 0:
|
| 1084 |
+
if error_unless_broadcast_to_empty:
|
| 1085 |
+
raise IndexError
|
| 1086 |
+
for _indx, _size in zip(indx[1:], orig_slice):
|
| 1087 |
+
if _indx.size == 0:
|
| 1088 |
+
continue
|
| 1089 |
+
if np.any(_indx >= _size) or np.any(_indx < -_size):
|
| 1090 |
+
raise IndexError
|
| 1091 |
+
if len(indx[1:]) == len(orig_slice):
|
| 1092 |
+
if np.prod(orig_slice) == 0:
|
| 1093 |
+
# Work around for a crash or IndexError with 'wrap'
|
| 1094 |
+
# in some 0-sized cases.
|
| 1095 |
+
try:
|
| 1096 |
+
mi = np.ravel_multi_index(indx[1:], orig_slice,
|
| 1097 |
+
mode='raise')
|
| 1098 |
+
except Exception:
|
| 1099 |
+
# This happens with 0-sized orig_slice (sometimes?)
|
| 1100 |
+
# here it is a ValueError, but indexing gives a:
|
| 1101 |
+
raise IndexError('invalid index into 0-sized')
|
| 1102 |
+
else:
|
| 1103 |
+
mi = np.ravel_multi_index(indx[1:], orig_slice,
|
| 1104 |
+
mode='wrap')
|
| 1105 |
+
else:
|
| 1106 |
+
# Maybe never happens...
|
| 1107 |
+
raise ValueError
|
| 1108 |
+
arr = arr.take(mi.ravel(), axis=ax)
|
| 1109 |
+
try:
|
| 1110 |
+
arr = arr.reshape(arr.shape[:ax]
|
| 1111 |
+
+ mi.shape
|
| 1112 |
+
+ arr.shape[ax+1:])
|
| 1113 |
+
except ValueError:
|
| 1114 |
+
# too many dimensions, probably
|
| 1115 |
+
raise IndexError
|
| 1116 |
+
ax += mi.ndim
|
| 1117 |
+
continue
|
| 1118 |
+
|
| 1119 |
+
# If we are here, we have a 1D array for take:
|
| 1120 |
+
arr = arr.take(indx[1], axis=ax)
|
| 1121 |
+
ax += 1
|
| 1122 |
+
|
| 1123 |
+
return arr, no_copy
|
| 1124 |
+
|
| 1125 |
+
def _check_multi_index(self, arr, index):
|
| 1126 |
+
"""Check a multi index item getting and simple setting.
|
| 1127 |
+
|
| 1128 |
+
Parameters
|
| 1129 |
+
----------
|
| 1130 |
+
arr : ndarray
|
| 1131 |
+
Array to be indexed, must be a reshaped arange.
|
| 1132 |
+
index : tuple of indexing objects
|
| 1133 |
+
Index being tested.
|
| 1134 |
+
"""
|
| 1135 |
+
# Test item getting
|
| 1136 |
+
try:
|
| 1137 |
+
mimic_get, no_copy = self._get_multi_index(arr, index)
|
| 1138 |
+
except Exception as e:
|
| 1139 |
+
if HAS_REFCOUNT:
|
| 1140 |
+
prev_refcount = sys.getrefcount(arr)
|
| 1141 |
+
assert_raises(type(e), arr.__getitem__, index)
|
| 1142 |
+
assert_raises(type(e), arr.__setitem__, index, 0)
|
| 1143 |
+
if HAS_REFCOUNT:
|
| 1144 |
+
assert_equal(prev_refcount, sys.getrefcount(arr))
|
| 1145 |
+
return
|
| 1146 |
+
|
| 1147 |
+
self._compare_index_result(arr, index, mimic_get, no_copy)
|
| 1148 |
+
|
| 1149 |
+
def _check_single_index(self, arr, index):
|
| 1150 |
+
"""Check a single index item getting and simple setting.
|
| 1151 |
+
|
| 1152 |
+
Parameters
|
| 1153 |
+
----------
|
| 1154 |
+
arr : ndarray
|
| 1155 |
+
Array to be indexed, must be an arange.
|
| 1156 |
+
index : indexing object
|
| 1157 |
+
Index being tested. Must be a single index and not a tuple
|
| 1158 |
+
of indexing objects (see also `_check_multi_index`).
|
| 1159 |
+
"""
|
| 1160 |
+
try:
|
| 1161 |
+
mimic_get, no_copy = self._get_multi_index(arr, (index,))
|
| 1162 |
+
except Exception as e:
|
| 1163 |
+
if HAS_REFCOUNT:
|
| 1164 |
+
prev_refcount = sys.getrefcount(arr)
|
| 1165 |
+
assert_raises(type(e), arr.__getitem__, index)
|
| 1166 |
+
assert_raises(type(e), arr.__setitem__, index, 0)
|
| 1167 |
+
if HAS_REFCOUNT:
|
| 1168 |
+
assert_equal(prev_refcount, sys.getrefcount(arr))
|
| 1169 |
+
return
|
| 1170 |
+
|
| 1171 |
+
self._compare_index_result(arr, index, mimic_get, no_copy)
|
| 1172 |
+
|
| 1173 |
+
def _compare_index_result(self, arr, index, mimic_get, no_copy):
|
| 1174 |
+
"""Compare mimicked result to indexing result.
|
| 1175 |
+
"""
|
| 1176 |
+
arr = arr.copy()
|
| 1177 |
+
indexed_arr = arr[index]
|
| 1178 |
+
assert_array_equal(indexed_arr, mimic_get)
|
| 1179 |
+
# Check if we got a view, unless its a 0-sized or 0-d array.
|
| 1180 |
+
# (then its not a view, and that does not matter)
|
| 1181 |
+
if indexed_arr.size != 0 and indexed_arr.ndim != 0:
|
| 1182 |
+
assert_(np.may_share_memory(indexed_arr, arr) == no_copy)
|
| 1183 |
+
# Check reference count of the original array
|
| 1184 |
+
if HAS_REFCOUNT:
|
| 1185 |
+
if no_copy:
|
| 1186 |
+
# refcount increases by one:
|
| 1187 |
+
assert_equal(sys.getrefcount(arr), 3)
|
| 1188 |
+
else:
|
| 1189 |
+
assert_equal(sys.getrefcount(arr), 2)
|
| 1190 |
+
|
| 1191 |
+
# Test non-broadcast setitem:
|
| 1192 |
+
b = arr.copy()
|
| 1193 |
+
b[index] = mimic_get + 1000
|
| 1194 |
+
if b.size == 0:
|
| 1195 |
+
return # nothing to compare here...
|
| 1196 |
+
if no_copy and indexed_arr.ndim != 0:
|
| 1197 |
+
# change indexed_arr in-place to manipulate original:
|
| 1198 |
+
indexed_arr += 1000
|
| 1199 |
+
assert_array_equal(arr, b)
|
| 1200 |
+
return
|
| 1201 |
+
# Use the fact that the array is originally an arange:
|
| 1202 |
+
arr.flat[indexed_arr.ravel()] += 1000
|
| 1203 |
+
assert_array_equal(arr, b)
|
| 1204 |
+
|
| 1205 |
+
def test_boolean(self):
|
| 1206 |
+
a = np.array(5)
|
| 1207 |
+
assert_equal(a[np.array(True)], 5)
|
| 1208 |
+
a[np.array(True)] = 1
|
| 1209 |
+
assert_equal(a, 1)
|
| 1210 |
+
# NOTE: This is different from normal broadcasting, as
|
| 1211 |
+
# arr[boolean_array] works like in a multi index. Which means
|
| 1212 |
+
# it is aligned to the left. This is probably correct for
|
| 1213 |
+
# consistency with arr[boolean_array,] also no broadcasting
|
| 1214 |
+
# is done at all
|
| 1215 |
+
self._check_multi_index(
|
| 1216 |
+
self.a, (np.zeros_like(self.a, dtype=bool),))
|
| 1217 |
+
self._check_multi_index(
|
| 1218 |
+
self.a, (np.zeros_like(self.a, dtype=bool)[..., 0],))
|
| 1219 |
+
self._check_multi_index(
|
| 1220 |
+
self.a, (np.zeros_like(self.a, dtype=bool)[None, ...],))
|
| 1221 |
+
|
| 1222 |
+
def test_multidim(self):
|
| 1223 |
+
# Automatically test combinations with complex indexes on 2nd (or 1st)
|
| 1224 |
+
# spot and the simple ones in one other spot.
|
| 1225 |
+
with warnings.catch_warnings():
|
| 1226 |
+
# This is so that np.array(True) is not accepted in a full integer
|
| 1227 |
+
# index, when running the file separately.
|
| 1228 |
+
warnings.filterwarnings('error', '', DeprecationWarning)
|
| 1229 |
+
warnings.filterwarnings('error', '', VisibleDeprecationWarning)
|
| 1230 |
+
|
| 1231 |
+
def isskip(idx):
|
| 1232 |
+
return isinstance(idx, str) and idx == "skip"
|
| 1233 |
+
|
| 1234 |
+
for simple_pos in [0, 2, 3]:
|
| 1235 |
+
tocheck = [self.fill_indices, self.complex_indices,
|
| 1236 |
+
self.fill_indices, self.fill_indices]
|
| 1237 |
+
tocheck[simple_pos] = self.simple_indices
|
| 1238 |
+
for index in product(*tocheck):
|
| 1239 |
+
index = tuple(i for i in index if not isskip(i))
|
| 1240 |
+
self._check_multi_index(self.a, index)
|
| 1241 |
+
self._check_multi_index(self.b, index)
|
| 1242 |
+
|
| 1243 |
+
# Check very simple item getting:
|
| 1244 |
+
self._check_multi_index(self.a, (0, 0, 0, 0))
|
| 1245 |
+
self._check_multi_index(self.b, (0, 0, 0, 0))
|
| 1246 |
+
# Also check (simple cases of) too many indices:
|
| 1247 |
+
assert_raises(IndexError, self.a.__getitem__, (0, 0, 0, 0, 0))
|
| 1248 |
+
assert_raises(IndexError, self.a.__setitem__, (0, 0, 0, 0, 0), 0)
|
| 1249 |
+
assert_raises(IndexError, self.a.__getitem__, (0, 0, [1], 0, 0))
|
| 1250 |
+
assert_raises(IndexError, self.a.__setitem__, (0, 0, [1], 0, 0), 0)
|
| 1251 |
+
|
| 1252 |
+
def test_1d(self):
|
| 1253 |
+
a = np.arange(10)
|
| 1254 |
+
for index in self.complex_indices:
|
| 1255 |
+
self._check_single_index(a, index)
|
| 1256 |
+
|
| 1257 |
+
class TestFloatNonIntegerArgument:
|
| 1258 |
+
"""
|
| 1259 |
+
These test that ``TypeError`` is raised when you try to use
|
| 1260 |
+
non-integers as arguments to for indexing and slicing e.g. ``a[0.0:5]``
|
| 1261 |
+
and ``a[0.5]``, or other functions like ``array.reshape(1., -1)``.
|
| 1262 |
+
|
| 1263 |
+
"""
|
| 1264 |
+
def test_valid_indexing(self):
|
| 1265 |
+
# These should raise no errors.
|
| 1266 |
+
a = np.array([[[5]]])
|
| 1267 |
+
|
| 1268 |
+
a[np.array([0])]
|
| 1269 |
+
a[[0, 0]]
|
| 1270 |
+
a[:, [0, 0]]
|
| 1271 |
+
a[:, 0,:]
|
| 1272 |
+
a[:,:,:]
|
| 1273 |
+
|
| 1274 |
+
def test_valid_slicing(self):
|
| 1275 |
+
# These should raise no errors.
|
| 1276 |
+
a = np.array([[[5]]])
|
| 1277 |
+
|
| 1278 |
+
a[::]
|
| 1279 |
+
a[0:]
|
| 1280 |
+
a[:2]
|
| 1281 |
+
a[0:2]
|
| 1282 |
+
a[::2]
|
| 1283 |
+
a[1::2]
|
| 1284 |
+
a[:2:2]
|
| 1285 |
+
a[1:2:2]
|
| 1286 |
+
|
| 1287 |
+
def test_non_integer_argument_errors(self):
|
| 1288 |
+
a = np.array([[5]])
|
| 1289 |
+
|
| 1290 |
+
assert_raises(TypeError, np.reshape, a, (1., 1., -1))
|
| 1291 |
+
assert_raises(TypeError, np.reshape, a, (np.array(1.), -1))
|
| 1292 |
+
assert_raises(TypeError, np.take, a, [0], 1.)
|
| 1293 |
+
assert_raises(TypeError, np.take, a, [0], np.float64(1.))
|
| 1294 |
+
|
| 1295 |
+
def test_non_integer_sequence_multiplication(self):
|
| 1296 |
+
# NumPy scalar sequence multiply should not work with non-integers
|
| 1297 |
+
def mult(a, b):
|
| 1298 |
+
return a * b
|
| 1299 |
+
|
| 1300 |
+
assert_raises(TypeError, mult, [1], np.float64(3))
|
| 1301 |
+
# following should be OK
|
| 1302 |
+
mult([1], np.int_(3))
|
| 1303 |
+
|
| 1304 |
+
def test_reduce_axis_float_index(self):
|
| 1305 |
+
d = np.zeros((3,3,3))
|
| 1306 |
+
assert_raises(TypeError, np.min, d, 0.5)
|
| 1307 |
+
assert_raises(TypeError, np.min, d, (0.5, 1))
|
| 1308 |
+
assert_raises(TypeError, np.min, d, (1, 2.2))
|
| 1309 |
+
assert_raises(TypeError, np.min, d, (.2, 1.2))
|
| 1310 |
+
|
| 1311 |
+
|
| 1312 |
+
class TestBooleanIndexing:
|
| 1313 |
+
# Using a boolean as integer argument/indexing is an error.
|
| 1314 |
+
def test_bool_as_int_argument_errors(self):
|
| 1315 |
+
a = np.array([[[1]]])
|
| 1316 |
+
|
| 1317 |
+
assert_raises(TypeError, np.reshape, a, (True, -1))
|
| 1318 |
+
assert_raises(TypeError, np.reshape, a, (np.bool(True), -1))
|
| 1319 |
+
# Note that operator.index(np.array(True)) does not work, a boolean
|
| 1320 |
+
# array is thus also deprecated, but not with the same message:
|
| 1321 |
+
assert_raises(TypeError, operator.index, np.array(True))
|
| 1322 |
+
assert_warns(DeprecationWarning, operator.index, np.True_)
|
| 1323 |
+
assert_raises(TypeError, np.take, args=(a, [0], False))
|
| 1324 |
+
|
| 1325 |
+
def test_boolean_indexing_weirdness(self):
|
| 1326 |
+
# Weird boolean indexing things
|
| 1327 |
+
a = np.ones((2, 3, 4))
|
| 1328 |
+
assert a[False, True, ...].shape == (0, 2, 3, 4)
|
| 1329 |
+
assert a[True, [0, 1], True, True, [1], [[2]]].shape == (1, 2)
|
| 1330 |
+
assert_raises(IndexError, lambda: a[False, [0, 1], ...])
|
| 1331 |
+
|
| 1332 |
+
def test_boolean_indexing_fast_path(self):
|
| 1333 |
+
# These used to either give the wrong error, or incorrectly give no
|
| 1334 |
+
# error.
|
| 1335 |
+
a = np.ones((3, 3))
|
| 1336 |
+
|
| 1337 |
+
# This used to incorrectly work (and give an array of shape (0,))
|
| 1338 |
+
idx1 = np.array([[False]*9])
|
| 1339 |
+
assert_raises_regex(IndexError,
|
| 1340 |
+
"boolean index did not match indexed array along axis 0; "
|
| 1341 |
+
"size of axis is 3 but size of corresponding boolean axis is 1",
|
| 1342 |
+
lambda: a[idx1])
|
| 1343 |
+
|
| 1344 |
+
# This used to incorrectly give a ValueError: operands could not be broadcast together
|
| 1345 |
+
idx2 = np.array([[False]*8 + [True]])
|
| 1346 |
+
assert_raises_regex(IndexError,
|
| 1347 |
+
"boolean index did not match indexed array along axis 0; "
|
| 1348 |
+
"size of axis is 3 but size of corresponding boolean axis is 1",
|
| 1349 |
+
lambda: a[idx2])
|
| 1350 |
+
|
| 1351 |
+
# This is the same as it used to be. The above two should work like this.
|
| 1352 |
+
idx3 = np.array([[False]*10])
|
| 1353 |
+
assert_raises_regex(IndexError,
|
| 1354 |
+
"boolean index did not match indexed array along axis 0; "
|
| 1355 |
+
"size of axis is 3 but size of corresponding boolean axis is 1",
|
| 1356 |
+
lambda: a[idx3])
|
| 1357 |
+
|
| 1358 |
+
# This used to give ValueError: non-broadcastable operand
|
| 1359 |
+
a = np.ones((1, 1, 2))
|
| 1360 |
+
idx = np.array([[[True], [False]]])
|
| 1361 |
+
assert_raises_regex(IndexError,
|
| 1362 |
+
"boolean index did not match indexed array along axis 1; "
|
| 1363 |
+
"size of axis is 1 but size of corresponding boolean axis is 2",
|
| 1364 |
+
lambda: a[idx])
|
| 1365 |
+
|
| 1366 |
+
|
| 1367 |
+
class TestArrayToIndexDeprecation:
|
| 1368 |
+
"""Creating an index from array not 0-D is an error.
|
| 1369 |
+
|
| 1370 |
+
"""
|
| 1371 |
+
def test_array_to_index_error(self):
|
| 1372 |
+
# so no exception is expected. The raising is effectively tested above.
|
| 1373 |
+
a = np.array([[[1]]])
|
| 1374 |
+
|
| 1375 |
+
assert_raises(TypeError, operator.index, np.array([1]))
|
| 1376 |
+
assert_raises(TypeError, np.reshape, a, (a, -1))
|
| 1377 |
+
assert_raises(TypeError, np.take, a, [0], a)
|
| 1378 |
+
|
| 1379 |
+
|
| 1380 |
+
class TestNonIntegerArrayLike:
|
| 1381 |
+
"""Tests that array_likes only valid if can safely cast to integer.
|
| 1382 |
+
|
| 1383 |
+
For instance, lists give IndexError when they cannot be safely cast to
|
| 1384 |
+
an integer.
|
| 1385 |
+
|
| 1386 |
+
"""
|
| 1387 |
+
def test_basic(self):
|
| 1388 |
+
a = np.arange(10)
|
| 1389 |
+
|
| 1390 |
+
assert_raises(IndexError, a.__getitem__, [0.5, 1.5])
|
| 1391 |
+
assert_raises(IndexError, a.__getitem__, (['1', '2'],))
|
| 1392 |
+
|
| 1393 |
+
# The following is valid
|
| 1394 |
+
a.__getitem__([])
|
| 1395 |
+
|
| 1396 |
+
|
| 1397 |
+
class TestMultipleEllipsisError:
|
| 1398 |
+
"""An index can only have a single ellipsis.
|
| 1399 |
+
|
| 1400 |
+
"""
|
| 1401 |
+
def test_basic(self):
|
| 1402 |
+
a = np.arange(10)
|
| 1403 |
+
assert_raises(IndexError, lambda: a[..., ...])
|
| 1404 |
+
assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * 2,))
|
| 1405 |
+
assert_raises(IndexError, a.__getitem__, ((Ellipsis,) * 3,))
|
| 1406 |
+
|
| 1407 |
+
|
| 1408 |
+
class TestCApiAccess:
|
| 1409 |
+
def test_getitem(self):
|
| 1410 |
+
subscript = functools.partial(array_indexing, 0)
|
| 1411 |
+
|
| 1412 |
+
# 0-d arrays don't work:
|
| 1413 |
+
assert_raises(IndexError, subscript, np.ones(()), 0)
|
| 1414 |
+
# Out of bound values:
|
| 1415 |
+
assert_raises(IndexError, subscript, np.ones(10), 11)
|
| 1416 |
+
assert_raises(IndexError, subscript, np.ones(10), -11)
|
| 1417 |
+
assert_raises(IndexError, subscript, np.ones((10, 10)), 11)
|
| 1418 |
+
assert_raises(IndexError, subscript, np.ones((10, 10)), -11)
|
| 1419 |
+
|
| 1420 |
+
a = np.arange(10)
|
| 1421 |
+
assert_array_equal(a[4], subscript(a, 4))
|
| 1422 |
+
a = a.reshape(5, 2)
|
| 1423 |
+
assert_array_equal(a[-4], subscript(a, -4))
|
| 1424 |
+
|
| 1425 |
+
def test_setitem(self):
|
| 1426 |
+
assign = functools.partial(array_indexing, 1)
|
| 1427 |
+
|
| 1428 |
+
# Deletion is impossible:
|
| 1429 |
+
assert_raises(ValueError, assign, np.ones(10), 0)
|
| 1430 |
+
# 0-d arrays don't work:
|
| 1431 |
+
assert_raises(IndexError, assign, np.ones(()), 0, 0)
|
| 1432 |
+
# Out of bound values:
|
| 1433 |
+
assert_raises(IndexError, assign, np.ones(10), 11, 0)
|
| 1434 |
+
assert_raises(IndexError, assign, np.ones(10), -11, 0)
|
| 1435 |
+
assert_raises(IndexError, assign, np.ones((10, 10)), 11, 0)
|
| 1436 |
+
assert_raises(IndexError, assign, np.ones((10, 10)), -11, 0)
|
| 1437 |
+
|
| 1438 |
+
a = np.arange(10)
|
| 1439 |
+
assign(a, 4, 10)
|
| 1440 |
+
assert_(a[4] == 10)
|
| 1441 |
+
|
| 1442 |
+
a = a.reshape(5, 2)
|
| 1443 |
+
assign(a, 4, 10)
|
| 1444 |
+
assert_array_equal(a[-1], [10, 10])
|
mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_mem_overlap.py
ADDED
|
@@ -0,0 +1,933 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import itertools
|
| 2 |
+
import pytest
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
from numpy._core._multiarray_tests import solve_diophantine, internal_overlap
|
| 6 |
+
from numpy._core import _umath_tests
|
| 7 |
+
from numpy.lib.stride_tricks import as_strided
|
| 8 |
+
from numpy.testing import (
|
| 9 |
+
assert_, assert_raises, assert_equal, assert_array_equal
|
| 10 |
+
)
|
| 11 |
+
|
| 12 |
+
|
# Geometry of the arrays exercised by the overlapping-assignment tests below.
ndims = 2
size = 10
shape = tuple([size] * ndims)

# Sentinel values passed as np.may_share_memory's max_work argument:
# MAY_SHARE_BOUNDS requests the cheap bounds-only check, MAY_SHARE_EXACT
# the exhaustive exact check (see check_may_share_memory_exact below).
MAY_SHARE_BOUNDS = 0
MAY_SHARE_EXACT = -1
| 19 |
+
|
| 20 |
+
|
def _indices_for_nelems(nelems):
    """Return index objects selecting `nelems` elements along one axis.

    For nelems == 0 a single scalar index (the axis midpoint) is
    returned; otherwise slices in both directions with steps 1 and 2.
    """
    mid = size // 2

    if nelems == 0:
        # A plain integer index removes the axis entirely.
        return [mid]

    out = []
    for stride in (1, 2):
        for direction in (-1, 1):
            extent = nelems * stride * direction
            begin = mid - extent // 2
            out.append(slice(begin, begin + extent, stride * direction))

    return out
| 35 |
+
|
| 36 |
+
|
def _indices_for_axis():
    """Returns (src, dst) pairs of indices."""
    pairs = []
    for count in (0, 2, 3):
        candidates = _indices_for_nelems(count)
        # Every src/dst combination transferring `count` elements.
        pairs += list(itertools.product(candidates, candidates))
    return pairs
| 46 |
+
|
| 47 |
+
|
def _indices(ndims):
    """Returns ((axis0_src, axis0_dst), (axis1_src, axis1_dst), ... ) index pairs."""
    axis_pairs = _indices_for_axis()
    # One (src, dst) pair chosen independently per axis.
    return itertools.product(*([axis_pairs] * ndims))
| 53 |
+
|
| 54 |
+
|
def _check_assignment(srcidx, dstidx):
    """Check assignment arr[dstidx] = arr[srcidx] works."""
    base = np.arange(np.prod(shape)).reshape(shape)
    reference = base.copy()

    # The copy-based assignment has no memory overlap: ground truth.
    reference[dstidx] = base[srcidx]
    # The in-place assignment may overlap in memory and must still match.
    base[dstidx] = base[srcidx]

    assert_(np.all(base == reference),
            'assigning arr[%s] = arr[%s]' % (dstidx, srcidx))
| 67 |
+
|
| 68 |
+
|
def test_overlapping_assignments():
    # Test automatically generated assignments which overlap in memory.
    for index_pairs in _indices(ndims):
        src = tuple(pair[0] for pair in index_pairs)
        dst = tuple(pair[1] for pair in index_pairs)
        _check_assignment(src, dst)
| 79 |
+
|
| 80 |
+
|
@pytest.mark.slow
def test_diophantine_fuzz():
    # Fuzz test the diophantine solver: random bounded problems
    # sum(A[j]*x[j]) == b with 0 <= x[j] <= U[j], cross-checked against
    # brute force when the search space is small enough.
    rng = np.random.RandomState(1234)

    max_int = np.iinfo(np.intp).max

    for ndim in range(10):
        # Count both outcomes so each solver code path gets exercised.
        feasible_count = 0
        infeasible_count = 0

        # Fewer cases in higher dimensions; each case is more work.
        min_count = 500//(ndim + 1)

        while min(feasible_count, infeasible_count) < min_count:
            # Ensure big and small integer problems
            A_max = 1 + rng.randint(0, 11, dtype=np.intp)**6
            U_max = rng.randint(0, 11, dtype=np.intp)**6

            # Clamp so the +1/+2 offsets below cannot overflow intp.
            A_max = min(max_int, A_max)
            U_max = min(max_int-1, U_max)

            A = tuple(int(rng.randint(1, A_max+1, dtype=np.intp))
                      for j in range(ndim))
            U = tuple(int(rng.randint(0, U_max+2, dtype=np.intp))
                      for j in range(ndim))

            b_ub = min(max_int-2, sum(a*ub for a, ub in zip(A, U)))
            b = int(rng.randint(-1, b_ub+2, dtype=np.intp))

            if ndim == 0 and feasible_count < min_count:
                # The empty problem is only feasible for b == 0.
                b = 0

            X = solve_diophantine(A, U, b)

            if X is None:
                # Check the simplified decision problem agrees
                X_simplified = solve_diophantine(A, U, b, simplify=1)
                assert_(X_simplified is None, (A, U, b, X_simplified))

                # Check no solution exists (provided the problem is
                # small enough so that brute force checking doesn't
                # take too long)
                ranges = tuple(range(0, a*ub+1, a) for a, ub in zip(A, U))

                size = 1
                for r in ranges:
                    size *= len(r)
                if size < 100000:
                    assert_(not any(sum(w) == b for w in itertools.product(*ranges)))
                infeasible_count += 1
            else:
                # Check the simplified decision problem agrees
                X_simplified = solve_diophantine(A, U, b, simplify=1)
                assert_(X_simplified is not None, (A, U, b, X_simplified))

                # Check validity: the solution satisfies the equation
                # and respects the bounds.
                assert_(sum(a*x for a, x in zip(A, X)) == b)
                assert_(all(0 <= x <= ub for x, ub in zip(X, U)))
                feasible_count += 1
| 140 |
+
|
| 141 |
+
|
def test_diophantine_overflow():
    # Smoke test integer overflow detection
    intp_max = np.iinfo(np.intp).max
    int64_max = np.iinfo(np.int64).max

    if int64_max <= intp_max:
        # Intermediate terms exceed 64 bits, so solving this instance
        # requires the solver's internal 128-bit arithmetic.
        half = int64_max // 2
        coeffs = (half, half - 10)
        bounds = (half, half - 10)
        target = 2 * half - 10

        assert_equal(solve_diophantine(coeffs, bounds, target), (1, 1))
| 155 |
+
|
| 156 |
+
|
def check_may_share_memory_exact(a, b):
    """Cross-check exact overlap detection against a write-probe oracle."""
    got = np.may_share_memory(a, b, max_work=MAY_SHARE_EXACT)

    # The default mode must agree with the cheap bounds-only check.
    assert_equal(np.may_share_memory(a, b),
                 np.may_share_memory(a, b, max_work=MAY_SHARE_BOUNDS))

    # Oracle: write through `a` and observe whether `b` sees the write.
    a.fill(0)
    b.fill(0)
    a.fill(1)
    exact = b.any()

    if got != exact:
        delta = a.__array_interface__['data'][0] - b.__array_interface__['data'][0]
        details = [
            "base_a - base_b = %r" % (delta,),
            "shape_a = %r" % (a.shape,),
            "shape_b = %r" % (b.shape,),
            "strides_a = %r" % (a.strides,),
            "strides_b = %r" % (b.strides,),
            "size_a = %r" % (a.size,),
            "size_b = %r" % (b.size,)
        ]
        err_msg = " " + "\n    ".join(details)
    else:
        err_msg = ""

    assert_equal(got, exact, err_msg=err_msg)
| 181 |
+
|
| 182 |
+
|
def test_may_share_memory_manual():
    """Hand-picked overlap cases for may_share_memory."""
    # Base arrays: one contiguous, one pre-strided along axis 2.
    bases = [
        np.zeros([13, 21, 23, 22], dtype=np.int8),
        np.zeros([13, 21, 23*2, 22], dtype=np.int8)[:, :, ::2, :]
    ]

    # Expand each base into every combination of reversed axes.
    flips = [slice(None), slice(None, None, -1)]
    views = [base[ss]
             for base in bases
             for ss in itertools.product(flips, flips, flips, flips)]

    for x in views:
        # The default is a simple extent check
        assert_(np.may_share_memory(x[:, 0, :], x[:, 1, :]))
        assert_(np.may_share_memory(x[:, 0, :], x[:, 1, :], max_work=None))

        # Exact checks
        check_may_share_memory_exact(x[:, 0, :], x[:, 1, :])
        check_may_share_memory_exact(x[:, ::7], x[:, 3::3])

        try:
            xp = x.ravel()
            if xp.flags.owndata:
                # ravel() had to copy, so xp no longer aliases x; skip.
                continue
            xp = xp.view(np.int16)
        except ValueError:
            continue

        # 0-size arrays cannot overlap
        check_may_share_memory_exact(x.ravel()[6:6],
                                     xp.reshape(13, 21, 23, 11)[:, ::7])

        # Test itemsize is dealt with
        check_may_share_memory_exact(x[:, ::7],
                                     xp.reshape(13, 21, 23, 11))
        check_may_share_memory_exact(x[:, ::7],
                                     xp.reshape(13, 21, 23, 11)[:, 3::3])
        check_may_share_memory_exact(x.ravel()[6:7],
                                     xp.reshape(13, 21, 23, 11)[:, ::7])

    # Check unit size
    unit = np.zeros([1], dtype=np.int8)
    check_may_share_memory_exact(unit, unit)
    check_may_share_memory_exact(unit, unit.copy())
| 232 |
+
|
| 233 |
+
|
def iter_random_view_pairs(x, same_steps=True, equal_size=False):
    """Yield an endless stream of (a, b) view pairs into the array `x`.

    Starts with a few deterministic special cases (identity, shifted
    slices, zero- and unit-stride self-overlapping views), then random
    sliced/strided/transposed pairs.  With equal_size=True both views
    have identical shapes before transposition; with same_steps=True the
    second view reuses the first view's step pattern.
    """
    rng = np.random.RandomState(1234)

    if equal_size and same_steps:
        # Unsupported combination: the fixed-size slicing branch below
        # assumes independently chosen steps.
        raise ValueError

    def random_slice(n, step):
        # Random sub-range of an axis of length n; reversed half the time.
        start = rng.randint(0, n+1, dtype=np.intp)
        stop = rng.randint(start, n+1, dtype=np.intp)
        if rng.randint(0, 2, dtype=np.intp) == 0:
            stop, start = start, stop
            step *= -1
        return slice(start, stop, step)

    def random_slice_fixed_size(n, step, size):
        # Like random_slice, but always selecting exactly `size` elements.
        start = rng.randint(0, n+1 - size*step)
        stop = start + (size-1)*step + 1
        if rng.randint(0, 2) == 0:
            stop, start = start-1, stop-1
            if stop < 0:
                # A reversed slice reaching index 0 needs stop=None.
                stop = None
            step *= -1
        return slice(start, stop, step)

    # First a few regular views
    yield x, x
    for j in range(1, 7, 3):
        yield x[j:], x[:-j]
        yield x[...,j:], x[...,:-j]

    # An array with zero stride internal overlap
    strides = list(x.strides)
    strides[0] = 0
    xp = as_strided(x, shape=x.shape, strides=strides)
    yield x, xp
    yield xp, xp

    # An array with non-zero stride internal overlap
    strides = list(x.strides)
    if strides[0] > 1:
        strides[0] = 1
    xp = as_strided(x, shape=x.shape, strides=strides)
    yield x, xp
    yield xp, xp

    # Then discontiguous views
    while True:
        # Mostly unit steps; 1 time in 5 a random step in [1, 10].
        steps = tuple(rng.randint(1, 11, dtype=np.intp)
                      if rng.randint(0, 5, dtype=np.intp) == 0 else 1
                      for j in range(x.ndim))
        s1 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps))

        # Random axis permutation for each view.
        t1 = np.arange(x.ndim)
        rng.shuffle(t1)

        if equal_size:
            t2 = t1
        else:
            t2 = np.arange(x.ndim)
            rng.shuffle(t2)

        a = x[s1]

        if equal_size:
            if a.size == 0:
                continue

            # Steps for the second view, chosen so a slice of matching
            # size still fits within each axis of x.
            steps2 = tuple(rng.randint(1, max(2, p//(1+pa)))
                           if rng.randint(0, 5) == 0 else 1
                           for p, s, pa in zip(x.shape, s1, a.shape))
            s2 = tuple(random_slice_fixed_size(p, s, pa)
                       for p, s, pa in zip(x.shape, steps2, a.shape))
        elif same_steps:
            steps2 = steps
        else:
            steps2 = tuple(rng.randint(1, 11, dtype=np.intp)
                           if rng.randint(0, 5, dtype=np.intp) == 0 else 1
                           for j in range(x.ndim))

        if not equal_size:
            s2 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps2))

        a = a.transpose(t1)
        b = x[s2].transpose(t2)

        yield a, b
| 320 |
+
|
| 321 |
+
|
def check_may_share_memory_easy_fuzz(get_max_work, same_steps, min_count):
    """Fuzz np.may_share_memory under a caller-supplied work bound.

    The bounded-work answer must match the exact answer, and the default
    answer must match the bounds-only answer, for random view pairs.
    """
    x = np.zeros([17, 34, 71, 97], dtype=np.int16)

    n_feasible = 0
    n_infeasible = 0

    pairs = iter_random_view_pairs(x, same_steps)

    while min(n_feasible, n_infeasible) < min_count:
        a, b = next(pairs)

        bounds_overlap = np.may_share_memory(a, b)
        may_share_answer = np.may_share_memory(a, b)
        easy_answer = np.may_share_memory(a, b, max_work=get_max_work(a, b))
        exact_answer = np.may_share_memory(a, b, max_work=MAY_SHARE_EXACT)

        # assert_equal is slow, so only invoke it on mismatch.
        if easy_answer != exact_answer:
            assert_equal(easy_answer, exact_answer)

        if may_share_answer != bounds_overlap:
            assert_equal(may_share_answer, bounds_overlap)

        if bounds_overlap:
            if exact_answer:
                n_feasible += 1
            else:
                n_infeasible += 1
| 352 |
+
|
| 353 |
+
|
@pytest.mark.slow
def test_may_share_memory_easy_fuzz():
    # Overlap problems with common strides must be solvable with a work
    # budget of just 1.
    check_may_share_memory_easy_fuzz(
        get_max_work=lambda a, b: 1, same_steps=True, min_count=2000)
| 362 |
+
|
| 363 |
+
|
@pytest.mark.slow
def test_may_share_memory_harder_fuzz():
    # Overlap problems with not necessarily common strides take more
    # work.  The work bound below can't be reduced much; harder problems
    # may exist but go undetected here, since the cases come from an RNG.
    check_may_share_memory_easy_fuzz(
        get_max_work=lambda a, b: max(a.size, b.size) // 2,
        same_steps=False, min_count=2000)
| 376 |
+
|
| 377 |
+
|
| 378 |
+
def test_shares_memory_api():
|
| 379 |
+
x = np.zeros([4, 5, 6], dtype=np.int8)
|
| 380 |
+
|
| 381 |
+
assert_equal(np.shares_memory(x, x), True)
|
| 382 |
+
assert_equal(np.shares_memory(x, x.copy()), False)
|
| 383 |
+
|
| 384 |
+
a = x[:,::2,::3]
|
| 385 |
+
b = x[:,::3,::2]
|
| 386 |
+
assert_equal(np.shares_memory(a, b), True)
|
| 387 |
+
assert_equal(np.shares_memory(a, b, max_work=None), True)
|
| 388 |
+
assert_raises(
|
| 389 |
+
np.exceptions.TooHardError, np.shares_memory, a, b, max_work=1
|
| 390 |
+
)
|
| 391 |
+
|
| 392 |
+
|
def test_may_share_memory_bad_max_work():
    """An out-of-range max_work must raise OverflowError."""
    arr = np.zeros([1])
    huge = 10**100
    assert_raises(OverflowError, np.may_share_memory, arr, arr, max_work=huge)
    assert_raises(OverflowError, np.shares_memory, arr, arr, max_work=huge)
| 397 |
+
|
| 398 |
+
|
def test_internal_overlap_diophantine():
    def check(A, U, exists=None):
        # With require_ub_nontrivial=1 the solver searches for a solution
        # other than x = U//2; the assertions below show it must hit the
        # midpoint sum sum(a*u//2) while differing from the midpoint point.
        X = solve_diophantine(A, U, 0, require_ub_nontrivial=1)

        if exists is None:
            # No expectation supplied: just validate whatever came back.
            exists = (X is not None)

        if X is not None:
            # Solution must match the midpoint sum, stay within bounds,
            # and differ from the trivial midpoint solution.
            assert_(sum(a*x for a, x in zip(A, X)) == sum(a*u//2 for a, u in zip(A, U)))
            assert_(all(0 <= x <= u for x, u in zip(X, U)))
            assert_(any(x != u//2 for x, u in zip(X, U)))

        if exists:
            assert_(X is not None, repr(X))
        else:
            assert_(X is None, repr(X))

    # Smoke tests
    check((3, 2), (2*2, 3*2), exists=True)
    check((3*2, 2), (15*2, (3-1)*2), exists=False)
| 419 |
+
|
| 420 |
+
|
def test_internal_overlap_slices():
    # Slicing an array never generates internal overlap
    x = np.zeros([17, 34, 71, 97], dtype=np.int16)

    rng = np.random.RandomState(1234)

    def random_slice(n, step):
        # Random sub-range of [0, n); reversed half the time.
        lo = rng.randint(0, n + 1, dtype=np.intp)
        hi = rng.randint(lo, n + 1, dtype=np.intp)
        if rng.randint(0, 2, dtype=np.intp) == 0:
            lo, hi = hi, lo
            step = -step
        return slice(lo, hi, step)

    checked = 0
    min_count = 5000

    while checked < min_count:
        steps = tuple(rng.randint(1, 11, dtype=np.intp)
                      if rng.randint(0, 5, dtype=np.intp) == 0 else 1
                      for _ in range(x.ndim))
        perm = np.arange(x.ndim)
        rng.shuffle(perm)
        sl = tuple(random_slice(p, s) for p, s in zip(x.shape, steps))
        view = x[sl].transpose(perm)

        assert_(not internal_overlap(view))
        checked += 1
| 450 |
+
|
| 451 |
+
|
def check_internal_overlap(a, manual_expected=None):
    """Compare internal_overlap(a) against a brute-force offset scan."""
    got = internal_overlap(a)

    # Brute force: enumerate every index and look for a repeated byte
    # offset — two indices at the same offset mean internal overlap.
    seen = set()
    expected = False
    for idx in itertools.product(*(range(n) for n in a.shape)):
        off = sum(stride * i for stride, i in zip(a.strides, idx))
        if off in seen:
            expected = True
            break
        seen.add(off)

    # Compare (assert_equal only on mismatch: it is slow).
    if got != expected:
        assert_equal(got, expected, err_msg=repr((a.strides, a.shape)))
    if manual_expected is not None and expected != manual_expected:
        assert_equal(expected, manual_expected)
    return got
| 474 |
+
|
| 475 |
+
|
def test_internal_overlap_manual():
    # Stride tricks can construct arrays with internal overlap.
    # Memory bounds don't matter here; the arrays are never dereferenced.
    x = np.arange(1).astype(np.int8)

    # Check low-dimensional special cases
    check_internal_overlap(x, False)  # 1-dim
    check_internal_overlap(x.reshape([]), False)  # 0-dim

    # (strides, shape, expected-overlap) triples, checked in order.
    cases = [
        ((3, 4), (4, 4), False),
        ((3, 4), (5, 4), True),
        ((0,), (0,), False),       # zero elements: nothing to collide
        ((0,), (1,), False),
        ((0,), (2,), True),        # zero stride with >1 element overlaps
        ((0, -9993), (87, 22), True),
        ((0, -9993), (1, 22), False),
        ((0, -9993), (0, 22), False),
    ]
    for strides, dims, expected in cases:
        view = as_strided(x, strides=strides, shape=dims)
        check_internal_overlap(view, expected)
| 511 |
+
|
| 512 |
+
|
def test_internal_overlap_fuzz():
    # Fuzz check; the brute-force check is fairly slow
    x = np.arange(1).astype(np.int8)

    seen_overlap = 0
    seen_clean = 0
    min_count = 100

    rng = np.random.RandomState(1234)

    # Keep generating random strided views until both outcomes have
    # been observed min_count times.
    while min(seen_overlap, seen_clean) < min_count:
        ndim = rng.randint(1, 4, dtype=np.intp)

        strides = tuple(rng.randint(-1000, 1000, dtype=np.intp)
                        for _ in range(ndim))
        dims = tuple(rng.randint(1, 30, dtype=np.intp)
                     for _ in range(ndim))

        view = as_strided(x, strides=strides, shape=dims)
        if check_internal_overlap(view):
            seen_overlap += 1
        else:
            seen_clean += 1
| 539 |
+
|
| 540 |
+
|
def test_non_ndarray_inputs():
    # Regression check for gh-5604

    class ViaInterface:
        """Exposes its data only through __array_interface__."""
        def __init__(self, data):
            self.data = data

        @property
        def __array_interface__(self):
            return self.data.__array_interface__

    class ViaArray:
        """Exposes its data only through __array__."""
        def __init__(self, data):
            self.data = data

        def __array__(self, dtype=None, copy=None):
            return self.data

    for wrapper in [ViaInterface, ViaArray]:
        x = np.arange(5)

        # Even/odd elements: bounds overlap but no shared element.
        assert_(np.may_share_memory(wrapper(x[::2]), x[1::2]))
        assert_(not np.shares_memory(wrapper(x[::2]), x[1::2]))

        # These views genuinely share element 4.
        assert_(np.shares_memory(wrapper(x[1::3]), x[::2]))
        assert_(np.may_share_memory(wrapper(x[1::3]), x[::2]))
| 567 |
+
|
| 568 |
+
|
| 569 |
+
def view_element_first_byte(x):
|
| 570 |
+
"""Construct an array viewing the first byte of each element of `x`"""
|
| 571 |
+
from numpy.lib._stride_tricks_impl import DummyArray
|
| 572 |
+
interface = dict(x.__array_interface__)
|
| 573 |
+
interface['typestr'] = '|b1'
|
| 574 |
+
interface['descr'] = [('', '|b1')]
|
| 575 |
+
return np.asarray(DummyArray(interface, x))
|
| 576 |
+
|
| 577 |
+
|
def assert_copy_equivalent(operation, args, out, **kwargs):
    """
    Check that operation(*args, out=out) produces results
    equivalent to out[...] = operation(*args, out=out.copy())
    """
    kwargs['out'] = out
    safe_kwargs = dict(kwargs)
    safe_kwargs['out'] = out.copy()

    saved = out.copy()
    # Reference result, computed into a non-overlapping copy of `out`.
    out[...] = operation(*args, **safe_kwargs)
    expected = out.copy()
    out[...] = saved

    # Result computed directly into the (possibly overlapping) `out`.
    got = operation(*args, **kwargs).copy()

    if (got != expected).any():
        assert_equal(got, expected)
| 597 |
+
|
| 598 |
+
|
class TestUFunc:
    """
    Test ufunc call memory overlap handling
    """

    def check_unary_fuzz(self, operation, get_out_axis_size, dtype=np.int16,
                         count=5000):
        # Fuzz `operation(a, out=b)` (optionally with an `axis` argument)
        # over randomly overlapping view pairs, comparing each call
        # against a copy-based evaluation via assert_copy_equivalent.
        #
        # get_out_axis_size(a, b, axis) -> (outsize, scalarize) selects
        # the output length along `axis` (None means a scalar result,
        # 'skip' skips the case); passing get_out_axis_size=None disables
        # the per-axis loop and calls operation(a, out=b) directly.
        shapes = [7, 13, 8, 21, 29, 32]

        rng = np.random.RandomState(1234)

        for ndim in range(1, 6):
            x = rng.randint(0, 2**16, size=shapes[:ndim]).astype(dtype)

            it = iter_random_view_pairs(x, same_steps=False, equal_size=True)

            # Fewer iterations in higher dimensions: each case is costlier.
            min_count = count // (ndim + 1)**2

            overlapping = 0
            while overlapping < min_count:
                a, b = next(it)

                a_orig = a.copy()
                b_orig = b.copy()

                if get_out_axis_size is None:
                    assert_copy_equivalent(operation, [a], out=b)

                    if np.shares_memory(a, b):
                        overlapping += 1
                else:
                    for axis in itertools.chain(range(ndim), [None]):
                        # Restore inputs clobbered by the previous call.
                        a[...] = a_orig
                        b[...] = b_orig

                        # Determine size for reduction axis (None if scalar)
                        outsize, scalarize = get_out_axis_size(a, b, axis)
                        if outsize == 'skip':
                            continue

                        # Slice b to get an output array of the correct size
                        sl = [slice(None)] * ndim
                        if axis is None:
                            if outsize is None:
                                sl = [slice(0, 1)] + [0]*(ndim - 1)
                            else:
                                sl = [slice(0, outsize)] + [0]*(ndim - 1)
                        else:
                            if outsize is None:
                                k = b.shape[axis]//2
                                if ndim == 1:
                                    sl[axis] = slice(k, k + 1)
                                else:
                                    sl[axis] = k
                            else:
                                assert b.shape[axis] >= outsize
                                sl[axis] = slice(0, outsize)
                        b_out = b[tuple(sl)]

                        if scalarize:
                            b_out = b_out.reshape([])

                        if np.shares_memory(a, b_out):
                            overlapping += 1

                        # Check result
                        assert_copy_equivalent(operation, [a], out=b_out, axis=axis)

    @pytest.mark.slow
    def test_unary_ufunc_call_fuzz(self):
        # Plain unary ufunc call with overlapping out.
        self.check_unary_fuzz(np.invert, None, np.int16)

    @pytest.mark.slow
    def test_unary_ufunc_call_complex_fuzz(self):
        # Complex typically has a smaller alignment than itemsize
        self.check_unary_fuzz(np.negative, None, np.complex128, count=500)

    def test_binary_ufunc_accumulate_fuzz(self):
        # ufunc.accumulate keeps the axis length; axis=None only valid in 1-d.
        def get_out_axis_size(a, b, axis):
            if axis is None:
                if a.ndim == 1:
                    return a.size, False
                else:
                    return 'skip', False  # accumulate doesn't support this
            else:
                return a.shape[axis], False

        self.check_unary_fuzz(np.add.accumulate, get_out_axis_size,
                              dtype=np.int16, count=500)

    def test_binary_ufunc_reduce_fuzz(self):
        # ufunc.reduce always produces a scalar along the reduced axis.
        def get_out_axis_size(a, b, axis):
            return None, (axis is None or a.ndim == 1)

        self.check_unary_fuzz(np.add.reduce, get_out_axis_size,
                              dtype=np.int16, count=500)

    def test_binary_ufunc_reduceat_fuzz(self):
        def get_out_axis_size(a, b, axis):
            if axis is None:
                if a.ndim == 1:
                    return a.size, False
                else:
                    return 'skip', False  # reduceat doesn't support this
            else:
                return a.shape[axis], False

        def do_reduceat(a, out, axis):
            # Build evenly spaced reduceat indices matching out's length.
            if axis is None:
                size = len(a)
                step = size//len(out)
            else:
                size = a.shape[axis]
                step = a.shape[axis] // out.shape[axis]
            idx = np.arange(0, size, step)
            return np.add.reduceat(a, idx, out=out, axis=axis)

        self.check_unary_fuzz(do_reduceat, get_out_axis_size,
                              dtype=np.int16, count=500)

    def test_binary_ufunc_reduceat_manual(self):
        # Copy-based vs in-place reduceat must agree under overlap.
        def check(ufunc, a, ind, out):
            c1 = ufunc.reduceat(a.copy(), ind.copy(), out=out.copy())
            c2 = ufunc.reduceat(a, ind, out=out)
            assert_array_equal(c1, c2)

        # Exactly same input/output arrays
        a = np.arange(10000, dtype=np.int16)
        check(np.add, a, a[::-1].copy(), a)

        # Overlap with index
        a = np.arange(10000, dtype=np.int16)
        check(np.add, a, a[::-1], a)

    @pytest.mark.slow
    def test_unary_gufunc_fuzz(self):
        # Same idea as check_unary_fuzz, but for a generalized ufunc
        # (euclidean_pdist), whose output shape constraints are handled
        # by trimming a and b below.
        shapes = [7, 13, 8, 21, 29, 32]
        gufunc = _umath_tests.euclidean_pdist

        rng = np.random.RandomState(1234)

        for ndim in range(2, 6):
            x = rng.rand(*shapes[:ndim])

            it = iter_random_view_pairs(x, same_steps=False, equal_size=True)

            min_count = 500 // (ndim + 1)**2

            overlapping = 0
            while overlapping < min_count:
                a, b = next(it)

                if min(a.shape[-2:]) < 2 or min(b.shape[-2:]) < 2 or a.shape[-1] < 2:
                    continue

                # Ensure the shapes are so that euclidean_pdist is happy
                if b.shape[-1] > b.shape[-2]:
                    b = b[...,0,:]
                else:
                    b = b[...,:,0]

                # pdist of n points needs n*(n-1)/2 output slots.
                n = a.shape[-2]
                p = n * (n - 1) // 2
                if p <= b.shape[-1] and p > 0:
                    b = b[...,:p]
                else:
                    # Shrink a until its pdist fits in b.
                    n = max(2, int(np.sqrt(b.shape[-1]))//2)
                    p = n * (n - 1) // 2
                    a = a[...,:n,:]
                    b = b[...,:p]

                # Call
                if np.shares_memory(a, b):
                    overlapping += 1

                with np.errstate(over='ignore', invalid='ignore'):
                    assert_copy_equivalent(gufunc, [a], out=b)

    def test_ufunc_at_manual(self):
        # ufunc.at with operands overlapping the target array must match
        # the result obtained from fully copied operands.
        def check(ufunc, a, ind, b=None):
            a0 = a.copy()
            if b is None:
                ufunc.at(a0, ind.copy())
                c1 = a0.copy()
                ufunc.at(a, ind)
                c2 = a.copy()
            else:
                ufunc.at(a0, ind.copy(), b.copy())
                c1 = a0.copy()
                ufunc.at(a, ind, b)
                c2 = a.copy()
            assert_array_equal(c1, c2)

        # Overlap with index
        a = np.arange(10000, dtype=np.int16)
        check(np.invert, a[::-1], a)

        # Overlap with second data array
        a = np.arange(100, dtype=np.int16)
        ind = np.arange(0, 100, 2, dtype=np.int16)
        check(np.add, a, ind, a[25:75])

    def test_unary_ufunc_1d_manual(self):
        # Exercise ufunc fast-paths (that avoid creation of an `np.nditer`)

        def check(a, b):
            a_orig = a.copy()
            b_orig = b.copy()

            b0 = b.copy()
            c1 = ufunc(a, out=b0)
            c2 = ufunc(a, out=b)
            assert_array_equal(c1, c2)

            # Trigger "fancy ufunc loop" code path
            mask = view_element_first_byte(b).view(np.bool)

            a[...] = a_orig
            b[...] = b_orig
            c1 = ufunc(a, out=b.copy(), where=mask.copy()).copy()

            a[...] = a_orig
            b[...] = b_orig
            c2 = ufunc(a, out=b, where=mask.copy()).copy()

            # Also, mask overlapping with output
            a[...] = a_orig
            b[...] = b_orig
            c3 = ufunc(a, out=b, where=mask).copy()

            assert_array_equal(c1, c2)
            assert_array_equal(c1, c3)

        dtypes = [np.int8, np.int16, np.int32, np.int64, np.float32,
                  np.float64, np.complex64, np.complex128]
        dtypes = [np.dtype(x) for x in dtypes]

        for dtype in dtypes:
            # Pick a unary ufunc defined for the dtype.
            if np.issubdtype(dtype, np.integer):
                ufunc = np.invert
            else:
                ufunc = np.reciprocal

            n = 1000
            k = 10
            # Forward/backward, shifted, and strided 1-d index patterns.
            indices = [
                np.index_exp[:n],
                np.index_exp[k:k+n],
                np.index_exp[n-1::-1],
                np.index_exp[k+n-1:k-1:-1],
                np.index_exp[:2*n:2],
                np.index_exp[k:k+2*n:2],
                np.index_exp[2*n-1::-2],
                np.index_exp[k+2*n-1:k-1:-2],
            ]

            for xi, yi in itertools.product(indices, indices):
                v = np.arange(1, 1 + n*2 + k, dtype=dtype)
                x = v[xi]
                y = v[yi]

                with np.errstate(all='ignore'):
                    check(x, y)

                    # Scalar cases
                    check(x[:1], y)
                    check(x[-1:], y)
                    check(x[:1].reshape([]), y)
                    check(x[-1:].reshape([]), y)

    def test_unary_ufunc_where_same(self):
        # Check behavior at wheremask overlap
        ufunc = np.invert

        def check(a, out, mask):
            c1 = ufunc(a, out=out.copy(), where=mask.copy())
            c2 = ufunc(a, out=out, where=mask)
            assert_array_equal(c1, c2)

        # Check behavior with same input and output arrays
        x = np.arange(100).astype(np.bool)
        check(x, x, x)
        check(x, x.copy(), x)
        check(x, x, x.copy())

    @pytest.mark.slow
    def test_binary_ufunc_1d_manual(self):
        ufunc = np.add

        def check(a, b, c):
            c0 = c.copy()
            c1 = ufunc(a, b, out=c0)
            c2 = ufunc(a, b, out=c)
            assert_array_equal(c1, c2)

        for dtype in [np.int8, np.int16, np.int32, np.int64,
                      np.float32, np.float64, np.complex64, np.complex128]:
            # Check different data dependency orders

            n = 1000
            k = 10

            indices = []
            for p in [1, 2]:
                indices.extend([
                    np.index_exp[:p*n:p],
                    np.index_exp[k:k+p*n:p],
                    np.index_exp[p*n-1::-p],
                    np.index_exp[k+p*n-1:k-1:-p],
                ])

            for x, y, z in itertools.product(indices, indices, indices):
                v = np.arange(6*n).astype(dtype)
                x = v[x]
                y = v[y]
                z = v[z]

                check(x, y, z)

                # Scalar cases
                check(x[:1], y, z)
                check(x[-1:], y, z)
                check(x[:1].reshape([]), y, z)
                check(x[-1:].reshape([]), y, z)
                check(x, y[:1], z)
                check(x, y[-1:], z)
                check(x, y[:1].reshape([]), z)
                check(x, y[-1:].reshape([]), z)

    def test_inplace_op_simple_manual(self):
        rng = np.random.RandomState(1234)
        x = rng.rand(200, 200)  # bigger than bufsize

        # x += x.T overlaps elementwise with its own transpose; the
        # result must nevertheless be exactly symmetric.
        x += x.T
        assert_array_equal(x - x.T, 0)
mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_mem_policy.py
ADDED
|
@@ -0,0 +1,449 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import asyncio
|
| 2 |
+
import gc
|
| 3 |
+
import os
|
| 4 |
+
import sys
|
| 5 |
+
import threading
|
| 6 |
+
|
| 7 |
+
import pytest
|
| 8 |
+
|
| 9 |
+
import numpy as np
|
| 10 |
+
from numpy.testing import extbuild, assert_warns, IS_WASM, IS_EDITABLE
|
| 11 |
+
from numpy._core.multiarray import get_handler_name
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
@pytest.fixture
def get_module(tmp_path):
    """ Add a memory policy that returns a false pointer 64 bytes into the
    actual allocation, and fill the prefix with some text. Then check at each
    memory manipulation that the prefix exists, to make sure all alloc/realloc/
    free/calloc go via the functions here.
    """
    if sys.platform.startswith('cygwin'):
        pytest.skip('link fails on cygwin')
    if IS_WASM:
        pytest.skip("Can't build module inside Wasm")
    if IS_EDITABLE:
        pytest.skip("Can't build module for editable install")

    # (function name, METH_* calling convention, C body) triples that
    # become the methods of the compiled test extension module.
    functions = [
        ("get_default_policy", "METH_NOARGS", """
             Py_INCREF(PyDataMem_DefaultHandler);
             return PyDataMem_DefaultHandler;
         """),
        ("set_secret_data_policy", "METH_NOARGS", """
             PyObject *secret_data =
                 PyCapsule_New(&secret_data_handler, "mem_handler", NULL);
             if (secret_data == NULL) {
                 return NULL;
             }
             PyObject *old = PyDataMem_SetHandler(secret_data);
             Py_DECREF(secret_data);
             return old;
         """),
        ("set_wrong_capsule_name_data_policy", "METH_NOARGS", """
             PyObject *wrong_name_capsule =
                 PyCapsule_New(&secret_data_handler, "not_mem_handler", NULL);
             if (wrong_name_capsule == NULL) {
                 return NULL;
             }
             PyObject *old = PyDataMem_SetHandler(wrong_name_capsule);
             Py_DECREF(wrong_name_capsule);
             return old;
         """),
        ("set_old_policy", "METH_O", """
             PyObject *old;
             if (args != NULL && PyCapsule_CheckExact(args)) {
                 old = PyDataMem_SetHandler(args);
             }
             else {
                 old = PyDataMem_SetHandler(NULL);
             }
             return old;
         """),
        ("get_array", "METH_NOARGS", """
            char *buf = (char *)malloc(20);
            npy_intp dims[1];
            dims[0] = 20;
            PyArray_Descr *descr = PyArray_DescrNewFromType(NPY_UINT8);
            return PyArray_NewFromDescr(&PyArray_Type, descr, 1, dims, NULL,
                                        buf, NPY_ARRAY_WRITEABLE, NULL);
         """),
        ("set_own", "METH_O", """
            if (!PyArray_Check(args)) {
                PyErr_SetString(PyExc_ValueError,
                             "need an ndarray");
                return NULL;
            }
            PyArray_ENABLEFLAGS((PyArrayObject*)args, NPY_ARRAY_OWNDATA);
            // Maybe try this too?
            // PyArray_BASE(PyArrayObject *)args) = NULL;
            Py_RETURN_NONE;
         """),
        ("get_array_with_base", "METH_NOARGS", """
            char *buf = (char *)malloc(20);
            npy_intp dims[1];
            dims[0] = 20;
            PyArray_Descr *descr = PyArray_DescrNewFromType(NPY_UINT8);
            PyObject *arr = PyArray_NewFromDescr(&PyArray_Type, descr, 1, dims,
                                                 NULL, buf,
                                                 NPY_ARRAY_WRITEABLE, NULL);
            if (arr == NULL) return NULL;
            PyObject *obj = PyCapsule_New(buf, "buf capsule",
                                          (PyCapsule_Destructor)&warn_on_free);
            if (obj == NULL) {
                Py_DECREF(arr);
                return NULL;
            }
            if (PyArray_SetBaseObject((PyArrayObject *)arr, obj) < 0) {
                Py_DECREF(arr);
                Py_DECREF(obj);
                return NULL;
            }
            return arr;

         """),
    ]
    # C support code shared by all the functions above: a PyDataMem_Handler
    # whose alloc/calloc/realloc return a pointer 64 bytes into the real
    # allocation, with a text prefix that free/realloc verify.
    prologue = '''
        #define NPY_TARGET_VERSION NPY_1_22_API_VERSION
        #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
        #include <numpy/arrayobject.h>
        /*
         * This struct allows the dynamic configuration of the allocator funcs
         * of the `secret_data_allocator`. It is provided here for
         * demonstration purposes, as a valid `ctx` use-case scenario.
         */
        typedef struct {
            void *(*malloc)(size_t);
            void *(*calloc)(size_t, size_t);
            void *(*realloc)(void *, size_t);
            void (*free)(void *);
        } SecretDataAllocatorFuncs;

        NPY_NO_EXPORT void *
        shift_alloc(void *ctx, size_t sz) {
            SecretDataAllocatorFuncs *funcs = (SecretDataAllocatorFuncs *)ctx;
            char *real = (char *)funcs->malloc(sz + 64);
            if (real == NULL) {
                return NULL;
            }
            snprintf(real, 64, "originally allocated %ld", (unsigned long)sz);
            return (void *)(real + 64);
        }
        NPY_NO_EXPORT void *
        shift_zero(void *ctx, size_t sz, size_t cnt) {
            SecretDataAllocatorFuncs *funcs = (SecretDataAllocatorFuncs *)ctx;
            char *real = (char *)funcs->calloc(sz + 64, cnt);
            if (real == NULL) {
                return NULL;
            }
            snprintf(real, 64, "originally allocated %ld via zero",
                     (unsigned long)sz);
            return (void *)(real + 64);
        }
        NPY_NO_EXPORT void
        shift_free(void *ctx, void * p, npy_uintp sz) {
            SecretDataAllocatorFuncs *funcs = (SecretDataAllocatorFuncs *)ctx;
            if (p == NULL) {
                return ;
            }
            char *real = (char *)p - 64;
            if (strncmp(real, "originally allocated", 20) != 0) {
                fprintf(stdout, "uh-oh, unmatched shift_free, "
                        "no appropriate prefix\\n");
                /* Make C runtime crash by calling free on the wrong address */
                funcs->free((char *)p + 10);
                /* funcs->free(real); */
            }
            else {
                npy_uintp i = (npy_uintp)atoi(real +20);
                if (i != sz) {
                    fprintf(stderr, "uh-oh, unmatched shift_free"
                            "(ptr, %ld) but allocated %ld\\n", sz, i);
                    /* This happens in some places, only print */
                    funcs->free(real);
                }
                else {
                    funcs->free(real);
                }
            }
        }
        NPY_NO_EXPORT void *
        shift_realloc(void *ctx, void * p, npy_uintp sz) {
            SecretDataAllocatorFuncs *funcs = (SecretDataAllocatorFuncs *)ctx;
            if (p != NULL) {
                char *real = (char *)p - 64;
                if (strncmp(real, "originally allocated", 20) != 0) {
                    fprintf(stdout, "uh-oh, unmatched shift_realloc\\n");
                    return realloc(p, sz);
                }
                return (void *)((char *)funcs->realloc(real, sz + 64) + 64);
            }
            else {
                char *real = (char *)funcs->realloc(p, sz + 64);
                if (real == NULL) {
                    return NULL;
                }
                snprintf(real, 64, "originally allocated "
                         "%ld via realloc", (unsigned long)sz);
                return (void *)(real + 64);
            }
        }
        /* As an example, we use the standard {m|c|re}alloc/free funcs. */
        static SecretDataAllocatorFuncs secret_data_handler_ctx = {
            malloc,
            calloc,
            realloc,
            free
        };
        static PyDataMem_Handler secret_data_handler = {
            "secret_data_allocator",
            1,
            {
                &secret_data_handler_ctx, /* ctx */
                shift_alloc,              /* malloc */
                shift_zero,               /* calloc */
                shift_realloc,            /* realloc */
                shift_free                /* free */
            }
        };
        void warn_on_free(void *capsule) {
            PyErr_WarnEx(PyExc_UserWarning, "in warn_on_free", 1);
            void * obj = PyCapsule_GetPointer(capsule,
                                              PyCapsule_GetName(capsule));
            free(obj);
        };
        '''
    more_init = "import_array();"
    # Reuse a previously built module if one is importable (e.g. when this
    # fixture runs repeatedly in one session); otherwise compile it fresh.
    try:
        import mem_policy
        return mem_policy
    except ImportError:
        pass
    # if it does not exist, build and load it
    return extbuild.build_and_import_extension('mem_policy',
                                               functions,
                                               prologue=prologue,
                                               include_dirs=[np.get_include()],
                                               build_dir=tmp_path,
                                               more_init=more_init)
|
| 229 |
+
|
| 230 |
+
|
| 231 |
+
def test_set_policy(get_module):
    """PyDataMem_SetHandler installs a policy used by new allocations."""
    get_handler_name = np._core.multiarray.get_handler_name
    get_handler_version = np._core.multiarray.get_handler_version
    original_name = get_handler_name()

    # A reshaped arange does not own its buffer: the handler is reported
    # on the owning base array, not on the view.
    view = np.arange(10).reshape((2, 5))
    assert get_handler_name(view) is None
    assert get_handler_version(view) is None
    assert get_handler_name(view.base) == original_name
    assert get_handler_version(view.base) == 1

    previous = get_module.set_secret_data_policy()

    # Arrays created after the switch report the custom allocator.
    other = np.arange(10).reshape((2, 5))
    assert get_handler_name(other) is None
    assert get_handler_version(other) is None
    assert get_handler_name(other.base) == 'secret_data_allocator'
    assert get_handler_version(other.base) == 1

    if original_name == 'default_allocator':
        get_module.set_old_policy(None)  # tests PyDataMem_SetHandler(NULL)
        assert get_handler_name() == 'default_allocator'
    else:
        get_module.set_old_policy(previous)
        assert get_handler_name() == original_name

    # A capsule with the wrong name must be rejected.
    with pytest.raises(ValueError,
                       match="Capsule must be named 'mem_handler'"):
        get_module.set_wrong_capsule_name_data_policy()
|
| 261 |
+
|
| 262 |
+
|
| 263 |
+
def test_default_policy_singleton(get_module):
    """Installing the default policy always hands back one capsule object."""
    get_handler_name = np._core.multiarray.get_handler_name

    # Install the default policy, remembering whatever was active before.
    previous = get_module.set_old_policy(None)
    assert get_handler_name() == 'default_allocator'

    # Installing the default again returns the default capsule itself.
    first_default = get_module.set_old_policy(None)
    assert get_handler_name() == 'default_allocator'

    # Restoring the original policy returns the default capsule once more.
    second_default = get_module.set_old_policy(previous)

    # The default policy is a singleton: all three are the same object.
    assert first_default is second_default is get_module.get_default_policy()
|
| 282 |
+
|
| 283 |
+
|
| 284 |
+
def test_policy_propagation(get_module):
|
| 285 |
+
# The memory policy goes hand-in-hand with flags.owndata
|
| 286 |
+
|
| 287 |
+
class MyArr(np.ndarray):
|
| 288 |
+
pass
|
| 289 |
+
|
| 290 |
+
get_handler_name = np._core.multiarray.get_handler_name
|
| 291 |
+
orig_policy_name = get_handler_name()
|
| 292 |
+
a = np.arange(10).view(MyArr).reshape((2, 5))
|
| 293 |
+
assert get_handler_name(a) is None
|
| 294 |
+
assert a.flags.owndata is False
|
| 295 |
+
|
| 296 |
+
assert get_handler_name(a.base) is None
|
| 297 |
+
assert a.base.flags.owndata is False
|
| 298 |
+
|
| 299 |
+
assert get_handler_name(a.base.base) == orig_policy_name
|
| 300 |
+
assert a.base.base.flags.owndata is True
|
| 301 |
+
|
| 302 |
+
|
| 303 |
+
async def concurrent_context1(get_module, orig_policy_name, event):
    """Flip the memory policy in this task, then signal context2."""
    if orig_policy_name != 'default_allocator':
        get_module.set_old_policy(None)
        assert get_handler_name() == 'default_allocator'
    else:
        get_module.set_secret_data_policy()
        assert get_handler_name() == 'secret_data_allocator'
    event.set()
|
| 311 |
+
|
| 312 |
+
|
| 313 |
+
async def concurrent_context2(get_module, orig_policy_name, event):
    """After context1 ran, verify isolation and flip the policy here too."""
    await event.wait()
    # the policy is not affected by changes in parallel contexts
    assert get_handler_name() == orig_policy_name
    # change policy in the child context
    if orig_policy_name != 'default_allocator':
        get_module.set_old_policy(None)
        assert get_handler_name() == 'default_allocator'
    else:
        get_module.set_secret_data_policy()
        assert get_handler_name() == 'secret_data_allocator'
|
| 324 |
+
|
| 325 |
+
|
| 326 |
+
async def async_test_context_locality(get_module):
    """Memory-policy changes in child tasks must not leak into the parent."""
    parent_policy = np._core.multiarray.get_handler_name()

    done = asyncio.Event()
    # Child tasks inherit the parent policy when created.
    task_a = asyncio.create_task(
        concurrent_context1(get_module, parent_policy, done))
    task_b = asyncio.create_task(
        concurrent_context2(get_module, parent_policy, done))
    await task_a
    await task_b

    # Whatever the children installed, the parent context still reports
    # its original policy.
    assert np._core.multiarray.get_handler_name() == parent_policy
|
| 340 |
+
|
| 341 |
+
|
| 342 |
+
def test_context_locality(get_module):
    """Run the async context-locality check (skipped on old PyPy)."""
    is_old_pypy = (sys.implementation.name == 'pypy'
                   and sys.pypy_version_info[:3] < (7, 3, 6))
    if is_old_pypy:
        pytest.skip('no context-locality support in PyPy < 7.3.6')
    asyncio.run(async_test_context_locality(get_module))
|
| 347 |
+
|
| 348 |
+
|
| 349 |
+
def concurrent_thread1(get_module, event):
    """Install the custom policy in this thread, then unblock thread2."""
    get_module.set_secret_data_policy()
    assert np._core.multiarray.get_handler_name() == 'secret_data_allocator'
    event.set()
|
| 353 |
+
|
| 354 |
+
|
| 355 |
+
def concurrent_thread2(get_module, event):
    """After thread1 switched policies, verify this thread is unaffected."""
    event.wait()
    # thread1's switch must not be visible here: child threads start from
    # the default policy rather than inheriting a sibling's change.
    assert np._core.multiarray.get_handler_name() == 'default_allocator'
    # Now switch the policy in this thread as well.
    get_module.set_secret_data_policy()
|
| 361 |
+
|
| 362 |
+
|
| 363 |
+
def test_thread_locality(get_module):
    """Memory-policy changes in child threads must not leak to the parent."""
    parent_policy = np._core.multiarray.get_handler_name()

    started = threading.Event()
    # Child threads do not inherit the parent policy.
    workers = [
        threading.Thread(target=concurrent_thread1,
                         args=(get_module, started)),
        threading.Thread(target=concurrent_thread2,
                         args=(get_module, started)),
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()

    # Policy changes made in the children are invisible to this thread.
    assert np._core.multiarray.get_handler_name() == parent_policy
|
| 379 |
+
|
| 380 |
+
|
| 381 |
+
@pytest.mark.skip(reason="too slow, see gh-23975")
def test_new_policy(get_module):
    """Run large parts of the numpy test suite with the custom allocator
    installed, to shake out alloc/free mismatches (currently skipped
    upstream for being too slow)."""
    a = np.arange(10)
    orig_policy_name = np._core.multiarray.get_handler_name(a)

    orig_policy = get_module.set_secret_data_policy()

    b = np.arange(10)
    # New allocations now go through the shifted-pointer allocator.
    assert np._core.multiarray.get_handler_name(b) == 'secret_data_allocator'

    # test array manipulation. This is slow
    if orig_policy_name == 'default_allocator':
        # when the np._core.test tests recurse into this test, the
        # policy will be set so this "if" will be false, preventing
        # infinite recursion
        #
        # if needed, debug this by
        # - running tests with -- -s (to not capture stdout/stderr
        # - setting verbose=2
        # - setting extra_argv=['-vv'] here
        assert np._core.test('full', verbose=1, extra_argv=[])
        # also try the ma tests, the pickling test is quite tricky
        assert np.ma.test('full', verbose=1, extra_argv=[])

    get_module.set_old_policy(orig_policy)

    # After restoring, allocations report the original policy again.
    c = np.arange(10)
    assert np._core.multiarray.get_handler_name(c) == orig_policy_name
|
| 409 |
+
|
| 410 |
+
|
| 411 |
+
@pytest.mark.xfail(sys.implementation.name == "pypy",
                   reason=("bad interaction between getenv and "
                           "os.environ inside pytest"))
@pytest.mark.parametrize("policy", ["0", "1", None])
def test_switch_owner(get_module, policy):
    """Freeing an array that took ownership of foreign memory warns only
    when the warn-if-no-mem-policy switch is on."""
    arr = get_module.get_array()
    assert np._core.multiarray.get_handler_name(arr) is None
    # Force OWNDATA even though numpy never allocated this buffer.
    get_module.set_own(arr)

    if policy is None:
        # See what we expect to be set based on the env variable
        policy = os.getenv("NUMPY_WARN_IF_NO_MEM_POLICY", "0") == "1"
        oldval = None
    else:
        policy = policy == "1"
        oldval = np._core._multiarray_umath._set_numpy_warn_if_no_mem_policy(
            policy)
    try:
        # The policy should be NULL, so we have to assume we can call
        # "free". A warning is given if the policy == "1"
        if not policy:
            del arr
            gc.collect()
        else:
            with assert_warns(RuntimeWarning) as w:
                del arr
                gc.collect()
    finally:
        if oldval is not None:
            np._core._multiarray_umath._set_numpy_warn_if_no_mem_policy(oldval)
|
| 442 |
+
|
| 443 |
+
|
| 444 |
+
def test_owner_is_base(get_module):
    """Dropping the array triggers the base capsule's destructor, which
    emits the 'warn_on_free' warning from the extension module."""
    arr = get_module.get_array_with_base()
    with pytest.warns(UserWarning, match='warn_on_free'):
        del arr
        # Two passes so the capsule is collected even via reference cycles.
        gc.collect()
        gc.collect()
|
mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_memmap.py
ADDED
|
@@ -0,0 +1,230 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sys
|
| 2 |
+
import os
|
| 3 |
+
import mmap
|
| 4 |
+
import pytest
|
| 5 |
+
from pathlib import Path
|
| 6 |
+
from tempfile import NamedTemporaryFile, TemporaryFile
|
| 7 |
+
|
| 8 |
+
from numpy import (
|
| 9 |
+
memmap, sum, average, prod, ndarray, isscalar, add, subtract, multiply)
|
| 10 |
+
|
| 11 |
+
from numpy import arange, allclose, asarray
|
| 12 |
+
from numpy.testing import (
|
| 13 |
+
assert_, assert_equal, assert_array_equal, suppress_warnings, IS_PYPY,
|
| 14 |
+
break_cycles
|
| 15 |
+
)
|
| 16 |
+
|
| 17 |
+
class TestMemmap:
    """Tests for numpy.memmap, backed by a per-test temporary file."""

    def setup_method(self):
        # Fresh backing file plus (3, 4) float32 reference data per test.
        self.tmpfp = NamedTemporaryFile(prefix='mmap')
        self.shape = (3, 4)
        self.dtype = 'float32'
        self.data = arange(12, dtype=self.dtype)
        self.data.resize(self.shape)

    def teardown_method(self):
        self.tmpfp.close()
        self.data = None
        if IS_PYPY:
            # PyPy's GC needs extra prodding before the mmap is released.
            break_cycles()
            break_cycles()

    def test_roundtrip(self):
        # Write data to file
        fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
                    shape=self.shape)
        fp[:] = self.data[:]
        del fp  # Test __del__ machinery, which handles cleanup

        # Read data back from file
        newfp = memmap(self.tmpfp, dtype=self.dtype, mode='r',
                       shape=self.shape)
        assert_(allclose(self.data, newfp))
        assert_array_equal(self.data, newfp)
        # 'r' mode maps are read-only.
        assert_equal(newfp.flags.writeable, False)

    def test_open_with_filename(self, tmp_path):
        tmpname = tmp_path / 'mmap'
        fp = memmap(tmpname, dtype=self.dtype, mode='w+',
                    shape=self.shape)
        fp[:] = self.data[:]
        del fp

    def test_unnamed_file(self):
        # A file object without a name on disk must also be mappable.
        with TemporaryFile() as f:
            fp = memmap(f, dtype=self.dtype, shape=self.shape)
            del fp

    def test_attributes(self):
        offset = 1
        mode = "w+"
        fp = memmap(self.tmpfp, dtype=self.dtype, mode=mode,
                    shape=self.shape, offset=offset)
        assert_equal(offset, fp.offset)
        assert_equal(mode, fp.mode)
        del fp

    def test_filename(self, tmp_path):
        tmpname = tmp_path / "mmap"
        fp = memmap(tmpname, dtype=self.dtype, mode='w+',
                    shape=self.shape)
        abspath = Path(os.path.abspath(tmpname))
        fp[:] = self.data[:]
        assert_equal(abspath, fp.filename)
        # Views share the filename of the parent memmap.
        b = fp[:1]
        assert_equal(abspath, b.filename)
        del b
        del fp

    def test_path(self, tmp_path):
        tmpname = tmp_path / "mmap"
        fp = memmap(Path(tmpname), dtype=self.dtype, mode='w+',
                    shape=self.shape)
        # os.path.realpath does not resolve symlinks on Windows
        # see: https://bugs.python.org/issue9949
        # use Path.resolve, just as memmap class does internally
        abspath = str(Path(tmpname).resolve())
        fp[:] = self.data[:]
        assert_equal(abspath, str(fp.filename.resolve()))
        b = fp[:1]
        assert_equal(abspath, str(b.filename.resolve()))
        del b
        del fp

    def test_filename_fileobj(self):
        fp = memmap(self.tmpfp, dtype=self.dtype, mode="w+",
                    shape=self.shape)
        assert_equal(fp.filename, self.tmpfp.name)

    @pytest.mark.skipif(sys.platform == 'gnu0',
                        reason="Known to fail on hurd")
    def test_flush(self):
        fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
                    shape=self.shape)
        fp[:] = self.data[:]
        assert_equal(fp[0], self.data[0])
        fp.flush()

    def test_del(self):
        # Make sure a view does not delete the underlying mmap
        fp_base = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
                         shape=self.shape)
        fp_base[0] = 5
        fp_view = fp_base[0:1]
        assert_equal(fp_view[0], 5)
        del fp_view
        # Should still be able to access and assign values after
        # deleting the view
        assert_equal(fp_base[0], 5)
        fp_base[0] = 6
        assert_equal(fp_base[0], 6)

    def test_arithmetic_drops_references(self):
        # Arithmetic results are plain copies, not views of the mmap.
        fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
                    shape=self.shape)
        tmp = (fp + 10)
        if isinstance(tmp, memmap):
            assert_(tmp._mmap is not fp._mmap)

    def test_indexing_drops_references(self):
        # Fancy indexing copies, so the result must not share the mmap.
        fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
                    shape=self.shape)
        tmp = fp[(1, 2), (2, 3)]
        if isinstance(tmp, memmap):
            assert_(tmp._mmap is not fp._mmap)

    def test_slicing_keeps_references(self):
        # Basic slicing returns a view that shares the underlying mmap.
        fp = memmap(self.tmpfp, dtype=self.dtype, mode='w+',
                    shape=self.shape)
        assert_(fp[:2, :2]._mmap is fp._mmap)

    def test_view(self):
        fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape)
        new1 = fp.view()
        new2 = new1.view()
        assert_(new1.base is fp)
        assert_(new2.base is fp)
        new_array = asarray(fp)
        assert_(new_array.base is fp)

    def test_ufunc_return_ndarray(self):
        fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape)
        fp[:] = self.data

        with suppress_warnings() as sup:
            sup.filter(FutureWarning, "np.average currently does not preserve")
            for unary_op in [sum, average, prod]:
                result = unary_op(fp)
                assert_(isscalar(result))
                assert_(result.__class__ is self.data[0, 0].__class__)

                # Reductions along an axis return plain ndarrays.
                assert_(unary_op(fp, axis=0).__class__ is ndarray)
                assert_(unary_op(fp, axis=1).__class__ is ndarray)

        for binary_op in [add, subtract, multiply]:
            assert_(binary_op(fp, self.data).__class__ is ndarray)
            assert_(binary_op(self.data, fp).__class__ is ndarray)
            assert_(binary_op(fp, fp).__class__ is ndarray)

        # In-place ops keep operating on (and returning) the memmap itself.
        fp += 1
        assert(fp.__class__ is memmap)
        add(fp, 1, out=fp)
        assert(fp.__class__ is memmap)

    def test_getitem(self):
        fp = memmap(self.tmpfp, dtype=self.dtype, shape=self.shape)
        fp[:] = self.data

        assert_(fp[1:, :-1].__class__ is memmap)
        # Fancy indexing returns a copy that is not memmapped
        assert_(fp[[0, 1]].__class__ is ndarray)

    def test_memmap_subclass(self):
        class MemmapSubClass(memmap):
            pass

        fp = MemmapSubClass(self.tmpfp, dtype=self.dtype, shape=self.shape)
        fp[:] = self.data

        # We keep previous behavior for subclasses of memmap, i.e. the
        # ufunc and __getitem__ output is never turned into a ndarray
        assert_(sum(fp, axis=0).__class__ is MemmapSubClass)
        assert_(sum(fp).__class__ is MemmapSubClass)
        assert_(fp[1:, :-1].__class__ is MemmapSubClass)
        assert(fp[[0, 1]].__class__ is MemmapSubClass)

    def test_mmap_offset_greater_than_allocation_granularity(self):
        size = 5 * mmap.ALLOCATIONGRANULARITY
        offset = mmap.ALLOCATIONGRANULARITY + 1
        fp = memmap(self.tmpfp, shape=size, mode='w+', offset=offset)
        assert_(fp.offset == offset)

    def test_empty_array_with_offset_multiple_of_allocation_granularity(self):
        self.tmpfp.write(b'a'*mmap.ALLOCATIONGRANULARITY)
        size = 0
        offset = mmap.ALLOCATIONGRANULARITY
        fp = memmap(self.tmpfp, shape=size, mode='w+', offset=offset)
        assert_equal(fp.offset, offset)

    def test_no_shape(self):
        # 16 bytes of file content / 8 bytes per float64 -> shape (2,)
        self.tmpfp.write(b'a'*16)
        mm = memmap(self.tmpfp, dtype='float64')
        assert_equal(mm.shape, (2,))

    def test_empty_array(self):
        # gh-12653
        with pytest.raises(ValueError, match='empty file'):
            memmap(self.tmpfp, shape=(0, 4), mode='r')

        # gh-27723
        # empty memmap works with mode in ('w+','r+')
        memmap(self.tmpfp, shape=(0, 4), mode='w+')

        # ok now the file is not empty
        memmap(self.tmpfp, shape=(0, 4), mode='w+')

    def test_shape_type(self):
        # shape may be an int, tuple, list, or array-like.
        memmap(self.tmpfp, shape=3, mode='w+')
        memmap(self.tmpfp, shape=self.shape, mode='w+')
        memmap(self.tmpfp, shape=list(self.shape), mode='w+')
        memmap(self.tmpfp, shape=asarray(self.shape), mode='w+')
|
mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_numerictypes.py
ADDED
|
@@ -0,0 +1,620 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sys
|
| 2 |
+
import itertools
|
| 3 |
+
|
| 4 |
+
import pytest
|
| 5 |
+
import numpy as np
|
| 6 |
+
import numpy._core.numerictypes as nt
|
| 7 |
+
from numpy._core.numerictypes import (
|
| 8 |
+
issctype, sctype2char, maximum_sctype, sctypes
|
| 9 |
+
)
|
| 10 |
+
from numpy.testing import (
|
| 11 |
+
assert_, assert_equal, assert_raises, assert_raises_regex, IS_PYPY
|
| 12 |
+
)
|
| 13 |
+
|
| 14 |
+
# This is the structure of the table used for plain objects:
|
| 15 |
+
#
|
| 16 |
+
# +-+-+-+
|
| 17 |
+
# |x|y|z|
|
| 18 |
+
# +-+-+-+
|
| 19 |
+
|
| 20 |
+
# Structure of a plain array description:
|
| 21 |
+
Pdescr = [
|
| 22 |
+
('x', 'i4', (2,)),
|
| 23 |
+
('y', 'f8', (2, 2)),
|
| 24 |
+
('z', 'u1')]
|
| 25 |
+
|
| 26 |
+
# A plain list of tuples with values for testing:
|
| 27 |
+
PbufferT = [
|
| 28 |
+
# x y z
|
| 29 |
+
([3, 2], [[6., 4.], [6., 4.]], 8),
|
| 30 |
+
([4, 3], [[7., 5.], [7., 5.]], 9),
|
| 31 |
+
]
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
# This is the structure of the table used for nested objects (DON'T PANIC!):
|
| 35 |
+
#
|
| 36 |
+
# +-+---------------------------------+-----+----------+-+-+
|
| 37 |
+
# |x|Info |color|info |y|z|
|
| 38 |
+
# | +-----+--+----------------+----+--+ +----+-----+ | |
|
| 39 |
+
# | |value|y2|Info2 |name|z2| |Name|Value| | |
|
| 40 |
+
# | | | +----+-----+--+--+ | | | | | | |
|
| 41 |
+
# | | | |name|value|y3|z3| | | | | | | |
|
| 42 |
+
# +-+-----+--+----+-----+--+--+----+--+-----+----+-----+-+-+
|
| 43 |
+
#
|
| 44 |
+
|
| 45 |
+
# The corresponding nested array description:
|
| 46 |
+
Ndescr = [
|
| 47 |
+
('x', 'i4', (2,)),
|
| 48 |
+
('Info', [
|
| 49 |
+
('value', 'c16'),
|
| 50 |
+
('y2', 'f8'),
|
| 51 |
+
('Info2', [
|
| 52 |
+
('name', 'S2'),
|
| 53 |
+
('value', 'c16', (2,)),
|
| 54 |
+
('y3', 'f8', (2,)),
|
| 55 |
+
('z3', 'u4', (2,))]),
|
| 56 |
+
('name', 'S2'),
|
| 57 |
+
('z2', 'b1')]),
|
| 58 |
+
('color', 'S2'),
|
| 59 |
+
('info', [
|
| 60 |
+
('Name', 'U8'),
|
| 61 |
+
('Value', 'c16')]),
|
| 62 |
+
('y', 'f8', (2, 2)),
|
| 63 |
+
('z', 'u1')]
|
| 64 |
+
|
| 65 |
+
NbufferT = [
|
| 66 |
+
# x Info color info y z
|
| 67 |
+
# value y2 Info2 name z2 Name Value
|
| 68 |
+
# name value y3 z3
|
| 69 |
+
([3, 2], (6j, 6., (b'nn', [6j, 4j], [6., 4.], [1, 2]), b'NN', True),
|
| 70 |
+
b'cc', ('NN', 6j), [[6., 4.], [6., 4.]], 8),
|
| 71 |
+
([4, 3], (7j, 7., (b'oo', [7j, 5j], [7., 5.], [2, 1]), b'OO', False),
|
| 72 |
+
b'dd', ('OO', 7j), [[7., 5.], [7., 5.]], 9),
|
| 73 |
+
]
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
byteorder = {'little':'<', 'big':'>'}[sys.byteorder]
|
| 77 |
+
|
| 78 |
+
def normalize_descr(descr):
|
| 79 |
+
"Normalize a description adding the platform byteorder."
|
| 80 |
+
|
| 81 |
+
out = []
|
| 82 |
+
for item in descr:
|
| 83 |
+
dtype = item[1]
|
| 84 |
+
if isinstance(dtype, str):
|
| 85 |
+
if dtype[0] not in ['|', '<', '>']:
|
| 86 |
+
onebyte = dtype[1:] == "1"
|
| 87 |
+
if onebyte or dtype[0] in ['S', 'V', 'b']:
|
| 88 |
+
dtype = "|" + dtype
|
| 89 |
+
else:
|
| 90 |
+
dtype = byteorder + dtype
|
| 91 |
+
if len(item) > 2 and np.prod(item[2]) > 1:
|
| 92 |
+
nitem = (item[0], dtype, item[2])
|
| 93 |
+
else:
|
| 94 |
+
nitem = (item[0], dtype)
|
| 95 |
+
out.append(nitem)
|
| 96 |
+
elif isinstance(dtype, list):
|
| 97 |
+
l = normalize_descr(dtype)
|
| 98 |
+
out.append((item[0], l))
|
| 99 |
+
else:
|
| 100 |
+
raise ValueError("Expected a str or list and got %s" %
|
| 101 |
+
(type(item)))
|
| 102 |
+
return out
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
############################################################
|
| 106 |
+
# Creation tests
|
| 107 |
+
############################################################
|
| 108 |
+
|
| 109 |
+
class CreateZeros:
|
| 110 |
+
"""Check the creation of heterogeneous arrays zero-valued"""
|
| 111 |
+
|
| 112 |
+
def test_zeros0D(self):
|
| 113 |
+
"""Check creation of 0-dimensional objects"""
|
| 114 |
+
h = np.zeros((), dtype=self._descr)
|
| 115 |
+
assert_(normalize_descr(self._descr) == h.dtype.descr)
|
| 116 |
+
assert_(h.dtype.fields['x'][0].name[:4] == 'void')
|
| 117 |
+
assert_(h.dtype.fields['x'][0].char == 'V')
|
| 118 |
+
assert_(h.dtype.fields['x'][0].type == np.void)
|
| 119 |
+
# A small check that data is ok
|
| 120 |
+
assert_equal(h['z'], np.zeros((), dtype='u1'))
|
| 121 |
+
|
| 122 |
+
def test_zerosSD(self):
|
| 123 |
+
"""Check creation of single-dimensional objects"""
|
| 124 |
+
h = np.zeros((2,), dtype=self._descr)
|
| 125 |
+
assert_(normalize_descr(self._descr) == h.dtype.descr)
|
| 126 |
+
assert_(h.dtype['y'].name[:4] == 'void')
|
| 127 |
+
assert_(h.dtype['y'].char == 'V')
|
| 128 |
+
assert_(h.dtype['y'].type == np.void)
|
| 129 |
+
# A small check that data is ok
|
| 130 |
+
assert_equal(h['z'], np.zeros((2,), dtype='u1'))
|
| 131 |
+
|
| 132 |
+
def test_zerosMD(self):
|
| 133 |
+
"""Check creation of multi-dimensional objects"""
|
| 134 |
+
h = np.zeros((2, 3), dtype=self._descr)
|
| 135 |
+
assert_(normalize_descr(self._descr) == h.dtype.descr)
|
| 136 |
+
assert_(h.dtype['z'].name == 'uint8')
|
| 137 |
+
assert_(h.dtype['z'].char == 'B')
|
| 138 |
+
assert_(h.dtype['z'].type == np.uint8)
|
| 139 |
+
# A small check that data is ok
|
| 140 |
+
assert_equal(h['z'], np.zeros((2, 3), dtype='u1'))
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
class TestCreateZerosPlain(CreateZeros):
|
| 144 |
+
"""Check the creation of heterogeneous arrays zero-valued (plain)"""
|
| 145 |
+
_descr = Pdescr
|
| 146 |
+
|
| 147 |
+
class TestCreateZerosNested(CreateZeros):
|
| 148 |
+
"""Check the creation of heterogeneous arrays zero-valued (nested)"""
|
| 149 |
+
_descr = Ndescr
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
class CreateValues:
|
| 153 |
+
"""Check the creation of heterogeneous arrays with values"""
|
| 154 |
+
|
| 155 |
+
def test_tuple(self):
|
| 156 |
+
"""Check creation from tuples"""
|
| 157 |
+
h = np.array(self._buffer, dtype=self._descr)
|
| 158 |
+
assert_(normalize_descr(self._descr) == h.dtype.descr)
|
| 159 |
+
if self.multiple_rows:
|
| 160 |
+
assert_(h.shape == (2,))
|
| 161 |
+
else:
|
| 162 |
+
assert_(h.shape == ())
|
| 163 |
+
|
| 164 |
+
def test_list_of_tuple(self):
|
| 165 |
+
"""Check creation from list of tuples"""
|
| 166 |
+
h = np.array([self._buffer], dtype=self._descr)
|
| 167 |
+
assert_(normalize_descr(self._descr) == h.dtype.descr)
|
| 168 |
+
if self.multiple_rows:
|
| 169 |
+
assert_(h.shape == (1, 2))
|
| 170 |
+
else:
|
| 171 |
+
assert_(h.shape == (1,))
|
| 172 |
+
|
| 173 |
+
def test_list_of_list_of_tuple(self):
|
| 174 |
+
"""Check creation from list of list of tuples"""
|
| 175 |
+
h = np.array([[self._buffer]], dtype=self._descr)
|
| 176 |
+
assert_(normalize_descr(self._descr) == h.dtype.descr)
|
| 177 |
+
if self.multiple_rows:
|
| 178 |
+
assert_(h.shape == (1, 1, 2))
|
| 179 |
+
else:
|
| 180 |
+
assert_(h.shape == (1, 1))
|
| 181 |
+
|
| 182 |
+
|
| 183 |
+
class TestCreateValuesPlainSingle(CreateValues):
|
| 184 |
+
"""Check the creation of heterogeneous arrays (plain, single row)"""
|
| 185 |
+
_descr = Pdescr
|
| 186 |
+
multiple_rows = 0
|
| 187 |
+
_buffer = PbufferT[0]
|
| 188 |
+
|
| 189 |
+
class TestCreateValuesPlainMultiple(CreateValues):
|
| 190 |
+
"""Check the creation of heterogeneous arrays (plain, multiple rows)"""
|
| 191 |
+
_descr = Pdescr
|
| 192 |
+
multiple_rows = 1
|
| 193 |
+
_buffer = PbufferT
|
| 194 |
+
|
| 195 |
+
class TestCreateValuesNestedSingle(CreateValues):
|
| 196 |
+
"""Check the creation of heterogeneous arrays (nested, single row)"""
|
| 197 |
+
_descr = Ndescr
|
| 198 |
+
multiple_rows = 0
|
| 199 |
+
_buffer = NbufferT[0]
|
| 200 |
+
|
| 201 |
+
class TestCreateValuesNestedMultiple(CreateValues):
|
| 202 |
+
"""Check the creation of heterogeneous arrays (nested, multiple rows)"""
|
| 203 |
+
_descr = Ndescr
|
| 204 |
+
multiple_rows = 1
|
| 205 |
+
_buffer = NbufferT
|
| 206 |
+
|
| 207 |
+
|
| 208 |
+
############################################################
|
| 209 |
+
# Reading tests
|
| 210 |
+
############################################################
|
| 211 |
+
|
| 212 |
+
class ReadValuesPlain:
|
| 213 |
+
"""Check the reading of values in heterogeneous arrays (plain)"""
|
| 214 |
+
|
| 215 |
+
def test_access_fields(self):
|
| 216 |
+
h = np.array(self._buffer, dtype=self._descr)
|
| 217 |
+
if not self.multiple_rows:
|
| 218 |
+
assert_(h.shape == ())
|
| 219 |
+
assert_equal(h['x'], np.array(self._buffer[0], dtype='i4'))
|
| 220 |
+
assert_equal(h['y'], np.array(self._buffer[1], dtype='f8'))
|
| 221 |
+
assert_equal(h['z'], np.array(self._buffer[2], dtype='u1'))
|
| 222 |
+
else:
|
| 223 |
+
assert_(len(h) == 2)
|
| 224 |
+
assert_equal(h['x'], np.array([self._buffer[0][0],
|
| 225 |
+
self._buffer[1][0]], dtype='i4'))
|
| 226 |
+
assert_equal(h['y'], np.array([self._buffer[0][1],
|
| 227 |
+
self._buffer[1][1]], dtype='f8'))
|
| 228 |
+
assert_equal(h['z'], np.array([self._buffer[0][2],
|
| 229 |
+
self._buffer[1][2]], dtype='u1'))
|
| 230 |
+
|
| 231 |
+
|
| 232 |
+
class TestReadValuesPlainSingle(ReadValuesPlain):
|
| 233 |
+
"""Check the creation of heterogeneous arrays (plain, single row)"""
|
| 234 |
+
_descr = Pdescr
|
| 235 |
+
multiple_rows = 0
|
| 236 |
+
_buffer = PbufferT[0]
|
| 237 |
+
|
| 238 |
+
class TestReadValuesPlainMultiple(ReadValuesPlain):
|
| 239 |
+
"""Check the values of heterogeneous arrays (plain, multiple rows)"""
|
| 240 |
+
_descr = Pdescr
|
| 241 |
+
multiple_rows = 1
|
| 242 |
+
_buffer = PbufferT
|
| 243 |
+
|
| 244 |
+
class ReadValuesNested:
|
| 245 |
+
"""Check the reading of values in heterogeneous arrays (nested)"""
|
| 246 |
+
|
| 247 |
+
def test_access_top_fields(self):
|
| 248 |
+
"""Check reading the top fields of a nested array"""
|
| 249 |
+
h = np.array(self._buffer, dtype=self._descr)
|
| 250 |
+
if not self.multiple_rows:
|
| 251 |
+
assert_(h.shape == ())
|
| 252 |
+
assert_equal(h['x'], np.array(self._buffer[0], dtype='i4'))
|
| 253 |
+
assert_equal(h['y'], np.array(self._buffer[4], dtype='f8'))
|
| 254 |
+
assert_equal(h['z'], np.array(self._buffer[5], dtype='u1'))
|
| 255 |
+
else:
|
| 256 |
+
assert_(len(h) == 2)
|
| 257 |
+
assert_equal(h['x'], np.array([self._buffer[0][0],
|
| 258 |
+
self._buffer[1][0]], dtype='i4'))
|
| 259 |
+
assert_equal(h['y'], np.array([self._buffer[0][4],
|
| 260 |
+
self._buffer[1][4]], dtype='f8'))
|
| 261 |
+
assert_equal(h['z'], np.array([self._buffer[0][5],
|
| 262 |
+
self._buffer[1][5]], dtype='u1'))
|
| 263 |
+
|
| 264 |
+
def test_nested1_acessors(self):
|
| 265 |
+
"""Check reading the nested fields of a nested array (1st level)"""
|
| 266 |
+
h = np.array(self._buffer, dtype=self._descr)
|
| 267 |
+
if not self.multiple_rows:
|
| 268 |
+
assert_equal(h['Info']['value'],
|
| 269 |
+
np.array(self._buffer[1][0], dtype='c16'))
|
| 270 |
+
assert_equal(h['Info']['y2'],
|
| 271 |
+
np.array(self._buffer[1][1], dtype='f8'))
|
| 272 |
+
assert_equal(h['info']['Name'],
|
| 273 |
+
np.array(self._buffer[3][0], dtype='U2'))
|
| 274 |
+
assert_equal(h['info']['Value'],
|
| 275 |
+
np.array(self._buffer[3][1], dtype='c16'))
|
| 276 |
+
else:
|
| 277 |
+
assert_equal(h['Info']['value'],
|
| 278 |
+
np.array([self._buffer[0][1][0],
|
| 279 |
+
self._buffer[1][1][0]],
|
| 280 |
+
dtype='c16'))
|
| 281 |
+
assert_equal(h['Info']['y2'],
|
| 282 |
+
np.array([self._buffer[0][1][1],
|
| 283 |
+
self._buffer[1][1][1]],
|
| 284 |
+
dtype='f8'))
|
| 285 |
+
assert_equal(h['info']['Name'],
|
| 286 |
+
np.array([self._buffer[0][3][0],
|
| 287 |
+
self._buffer[1][3][0]],
|
| 288 |
+
dtype='U2'))
|
| 289 |
+
assert_equal(h['info']['Value'],
|
| 290 |
+
np.array([self._buffer[0][3][1],
|
| 291 |
+
self._buffer[1][3][1]],
|
| 292 |
+
dtype='c16'))
|
| 293 |
+
|
| 294 |
+
def test_nested2_acessors(self):
|
| 295 |
+
"""Check reading the nested fields of a nested array (2nd level)"""
|
| 296 |
+
h = np.array(self._buffer, dtype=self._descr)
|
| 297 |
+
if not self.multiple_rows:
|
| 298 |
+
assert_equal(h['Info']['Info2']['value'],
|
| 299 |
+
np.array(self._buffer[1][2][1], dtype='c16'))
|
| 300 |
+
assert_equal(h['Info']['Info2']['z3'],
|
| 301 |
+
np.array(self._buffer[1][2][3], dtype='u4'))
|
| 302 |
+
else:
|
| 303 |
+
assert_equal(h['Info']['Info2']['value'],
|
| 304 |
+
np.array([self._buffer[0][1][2][1],
|
| 305 |
+
self._buffer[1][1][2][1]],
|
| 306 |
+
dtype='c16'))
|
| 307 |
+
assert_equal(h['Info']['Info2']['z3'],
|
| 308 |
+
np.array([self._buffer[0][1][2][3],
|
| 309 |
+
self._buffer[1][1][2][3]],
|
| 310 |
+
dtype='u4'))
|
| 311 |
+
|
| 312 |
+
def test_nested1_descriptor(self):
|
| 313 |
+
"""Check access nested descriptors of a nested array (1st level)"""
|
| 314 |
+
h = np.array(self._buffer, dtype=self._descr)
|
| 315 |
+
assert_(h.dtype['Info']['value'].name == 'complex128')
|
| 316 |
+
assert_(h.dtype['Info']['y2'].name == 'float64')
|
| 317 |
+
assert_(h.dtype['info']['Name'].name == 'str256')
|
| 318 |
+
assert_(h.dtype['info']['Value'].name == 'complex128')
|
| 319 |
+
|
| 320 |
+
def test_nested2_descriptor(self):
|
| 321 |
+
"""Check access nested descriptors of a nested array (2nd level)"""
|
| 322 |
+
h = np.array(self._buffer, dtype=self._descr)
|
| 323 |
+
assert_(h.dtype['Info']['Info2']['value'].name == 'void256')
|
| 324 |
+
assert_(h.dtype['Info']['Info2']['z3'].name == 'void64')
|
| 325 |
+
|
| 326 |
+
|
| 327 |
+
class TestReadValuesNestedSingle(ReadValuesNested):
|
| 328 |
+
"""Check the values of heterogeneous arrays (nested, single row)"""
|
| 329 |
+
_descr = Ndescr
|
| 330 |
+
multiple_rows = False
|
| 331 |
+
_buffer = NbufferT[0]
|
| 332 |
+
|
| 333 |
+
class TestReadValuesNestedMultiple(ReadValuesNested):
|
| 334 |
+
"""Check the values of heterogeneous arrays (nested, multiple rows)"""
|
| 335 |
+
_descr = Ndescr
|
| 336 |
+
multiple_rows = True
|
| 337 |
+
_buffer = NbufferT
|
| 338 |
+
|
| 339 |
+
class TestEmptyField:
|
| 340 |
+
def test_assign(self):
|
| 341 |
+
a = np.arange(10, dtype=np.float32)
|
| 342 |
+
a.dtype = [("int", "<0i4"), ("float", "<2f4")]
|
| 343 |
+
assert_(a['int'].shape == (5, 0))
|
| 344 |
+
assert_(a['float'].shape == (5, 2))
|
| 345 |
+
|
| 346 |
+
|
| 347 |
+
class TestMultipleFields:
|
| 348 |
+
def setup_method(self):
|
| 349 |
+
self.ary = np.array([(1, 2, 3, 4), (5, 6, 7, 8)], dtype='i4,f4,i2,c8')
|
| 350 |
+
|
| 351 |
+
def _bad_call(self):
|
| 352 |
+
return self.ary['f0', 'f1']
|
| 353 |
+
|
| 354 |
+
def test_no_tuple(self):
|
| 355 |
+
assert_raises(IndexError, self._bad_call)
|
| 356 |
+
|
| 357 |
+
def test_return(self):
|
| 358 |
+
res = self.ary[['f0', 'f2']].tolist()
|
| 359 |
+
assert_(res == [(1, 3), (5, 7)])
|
| 360 |
+
|
| 361 |
+
|
| 362 |
+
class TestIsSubDType:
|
| 363 |
+
# scalar types can be promoted into dtypes
|
| 364 |
+
wrappers = [np.dtype, lambda x: x]
|
| 365 |
+
|
| 366 |
+
def test_both_abstract(self):
|
| 367 |
+
assert_(np.issubdtype(np.floating, np.inexact))
|
| 368 |
+
assert_(not np.issubdtype(np.inexact, np.floating))
|
| 369 |
+
|
| 370 |
+
def test_same(self):
|
| 371 |
+
for cls in (np.float32, np.int32):
|
| 372 |
+
for w1, w2 in itertools.product(self.wrappers, repeat=2):
|
| 373 |
+
assert_(np.issubdtype(w1(cls), w2(cls)))
|
| 374 |
+
|
| 375 |
+
def test_subclass(self):
|
| 376 |
+
# note we cannot promote floating to a dtype, as it would turn into a
|
| 377 |
+
# concrete type
|
| 378 |
+
for w in self.wrappers:
|
| 379 |
+
assert_(np.issubdtype(w(np.float32), np.floating))
|
| 380 |
+
assert_(np.issubdtype(w(np.float64), np.floating))
|
| 381 |
+
|
| 382 |
+
def test_subclass_backwards(self):
|
| 383 |
+
for w in self.wrappers:
|
| 384 |
+
assert_(not np.issubdtype(np.floating, w(np.float32)))
|
| 385 |
+
assert_(not np.issubdtype(np.floating, w(np.float64)))
|
| 386 |
+
|
| 387 |
+
def test_sibling_class(self):
|
| 388 |
+
for w1, w2 in itertools.product(self.wrappers, repeat=2):
|
| 389 |
+
assert_(not np.issubdtype(w1(np.float32), w2(np.float64)))
|
| 390 |
+
assert_(not np.issubdtype(w1(np.float64), w2(np.float32)))
|
| 391 |
+
|
| 392 |
+
def test_nondtype_nonscalartype(self):
|
| 393 |
+
# See gh-14619 and gh-9505 which introduced the deprecation to fix
|
| 394 |
+
# this. These tests are directly taken from gh-9505
|
| 395 |
+
assert not np.issubdtype(np.float32, 'float64')
|
| 396 |
+
assert not np.issubdtype(np.float32, 'f8')
|
| 397 |
+
assert not np.issubdtype(np.int32, str)
|
| 398 |
+
assert not np.issubdtype(np.int32, 'int64')
|
| 399 |
+
assert not np.issubdtype(np.str_, 'void')
|
| 400 |
+
# for the following the correct spellings are
|
| 401 |
+
# np.integer, np.floating, or np.complexfloating respectively:
|
| 402 |
+
assert not np.issubdtype(np.int8, int) # np.int8 is never np.int_
|
| 403 |
+
assert not np.issubdtype(np.float32, float)
|
| 404 |
+
assert not np.issubdtype(np.complex64, complex)
|
| 405 |
+
assert not np.issubdtype(np.float32, "float")
|
| 406 |
+
assert not np.issubdtype(np.float64, "f")
|
| 407 |
+
|
| 408 |
+
# Test the same for the correct first datatype and abstract one
|
| 409 |
+
# in the case of int, float, complex:
|
| 410 |
+
assert np.issubdtype(np.float64, 'float64')
|
| 411 |
+
assert np.issubdtype(np.float64, 'f8')
|
| 412 |
+
assert np.issubdtype(np.str_, str)
|
| 413 |
+
assert np.issubdtype(np.int64, 'int64')
|
| 414 |
+
assert np.issubdtype(np.void, 'void')
|
| 415 |
+
assert np.issubdtype(np.int8, np.integer)
|
| 416 |
+
assert np.issubdtype(np.float32, np.floating)
|
| 417 |
+
assert np.issubdtype(np.complex64, np.complexfloating)
|
| 418 |
+
assert np.issubdtype(np.float64, "float")
|
| 419 |
+
assert np.issubdtype(np.float32, "f")
|
| 420 |
+
|
| 421 |
+
|
| 422 |
+
class TestIsDType:
|
| 423 |
+
"""
|
| 424 |
+
Check correctness of `np.isdtype`. The test considers different argument
|
| 425 |
+
configurations: `np.isdtype(dtype, k1)` and `np.isdtype(dtype, (k1, k2))`
|
| 426 |
+
with concrete dtypes and dtype groups.
|
| 427 |
+
"""
|
| 428 |
+
dtype_group_dict = {
|
| 429 |
+
"signed integer": sctypes["int"],
|
| 430 |
+
"unsigned integer": sctypes["uint"],
|
| 431 |
+
"integral": sctypes["int"] + sctypes["uint"],
|
| 432 |
+
"real floating": sctypes["float"],
|
| 433 |
+
"complex floating": sctypes["complex"],
|
| 434 |
+
"numeric": (
|
| 435 |
+
sctypes["int"] + sctypes["uint"] + sctypes["float"] +
|
| 436 |
+
sctypes["complex"]
|
| 437 |
+
)
|
| 438 |
+
}
|
| 439 |
+
|
| 440 |
+
@pytest.mark.parametrize(
|
| 441 |
+
"dtype,close_dtype",
|
| 442 |
+
[
|
| 443 |
+
(np.int64, np.int32), (np.uint64, np.uint32),
|
| 444 |
+
(np.float64, np.float32), (np.complex128, np.complex64)
|
| 445 |
+
]
|
| 446 |
+
)
|
| 447 |
+
@pytest.mark.parametrize(
|
| 448 |
+
"dtype_group",
|
| 449 |
+
[
|
| 450 |
+
None, "signed integer", "unsigned integer", "integral",
|
| 451 |
+
"real floating", "complex floating", "numeric"
|
| 452 |
+
]
|
| 453 |
+
)
|
| 454 |
+
def test_isdtype(self, dtype, close_dtype, dtype_group):
|
| 455 |
+
# First check if same dtypes return `true` and different ones
|
| 456 |
+
# give `false` (even if they're close in the dtype hierarchy!)
|
| 457 |
+
if dtype_group is None:
|
| 458 |
+
assert np.isdtype(dtype, dtype)
|
| 459 |
+
assert not np.isdtype(dtype, close_dtype)
|
| 460 |
+
assert np.isdtype(dtype, (dtype, close_dtype))
|
| 461 |
+
|
| 462 |
+
# Check that dtype and a dtype group that it belongs to
|
| 463 |
+
# return `true`, and `false` otherwise.
|
| 464 |
+
elif dtype in self.dtype_group_dict[dtype_group]:
|
| 465 |
+
assert np.isdtype(dtype, dtype_group)
|
| 466 |
+
assert np.isdtype(dtype, (close_dtype, dtype_group))
|
| 467 |
+
else:
|
| 468 |
+
assert not np.isdtype(dtype, dtype_group)
|
| 469 |
+
|
| 470 |
+
def test_isdtype_invalid_args(self):
|
| 471 |
+
with assert_raises_regex(TypeError, r".*must be a NumPy dtype.*"):
|
| 472 |
+
np.isdtype("int64", np.int64)
|
| 473 |
+
with assert_raises_regex(TypeError, r".*kind argument must.*"):
|
| 474 |
+
np.isdtype(np.int64, 1)
|
| 475 |
+
with assert_raises_regex(ValueError, r".*not a known kind name.*"):
|
| 476 |
+
np.isdtype(np.int64, "int64")
|
| 477 |
+
|
| 478 |
+
def test_sctypes_complete(self):
|
| 479 |
+
# issue 26439: int32/intc were masking each other on 32-bit builds
|
| 480 |
+
assert np.int32 in sctypes['int']
|
| 481 |
+
assert np.intc in sctypes['int']
|
| 482 |
+
assert np.int64 in sctypes['int']
|
| 483 |
+
assert np.uint32 in sctypes['uint']
|
| 484 |
+
assert np.uintc in sctypes['uint']
|
| 485 |
+
assert np.uint64 in sctypes['uint']
|
| 486 |
+
|
| 487 |
+
class TestSctypeDict:
|
| 488 |
+
def test_longdouble(self):
|
| 489 |
+
assert_(np._core.sctypeDict['float64'] is not np.longdouble)
|
| 490 |
+
assert_(np._core.sctypeDict['complex128'] is not np.clongdouble)
|
| 491 |
+
|
| 492 |
+
def test_ulong(self):
|
| 493 |
+
assert np._core.sctypeDict['ulong'] is np.ulong
|
| 494 |
+
assert np.dtype(np.ulong) is np.dtype("ulong")
|
| 495 |
+
assert np.dtype(np.ulong).itemsize == np.dtype(np.long).itemsize
|
| 496 |
+
|
| 497 |
+
|
| 498 |
+
@pytest.mark.filterwarnings("ignore:.*maximum_sctype.*:DeprecationWarning")
|
| 499 |
+
class TestMaximumSctype:
|
| 500 |
+
|
| 501 |
+
# note that parametrizing with sctype['int'] and similar would skip types
|
| 502 |
+
# with the same size (gh-11923)
|
| 503 |
+
|
| 504 |
+
@pytest.mark.parametrize(
|
| 505 |
+
't', [np.byte, np.short, np.intc, np.long, np.longlong]
|
| 506 |
+
)
|
| 507 |
+
def test_int(self, t):
|
| 508 |
+
assert_equal(maximum_sctype(t), np._core.sctypes['int'][-1])
|
| 509 |
+
|
| 510 |
+
@pytest.mark.parametrize(
|
| 511 |
+
't', [np.ubyte, np.ushort, np.uintc, np.ulong, np.ulonglong]
|
| 512 |
+
)
|
| 513 |
+
def test_uint(self, t):
|
| 514 |
+
assert_equal(maximum_sctype(t), np._core.sctypes['uint'][-1])
|
| 515 |
+
|
| 516 |
+
@pytest.mark.parametrize('t', [np.half, np.single, np.double, np.longdouble])
|
| 517 |
+
def test_float(self, t):
|
| 518 |
+
assert_equal(maximum_sctype(t), np._core.sctypes['float'][-1])
|
| 519 |
+
|
| 520 |
+
@pytest.mark.parametrize('t', [np.csingle, np.cdouble, np.clongdouble])
|
| 521 |
+
def test_complex(self, t):
|
| 522 |
+
assert_equal(maximum_sctype(t), np._core.sctypes['complex'][-1])
|
| 523 |
+
|
| 524 |
+
@pytest.mark.parametrize('t', [np.bool, np.object_, np.str_, np.bytes_,
|
| 525 |
+
np.void])
|
| 526 |
+
def test_other(self, t):
|
| 527 |
+
assert_equal(maximum_sctype(t), t)
|
| 528 |
+
|
| 529 |
+
|
| 530 |
+
class Test_sctype2char:
|
| 531 |
+
# This function is old enough that we're really just documenting the quirks
|
| 532 |
+
# at this point.
|
| 533 |
+
|
| 534 |
+
def test_scalar_type(self):
|
| 535 |
+
assert_equal(sctype2char(np.double), 'd')
|
| 536 |
+
assert_equal(sctype2char(np.long), 'l')
|
| 537 |
+
assert_equal(sctype2char(np.int_), np.array(0).dtype.char)
|
| 538 |
+
assert_equal(sctype2char(np.str_), 'U')
|
| 539 |
+
assert_equal(sctype2char(np.bytes_), 'S')
|
| 540 |
+
|
| 541 |
+
def test_other_type(self):
|
| 542 |
+
assert_equal(sctype2char(float), 'd')
|
| 543 |
+
assert_equal(sctype2char(list), 'O')
|
| 544 |
+
assert_equal(sctype2char(np.ndarray), 'O')
|
| 545 |
+
|
| 546 |
+
def test_third_party_scalar_type(self):
|
| 547 |
+
from numpy._core._rational_tests import rational
|
| 548 |
+
assert_raises(KeyError, sctype2char, rational)
|
| 549 |
+
assert_raises(KeyError, sctype2char, rational(1))
|
| 550 |
+
|
| 551 |
+
def test_array_instance(self):
|
| 552 |
+
assert_equal(sctype2char(np.array([1.0, 2.0])), 'd')
|
| 553 |
+
|
| 554 |
+
def test_abstract_type(self):
|
| 555 |
+
assert_raises(KeyError, sctype2char, np.floating)
|
| 556 |
+
|
| 557 |
+
def test_non_type(self):
|
| 558 |
+
assert_raises(ValueError, sctype2char, 1)
|
| 559 |
+
|
| 560 |
+
@pytest.mark.parametrize("rep, expected", [
|
| 561 |
+
(np.int32, True),
|
| 562 |
+
(list, False),
|
| 563 |
+
(1.1, False),
|
| 564 |
+
(str, True),
|
| 565 |
+
(np.dtype(np.float64), True),
|
| 566 |
+
(np.dtype((np.int16, (3, 4))), True),
|
| 567 |
+
(np.dtype([('a', np.int8)]), True),
|
| 568 |
+
])
|
| 569 |
+
def test_issctype(rep, expected):
|
| 570 |
+
# ensure proper identification of scalar
|
| 571 |
+
# data-types by issctype()
|
| 572 |
+
actual = issctype(rep)
|
| 573 |
+
assert type(actual) is bool
|
| 574 |
+
assert_equal(actual, expected)
|
| 575 |
+
|
| 576 |
+
|
| 577 |
+
@pytest.mark.skipif(sys.flags.optimize > 1,
|
| 578 |
+
reason="no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1")
|
| 579 |
+
@pytest.mark.xfail(IS_PYPY,
|
| 580 |
+
reason="PyPy cannot modify tp_doc after PyType_Ready")
|
| 581 |
+
class TestDocStrings:
|
| 582 |
+
def test_platform_dependent_aliases(self):
|
| 583 |
+
if np.int64 is np.int_:
|
| 584 |
+
assert_('int64' in np.int_.__doc__)
|
| 585 |
+
elif np.int64 is np.longlong:
|
| 586 |
+
assert_('int64' in np.longlong.__doc__)
|
| 587 |
+
|
| 588 |
+
|
| 589 |
+
class TestScalarTypeNames:
|
| 590 |
+
# gh-9799
|
| 591 |
+
|
| 592 |
+
numeric_types = [
|
| 593 |
+
np.byte, np.short, np.intc, np.long, np.longlong,
|
| 594 |
+
np.ubyte, np.ushort, np.uintc, np.ulong, np.ulonglong,
|
| 595 |
+
np.half, np.single, np.double, np.longdouble,
|
| 596 |
+
np.csingle, np.cdouble, np.clongdouble,
|
| 597 |
+
]
|
| 598 |
+
|
| 599 |
+
def test_names_are_unique(self):
|
| 600 |
+
# none of the above may be aliases for each other
|
| 601 |
+
assert len(set(self.numeric_types)) == len(self.numeric_types)
|
| 602 |
+
|
| 603 |
+
# names must be unique
|
| 604 |
+
names = [t.__name__ for t in self.numeric_types]
|
| 605 |
+
assert len(set(names)) == len(names)
|
| 606 |
+
|
| 607 |
+
@pytest.mark.parametrize('t', numeric_types)
|
| 608 |
+
def test_names_reflect_attributes(self, t):
|
| 609 |
+
""" Test that names correspond to where the type is under ``np.`` """
|
| 610 |
+
assert getattr(np, t.__name__) is t
|
| 611 |
+
|
| 612 |
+
@pytest.mark.parametrize('t', numeric_types)
|
| 613 |
+
def test_names_are_undersood_by_dtype(self, t):
|
| 614 |
+
""" Test the dtype constructor maps names back to the type """
|
| 615 |
+
assert np.dtype(t.__name__).type is t
|
| 616 |
+
|
| 617 |
+
|
| 618 |
+
class TestBoolDefinition:
|
| 619 |
+
def test_bool_definition(self):
|
| 620 |
+
assert nt.bool is np.bool
|
mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_overrides.py
ADDED
|
@@ -0,0 +1,797 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import inspect
|
| 2 |
+
import sys
|
| 3 |
+
import os
|
| 4 |
+
import tempfile
|
| 5 |
+
from io import StringIO
|
| 6 |
+
from unittest import mock
|
| 7 |
+
import pickle
|
| 8 |
+
|
| 9 |
+
import pytest
|
| 10 |
+
|
| 11 |
+
import numpy as np
|
| 12 |
+
from numpy.testing import (
|
| 13 |
+
assert_, assert_equal, assert_raises, assert_raises_regex)
|
| 14 |
+
from numpy.testing.overrides import get_overridable_numpy_array_functions
|
| 15 |
+
from numpy._core.overrides import (
|
| 16 |
+
_get_implementing_args, array_function_dispatch,
|
| 17 |
+
verify_matching_signatures)
|
| 18 |
+
|
| 19 |
+
def _return_not_implemented(self, *args, **kwargs):
|
| 20 |
+
return NotImplemented
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
# need to define this at the top level to test pickling
|
| 24 |
+
@array_function_dispatch(lambda array: (array,))
|
| 25 |
+
def dispatched_one_arg(array):
|
| 26 |
+
"""Docstring."""
|
| 27 |
+
return 'original'
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
@array_function_dispatch(lambda array1, array2: (array1, array2))
|
| 31 |
+
def dispatched_two_arg(array1, array2):
|
| 32 |
+
"""Docstring."""
|
| 33 |
+
return 'original'
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
class TestGetImplementingArgs:
    """Priority ordering and deduplication of ``_get_implementing_args``."""

    def test_ndarray(self):
        arr = np.array(1)

        # Duplicates and plain scalars are dropped; the array survives.
        assert_equal(list(_get_implementing_args([arr])), [arr])
        assert_equal(list(_get_implementing_args([arr, arr])), [arr])
        assert_equal(list(_get_implementing_args([arr, 1])), [arr])
        assert_equal(list(_get_implementing_args([1, arr])), [arr])

    def test_ndarray_subclasses(self):

        class OverrideSub(np.ndarray):
            __array_function__ = _return_not_implemented

        class NoOverrideSub(np.ndarray):
            pass

        base = np.array(1).view(np.ndarray)
        with_override = np.array(1).view(OverrideSub)
        without_override = np.array(1).view(NoOverrideSub)

        # Subclasses always take priority over the ndarray base class.
        assert_equal(list(_get_implementing_args([base, with_override])),
                     [with_override, base])
        assert_equal(list(_get_implementing_args([base, without_override])),
                     [without_override, base])

        assert_equal(list(_get_implementing_args(
            [with_override, without_override])),
            [with_override, without_override])

    def test_ndarray_and_duck_array(self):

        class Other:
            __array_function__ = _return_not_implemented

        arr = np.array(1)
        duck = Other()

        # Unrelated duck types keep their original relative order.
        assert_equal(list(_get_implementing_args([duck, arr])), [duck, arr])
        assert_equal(list(_get_implementing_args([arr, duck])), [arr, duck])

    def test_ndarray_subclass_and_duck_array(self):

        class OverrideSub(np.ndarray):
            __array_function__ = _return_not_implemented

        class Other:
            __array_function__ = _return_not_implemented

        arr = np.array(1)
        sub = np.array(1).view(OverrideSub)
        duck = Other()

        # The subclass jumps ahead of its base; the duck stays in place.
        assert_equal(_get_implementing_args([arr, sub, duck]),
                     [sub, arr, duck])
        assert_equal(_get_implementing_args([arr, duck, sub]),
                     [sub, arr, duck])

    def test_many_duck_arrays(self):

        class A:
            __array_function__ = _return_not_implemented

        class B(A):
            __array_function__ = _return_not_implemented

        class C(A):
            __array_function__ = _return_not_implemented

        class D:
            __array_function__ = _return_not_implemented

        a, b, c, d = A(), B(), C(), D()

        assert_equal(_get_implementing_args([1]), [])
        assert_equal(_get_implementing_args([a]), [a])
        assert_equal(_get_implementing_args([a, 1]), [a])
        assert_equal(_get_implementing_args([a, a, a]), [a])
        assert_equal(_get_implementing_args([a, d, a]), [a, d])
        assert_equal(_get_implementing_args([a, b]), [b, a])
        assert_equal(_get_implementing_args([b, a]), [b, a])
        assert_equal(_get_implementing_args([a, b, c]), [b, c, a])
        assert_equal(_get_implementing_args([a, c, b]), [c, b, a])

    def test_too_many_duck_arrays(self):
        namespace = dict(__array_function__=_return_not_implemented)
        types = [type('A' + str(i), (object,), namespace) for i in range(65)]
        relevant_args = [t() for t in types]

        # Exactly 64 distinct implementing types is still accepted ...
        assert_equal(_get_implementing_args(relevant_args[:64]),
                     relevant_args[:64])

        # ... but 65 exceeds the hard limit.
        with assert_raises_regex(TypeError, 'distinct argument types'):
            _get_implementing_args(relevant_args)
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
class TestNDArrayArrayFunction:
    """Tests for ``ndarray.__array_function__`` itself."""

    def test_method(self):

        class Other:
            __array_function__ = _return_not_implemented

        class NoOverrideSub(np.ndarray):
            pass

        class OverrideSub(np.ndarray):
            __array_function__ = _return_not_implemented

        array = np.array([1])
        other = Other()
        no_override_sub = array.view(NoOverrideSub)
        override_sub = array.view(OverrideSub)

        # ndarray handles the call when every type is ndarray (or a scalar).
        result = array.__array_function__(func=dispatched_two_arg,
                                          types=(np.ndarray,),
                                          args=(array, 1.), kwargs={})
        assert_equal(result, 'original')

        # ... but defers when an unrelated duck type is involved.
        result = array.__array_function__(func=dispatched_two_arg,
                                          types=(np.ndarray, Other),
                                          args=(array, other), kwargs={})
        assert_(result is NotImplemented)

        # Subclasses (with or without their own override) are accepted.
        result = array.__array_function__(func=dispatched_two_arg,
                                          types=(np.ndarray, NoOverrideSub),
                                          args=(array, no_override_sub),
                                          kwargs={})
        assert_equal(result, 'original')

        result = array.__array_function__(func=dispatched_two_arg,
                                          types=(np.ndarray, OverrideSub),
                                          args=(array, override_sub),
                                          kwargs={})
        assert_equal(result, 'original')

        with assert_raises_regex(TypeError, 'no implementation found'):
            np.concatenate((array, other))

        # Subclass results come back as views of the subclass.
        expected = np.concatenate((array, array))
        result = np.concatenate((array, no_override_sub))
        assert_equal(result, expected.view(NoOverrideSub))
        result = np.concatenate((array, override_sub))
        assert_equal(result, expected.view(OverrideSub))

    def test_no_wrapper(self):
        # Regular numpy functions have wrappers, but do not presume
        # all functions do (array creation ones do not): check that
        # we just call the function in that case.
        array = np.array(1)
        func = lambda x: x * 2
        result = array.__array_function__(func=func, types=(np.ndarray,),
                                          args=(array,), kwargs={})
        assert_equal(result, array * 2)

    # NOTE: this method previously appeared twice, verbatim; the second
    # definition silently shadowed the first.  A single copy is kept.
    def test_wrong_arguments(self):
        # Check our implementation guards against wrong arguments.
        a = np.array([1, 2])
        with pytest.raises(TypeError, match="args must be a tuple"):
            a.__array_function__(np.reshape, (np.ndarray,), a, (2, 1))
        with pytest.raises(TypeError, match="kwargs must be a dict"):
            a.__array_function__(np.reshape, (np.ndarray,), (a,), (2, 1))
|
| 221 |
+
|
| 222 |
+
|
| 223 |
+
class TestArrayFunctionDispatch:
    """Behaviour of functions wrapped by ``array_function_dispatch``."""

    def test_pickle(self):
        # Dispatched functions must pickle back to the same object.
        for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
            restored = pickle.loads(
                pickle.dumps(dispatched_one_arg, protocol=proto))
            assert_(restored is dispatched_one_arg)

    def test_name_and_docstring(self):
        assert_equal(dispatched_one_arg.__name__, 'dispatched_one_arg')
        # Docstrings are stripped under ``python -OO``.
        if sys.flags.optimize < 2:
            assert_equal(dispatched_one_arg.__doc__, 'Docstring.')

    def test_interface(self):

        class MyArray:
            def __array_function__(self, func, types, args, kwargs):
                return (self, func, types, args, kwargs)

        original = MyArray()
        (obj, func, types, args, kwargs) = dispatched_one_arg(original)
        assert_(obj is original)
        assert_(func is dispatched_one_arg)
        assert_equal(set(types), {MyArray})
        # assert_equal uses the overloaded np.iscomplexobj() internally
        assert_(args == (original,))
        assert_equal(kwargs, {})

    def test_not_implemented(self):

        class MyArray:
            def __array_function__(self, func, types, args, kwargs):
                return NotImplemented

        duck = MyArray()
        with assert_raises_regex(TypeError, 'no implementation found'):
            dispatched_one_arg(duck)

    def test_where_dispatch(self):
        # `where=` participates in dispatch, too.

        class DuckArray:
            def __array_function__(self, ufunc, method, *inputs, **kwargs):
                return "overridden"

        plain = np.array(1)
        duck = DuckArray()

        result = np.std(plain, where=duck)

        assert_equal(result, "overridden")
|
| 273 |
+
|
| 274 |
+
|
| 275 |
+
class TestVerifyMatchingSignatures:
|
| 276 |
+
|
| 277 |
+
def test_verify_matching_signatures(self):
|
| 278 |
+
|
| 279 |
+
verify_matching_signatures(lambda x: 0, lambda x: 0)
|
| 280 |
+
verify_matching_signatures(lambda x=None: 0, lambda x=None: 0)
|
| 281 |
+
verify_matching_signatures(lambda x=1: 0, lambda x=None: 0)
|
| 282 |
+
|
| 283 |
+
with assert_raises(RuntimeError):
|
| 284 |
+
verify_matching_signatures(lambda a: 0, lambda b: 0)
|
| 285 |
+
with assert_raises(RuntimeError):
|
| 286 |
+
verify_matching_signatures(lambda x: 0, lambda x=None: 0)
|
| 287 |
+
with assert_raises(RuntimeError):
|
| 288 |
+
verify_matching_signatures(lambda x=None: 0, lambda y=None: 0)
|
| 289 |
+
with assert_raises(RuntimeError):
|
| 290 |
+
verify_matching_signatures(lambda x=1: 0, lambda y=1: 0)
|
| 291 |
+
|
| 292 |
+
def test_array_function_dispatch(self):
|
| 293 |
+
|
| 294 |
+
with assert_raises(RuntimeError):
|
| 295 |
+
@array_function_dispatch(lambda x: (x,))
|
| 296 |
+
def f(y):
|
| 297 |
+
pass
|
| 298 |
+
|
| 299 |
+
# should not raise
|
| 300 |
+
@array_function_dispatch(lambda x: (x,), verify=False)
|
| 301 |
+
def f(y):
|
| 302 |
+
pass
|
| 303 |
+
|
| 304 |
+
|
| 305 |
+
def _new_duck_type_and_implements():
|
| 306 |
+
"""Create a duck array type and implements functions."""
|
| 307 |
+
HANDLED_FUNCTIONS = {}
|
| 308 |
+
|
| 309 |
+
class MyArray:
|
| 310 |
+
def __array_function__(self, func, types, args, kwargs):
|
| 311 |
+
if func not in HANDLED_FUNCTIONS:
|
| 312 |
+
return NotImplemented
|
| 313 |
+
if not all(issubclass(t, MyArray) for t in types):
|
| 314 |
+
return NotImplemented
|
| 315 |
+
return HANDLED_FUNCTIONS[func](*args, **kwargs)
|
| 316 |
+
|
| 317 |
+
def implements(numpy_function):
|
| 318 |
+
"""Register an __array_function__ implementations."""
|
| 319 |
+
def decorator(func):
|
| 320 |
+
HANDLED_FUNCTIONS[numpy_function] = func
|
| 321 |
+
return func
|
| 322 |
+
return decorator
|
| 323 |
+
|
| 324 |
+
return (MyArray, implements)
|
| 325 |
+
|
| 326 |
+
|
| 327 |
+
class TestArrayFunctionImplementation:
    """End-to-end dispatch through registered ``__array_function__``
    implementations, plus error-path behaviour of the dispatcher."""

    def test_one_arg(self):
        MyArray, implements = _new_duck_type_and_implements()

        @implements(dispatched_one_arg)
        def _(array):
            return 'myarray'

        # Non-duck arguments fall through to the original implementation.
        assert_equal(dispatched_one_arg(1), 'original')
        assert_equal(dispatched_one_arg(MyArray()), 'myarray')

    def test_optional_args(self):
        MyArray, implements = _new_duck_type_and_implements()

        @array_function_dispatch(lambda array, option=None: (array,))
        def func_with_option(array, option='default'):
            return option

        @implements(func_with_option)
        def my_array_func_with_option(array, new_option='myarray'):
            return new_option

        # we don't need to implement every option on __array_function__
        # implementations
        assert_equal(func_with_option(1), 'default')
        assert_equal(func_with_option(1, option='extra'), 'extra')
        assert_equal(func_with_option(MyArray()), 'myarray')
        with assert_raises(TypeError):
            func_with_option(MyArray(), option='extra')

        # but new options on implementations can't be used
        result = my_array_func_with_option(MyArray(), new_option='yes')
        assert_equal(result, 'yes')
        with assert_raises(TypeError):
            func_with_option(MyArray(), new_option='no')

    def test_not_implemented(self):
        MyArray, implements = _new_duck_type_and_implements()

        @array_function_dispatch(lambda array: (array,), module='my')
        def func(array):
            return array

        array = np.array(1)
        assert_(func(array) is array)
        # `module=` is reflected in the wrapped function's __module__ ...
        assert_equal(func.__module__, 'my')

        # ... and in the "no implementation" error message.
        with assert_raises_regex(
                TypeError, "no implementation found for 'my.func'"):
            func(MyArray())

    @pytest.mark.parametrize("name", ["concatenate", "mean", "asarray"])
    def test_signature_error_message_simple(self, name):
        func = getattr(np, name)
        try:
            # all of these functions need an argument:
            func()
        except TypeError as e:
            exc = e

        # The error must mention the public function name, not a wrapper.
        assert exc.args[0].startswith(f"{name}()")

    def test_signature_error_message(self):
        # The lambda function will be named "<lambda>", but the TypeError
        # should show the name as "func"
        def _dispatcher():
            return ()

        @array_function_dispatch(_dispatcher)
        def func():
            pass

        try:
            func._implementation(bad_arg=3)
        except TypeError as e:
            expected_exception = e

        try:
            func(bad_arg=3)
            raise AssertionError("must fail")
        except TypeError as exc:
            if exc.args[0].startswith("_dispatcher"):
                # We replace the qualname currently, but it used `__name__`
                # (relevant functions have the same name and qualname anyway)
                pytest.skip("Python version is not using __qualname__ for "
                            "TypeError formatting.")

        # Wrapped call must report the same TypeError as a direct call.
        assert exc.args == expected_exception.args

    @pytest.mark.parametrize("value", [234, "this func is not replaced"])
    def test_dispatcher_error(self, value):
        # If the dispatcher raises an error, we must not attempt to mutate it
        error = TypeError(value)

        def dispatcher():
            raise error

        @array_function_dispatch(dispatcher)
        def func():
            return 3

        try:
            func()
            raise AssertionError("must fail")
        except TypeError as exc:
            assert exc is error  # unmodified exception

    def test_properties(self):
        # Check that str and repr are sensible
        func = dispatched_two_arg
        assert str(func) == str(func._implementation)
        # Strip the "at 0x..." id suffix before comparing reprs.
        repr_no_id = repr(func).split("at ")[0]
        repr_no_id_impl = repr(func._implementation).split("at ")[0]
        assert repr_no_id == repr_no_id_impl

    @pytest.mark.parametrize("func", [
        lambda x, y: 0,  # no like argument
        lambda like=None: 0,  # not keyword only
        lambda *, like=None, a=3: 0,  # not last (not that it matters)
    ])
    def test_bad_like_sig(self, func):
        # We sanity check the signature, and these should fail.
        with pytest.raises(RuntimeError):
            array_function_dispatch()(func)

    def test_bad_like_passing(self):
        # Cover internal sanity check for passing like as first positional arg
        def func(*, like=None):
            pass

        func_with_like = array_function_dispatch()(func)
        with pytest.raises(TypeError):
            func_with_like()
        with pytest.raises(TypeError):
            func_with_like(like=234)

    def test_too_many_args(self):
        # Mainly a unit-test to increase coverage
        objs = []
        for i in range(80):
            class MyArr:
                def __array_function__(self, *args, **kwargs):
                    return NotImplemented

            objs.append(MyArr())

        def _dispatch(*args):
            return args

        @array_function_dispatch(_dispatch)
        def func(*args):
            pass

        with pytest.raises(TypeError, match="maximum number"):
            func(*objs)
|
| 483 |
+
|
| 484 |
+
|
| 485 |
+
|
| 486 |
+
class TestNDArrayMethods:
    """ndarray methods must keep working when dispatch declines."""

    def test_repr(self):
        # gh-12162: should still be defined even if __array_function__ doesn't
        # implement np.array_repr()

        class MyArray(np.ndarray):
            def __array_function__(*args, **kwargs):
                return NotImplemented

        arr = np.array(1).view(MyArray)
        assert_equal(repr(arr), 'MyArray(1)')
        assert_equal(str(arr), '1')
|
| 499 |
+
|
| 500 |
+
|
| 501 |
+
class TestNumPyFunctions:
|
| 502 |
+
|
| 503 |
+
def test_set_module(self):
|
| 504 |
+
assert_equal(np.sum.__module__, 'numpy')
|
| 505 |
+
assert_equal(np.char.equal.__module__, 'numpy.char')
|
| 506 |
+
assert_equal(np.fft.fft.__module__, 'numpy.fft')
|
| 507 |
+
assert_equal(np.linalg.solve.__module__, 'numpy.linalg')
|
| 508 |
+
|
| 509 |
+
def test_inspect_sum(self):
|
| 510 |
+
signature = inspect.signature(np.sum)
|
| 511 |
+
assert_('axis' in signature.parameters)
|
| 512 |
+
|
| 513 |
+
def test_override_sum(self):
|
| 514 |
+
MyArray, implements = _new_duck_type_and_implements()
|
| 515 |
+
|
| 516 |
+
@implements(np.sum)
|
| 517 |
+
def _(array):
|
| 518 |
+
return 'yes'
|
| 519 |
+
|
| 520 |
+
assert_equal(np.sum(MyArray()), 'yes')
|
| 521 |
+
|
| 522 |
+
def test_sum_on_mock_array(self):
|
| 523 |
+
|
| 524 |
+
# We need a proxy for mocks because __array_function__ is only looked
|
| 525 |
+
# up in the class dict
|
| 526 |
+
class ArrayProxy:
|
| 527 |
+
def __init__(self, value):
|
| 528 |
+
self.value = value
|
| 529 |
+
def __array_function__(self, *args, **kwargs):
|
| 530 |
+
return self.value.__array_function__(*args, **kwargs)
|
| 531 |
+
def __array__(self, *args, **kwargs):
|
| 532 |
+
return self.value.__array__(*args, **kwargs)
|
| 533 |
+
|
| 534 |
+
proxy = ArrayProxy(mock.Mock(spec=ArrayProxy))
|
| 535 |
+
proxy.value.__array_function__.return_value = 1
|
| 536 |
+
result = np.sum(proxy)
|
| 537 |
+
assert_equal(result, 1)
|
| 538 |
+
proxy.value.__array_function__.assert_called_once_with(
|
| 539 |
+
np.sum, (ArrayProxy,), (proxy,), {})
|
| 540 |
+
proxy.value.__array__.assert_not_called()
|
| 541 |
+
|
| 542 |
+
def test_sum_forwarding_implementation(self):
|
| 543 |
+
|
| 544 |
+
class MyArray(np.ndarray):
|
| 545 |
+
|
| 546 |
+
def sum(self, axis, out):
|
| 547 |
+
return 'summed'
|
| 548 |
+
|
| 549 |
+
def __array_function__(self, func, types, args, kwargs):
|
| 550 |
+
return super().__array_function__(func, types, args, kwargs)
|
| 551 |
+
|
| 552 |
+
# note: the internal implementation of np.sum() calls the .sum() method
|
| 553 |
+
array = np.array(1).view(MyArray)
|
| 554 |
+
assert_equal(np.sum(array), 'summed')
|
| 555 |
+
|
| 556 |
+
|
| 557 |
+
class TestArrayLike:
|
| 558 |
+
def setup_method(self):
|
| 559 |
+
class MyArray:
|
| 560 |
+
def __init__(self, function=None):
|
| 561 |
+
self.function = function
|
| 562 |
+
|
| 563 |
+
def __array_function__(self, func, types, args, kwargs):
|
| 564 |
+
assert func is getattr(np, func.__name__)
|
| 565 |
+
try:
|
| 566 |
+
my_func = getattr(self, func.__name__)
|
| 567 |
+
except AttributeError:
|
| 568 |
+
return NotImplemented
|
| 569 |
+
return my_func(*args, **kwargs)
|
| 570 |
+
|
| 571 |
+
self.MyArray = MyArray
|
| 572 |
+
|
| 573 |
+
class MyNoArrayFunctionArray:
|
| 574 |
+
def __init__(self, function=None):
|
| 575 |
+
self.function = function
|
| 576 |
+
|
| 577 |
+
self.MyNoArrayFunctionArray = MyNoArrayFunctionArray
|
| 578 |
+
|
| 579 |
+
class MySubclass(np.ndarray):
|
| 580 |
+
def __array_function__(self, func, types, args, kwargs):
|
| 581 |
+
result = super().__array_function__(func, types, args, kwargs)
|
| 582 |
+
return result.view(self.__class__)
|
| 583 |
+
|
| 584 |
+
self.MySubclass = MySubclass
|
| 585 |
+
|
| 586 |
+
def add_method(self, name, arr_class, enable_value_error=False):
|
| 587 |
+
def _definition(*args, **kwargs):
|
| 588 |
+
# Check that `like=` isn't propagated downstream
|
| 589 |
+
assert 'like' not in kwargs
|
| 590 |
+
|
| 591 |
+
if enable_value_error and 'value_error' in kwargs:
|
| 592 |
+
raise ValueError
|
| 593 |
+
|
| 594 |
+
return arr_class(getattr(arr_class, name))
|
| 595 |
+
setattr(arr_class, name, _definition)
|
| 596 |
+
|
| 597 |
+
def func_args(*args, **kwargs):
|
| 598 |
+
return args, kwargs
|
| 599 |
+
|
| 600 |
+
def test_array_like_not_implemented(self):
|
| 601 |
+
self.add_method('array', self.MyArray)
|
| 602 |
+
|
| 603 |
+
ref = self.MyArray.array()
|
| 604 |
+
|
| 605 |
+
with assert_raises_regex(TypeError, 'no implementation found'):
|
| 606 |
+
array_like = np.asarray(1, like=ref)
|
| 607 |
+
|
| 608 |
+
_array_tests = [
|
| 609 |
+
('array', *func_args((1,))),
|
| 610 |
+
('asarray', *func_args((1,))),
|
| 611 |
+
('asanyarray', *func_args((1,))),
|
| 612 |
+
('ascontiguousarray', *func_args((2, 3))),
|
| 613 |
+
('asfortranarray', *func_args((2, 3))),
|
| 614 |
+
('require', *func_args((np.arange(6).reshape(2, 3),),
|
| 615 |
+
requirements=['A', 'F'])),
|
| 616 |
+
('empty', *func_args((1,))),
|
| 617 |
+
('full', *func_args((1,), 2)),
|
| 618 |
+
('ones', *func_args((1,))),
|
| 619 |
+
('zeros', *func_args((1,))),
|
| 620 |
+
('arange', *func_args(3)),
|
| 621 |
+
('frombuffer', *func_args(b'\x00' * 8, dtype=int)),
|
| 622 |
+
('fromiter', *func_args(range(3), dtype=int)),
|
| 623 |
+
('fromstring', *func_args('1,2', dtype=int, sep=',')),
|
| 624 |
+
('loadtxt', *func_args(lambda: StringIO('0 1\n2 3'))),
|
| 625 |
+
('genfromtxt', *func_args(lambda: StringIO('1,2.1'),
|
| 626 |
+
dtype=[('int', 'i8'), ('float', 'f8')],
|
| 627 |
+
delimiter=',')),
|
| 628 |
+
]
|
| 629 |
+
|
| 630 |
+
|
| 631 |
+
def test_nep35_functions_as_array_functions(self,):
|
| 632 |
+
all_array_functions = get_overridable_numpy_array_functions()
|
| 633 |
+
like_array_functions_subset = {
|
| 634 |
+
getattr(np, func_name) for func_name, *_ in self.__class__._array_tests
|
| 635 |
+
}
|
| 636 |
+
assert like_array_functions_subset.issubset(all_array_functions)
|
| 637 |
+
|
| 638 |
+
nep35_python_functions = {
|
| 639 |
+
np.eye, np.fromfunction, np.full, np.genfromtxt,
|
| 640 |
+
np.identity, np.loadtxt, np.ones, np.require, np.tri,
|
| 641 |
+
}
|
| 642 |
+
assert nep35_python_functions.issubset(all_array_functions)
|
| 643 |
+
|
| 644 |
+
nep35_C_functions = {
|
| 645 |
+
np.arange, np.array, np.asanyarray, np.asarray,
|
| 646 |
+
np.ascontiguousarray, np.asfortranarray, np.empty,
|
| 647 |
+
np.frombuffer, np.fromfile, np.fromiter, np.fromstring,
|
| 648 |
+
np.zeros,
|
| 649 |
+
}
|
| 650 |
+
assert nep35_C_functions.issubset(all_array_functions)
|
| 651 |
+
|
| 652 |
+
@pytest.mark.parametrize('function, args, kwargs', _array_tests)
|
| 653 |
+
@pytest.mark.parametrize('numpy_ref', [True, False])
|
| 654 |
+
def test_array_like(self, function, args, kwargs, numpy_ref):
|
| 655 |
+
self.add_method('array', self.MyArray)
|
| 656 |
+
self.add_method(function, self.MyArray)
|
| 657 |
+
np_func = getattr(np, function)
|
| 658 |
+
my_func = getattr(self.MyArray, function)
|
| 659 |
+
|
| 660 |
+
if numpy_ref is True:
|
| 661 |
+
ref = np.array(1)
|
| 662 |
+
else:
|
| 663 |
+
ref = self.MyArray.array()
|
| 664 |
+
|
| 665 |
+
like_args = tuple(a() if callable(a) else a for a in args)
|
| 666 |
+
array_like = np_func(*like_args, **kwargs, like=ref)
|
| 667 |
+
|
| 668 |
+
if numpy_ref is True:
|
| 669 |
+
assert type(array_like) is np.ndarray
|
| 670 |
+
|
| 671 |
+
np_args = tuple(a() if callable(a) else a for a in args)
|
| 672 |
+
np_arr = np_func(*np_args, **kwargs)
|
| 673 |
+
|
| 674 |
+
# Special-case np.empty to ensure values match
|
| 675 |
+
if function == "empty":
|
| 676 |
+
np_arr.fill(1)
|
| 677 |
+
array_like.fill(1)
|
| 678 |
+
|
| 679 |
+
assert_equal(array_like, np_arr)
|
| 680 |
+
else:
|
| 681 |
+
assert type(array_like) is self.MyArray
|
| 682 |
+
assert array_like.function is my_func
|
| 683 |
+
|
| 684 |
+
@pytest.mark.parametrize('function, args, kwargs', _array_tests)
|
| 685 |
+
@pytest.mark.parametrize('ref', [1, [1], "MyNoArrayFunctionArray"])
|
| 686 |
+
def test_no_array_function_like(self, function, args, kwargs, ref):
|
| 687 |
+
self.add_method('array', self.MyNoArrayFunctionArray)
|
| 688 |
+
self.add_method(function, self.MyNoArrayFunctionArray)
|
| 689 |
+
np_func = getattr(np, function)
|
| 690 |
+
|
| 691 |
+
# Instantiate ref if it's the MyNoArrayFunctionArray class
|
| 692 |
+
if ref == "MyNoArrayFunctionArray":
|
| 693 |
+
ref = self.MyNoArrayFunctionArray.array()
|
| 694 |
+
|
| 695 |
+
like_args = tuple(a() if callable(a) else a for a in args)
|
| 696 |
+
|
| 697 |
+
with assert_raises_regex(TypeError,
|
| 698 |
+
'The `like` argument must be an array-like that implements'):
|
| 699 |
+
np_func(*like_args, **kwargs, like=ref)
|
| 700 |
+
|
| 701 |
+
@pytest.mark.parametrize('function, args, kwargs', _array_tests)
|
| 702 |
+
def test_subclass(self, function, args, kwargs):
|
| 703 |
+
ref = np.array(1).view(self.MySubclass)
|
| 704 |
+
np_func = getattr(np, function)
|
| 705 |
+
like_args = tuple(a() if callable(a) else a for a in args)
|
| 706 |
+
array_like = np_func(*like_args, **kwargs, like=ref)
|
| 707 |
+
assert type(array_like) is self.MySubclass
|
| 708 |
+
if np_func is np.empty:
|
| 709 |
+
return
|
| 710 |
+
np_args = tuple(a() if callable(a) else a for a in args)
|
| 711 |
+
np_arr = np_func(*np_args, **kwargs)
|
| 712 |
+
assert_equal(array_like.view(np.ndarray), np_arr)
|
| 713 |
+
|
| 714 |
+
@pytest.mark.parametrize('numpy_ref', [True, False])
|
| 715 |
+
def test_array_like_fromfile(self, numpy_ref):
|
| 716 |
+
self.add_method('array', self.MyArray)
|
| 717 |
+
self.add_method("fromfile", self.MyArray)
|
| 718 |
+
|
| 719 |
+
if numpy_ref is True:
|
| 720 |
+
ref = np.array(1)
|
| 721 |
+
else:
|
| 722 |
+
ref = self.MyArray.array()
|
| 723 |
+
|
| 724 |
+
data = np.random.random(5)
|
| 725 |
+
|
| 726 |
+
with tempfile.TemporaryDirectory() as tmpdir:
|
| 727 |
+
fname = os.path.join(tmpdir, "testfile")
|
| 728 |
+
data.tofile(fname)
|
| 729 |
+
|
| 730 |
+
array_like = np.fromfile(fname, like=ref)
|
| 731 |
+
if numpy_ref is True:
|
| 732 |
+
assert type(array_like) is np.ndarray
|
| 733 |
+
np_res = np.fromfile(fname, like=ref)
|
| 734 |
+
assert_equal(np_res, data)
|
| 735 |
+
assert_equal(array_like, np_res)
|
| 736 |
+
else:
|
| 737 |
+
assert type(array_like) is self.MyArray
|
| 738 |
+
assert array_like.function is self.MyArray.fromfile
|
| 739 |
+
|
| 740 |
+
def test_exception_handling(self):
    """The bogus `value_error` keyword is rejected with TypeError before
    the dispatched implementation's own ValueError can fire."""
    self.add_method('array', self.MyArray, enable_value_error=True)
    ref = self.MyArray.array()
    with assert_raises(TypeError):
        # Raises the error about `value_error` being invalid first
        np.array(1, value_error=True, like=ref)
|
| 748 |
+
|
| 749 |
+
@pytest.mark.parametrize('function, args, kwargs', _array_tests)
def test_like_as_none(self, function, args, kwargs):
    """`like=None` must behave exactly like omitting `like` entirely."""
    self.add_method('array', self.MyArray)
    self.add_method(function, self.MyArray)
    numpy_func = getattr(np, function)

    args_with_like = tuple(a() if callable(a) else a for a in args)
    # required for loadtxt and genfromtxt to init w/o error.
    args_without_like = tuple(a() if callable(a) else a for a in args)

    result = numpy_func(*args_with_like, **kwargs, like=None)
    expected = numpy_func(*args_without_like, **kwargs)
    # Special-case np.empty to ensure values match
    if function == "empty":
        result.fill(1)
        expected.fill(1)
    assert_equal(result, expected)
|
| 766 |
+
|
| 767 |
+
|
| 768 |
+
def test_function_like():
    """np.mean is an _ArrayFunctionDispatcher implementing __get__, so
    it can be attached to classes as a static-, plain, or class-method."""
    # We provide a `__get__` implementation, make sure it works
    assert type(np.mean) is np._core._multiarray_umath._ArrayFunctionDispatcher

    class MyClass:
        def __array__(self, dtype=None, copy=None):
            # valid argument to mean:
            return np.arange(3)

        func1 = staticmethod(np.mean)
        func2 = np.mean
        func3 = classmethod(np.mean)

    obj = MyClass()
    assert obj.func1([10]) == 10
    assert obj.func2() == 1  # mean of the arange
    with pytest.raises(TypeError, match="unsupported operand type"):
        # Tries to operate on the class
        obj.func3()

    # Manual binding also works (the above may shortcut):
    assert np.mean.__get__(obj, MyClass)() == 1

    assert np.mean.__get__(None, MyClass)([10]) == 10  # unbound actually

    with pytest.raises(TypeError, match="unsupported operand type"):
        np.mean.__get__(MyClass)()  # classmethod
|
mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_records.py
ADDED
|
@@ -0,0 +1,540 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import collections.abc
|
| 2 |
+
import textwrap
|
| 3 |
+
from io import BytesIO
|
| 4 |
+
from os import path
|
| 5 |
+
from pathlib import Path
|
| 6 |
+
import pickle
|
| 7 |
+
|
| 8 |
+
import pytest
|
| 9 |
+
|
| 10 |
+
import numpy as np
|
| 11 |
+
from numpy.testing import (
|
| 12 |
+
assert_, assert_equal, assert_array_equal, assert_array_almost_equal,
|
| 13 |
+
assert_raises, temppath,
|
| 14 |
+
)
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class TestFromrecords:
|
| 18 |
+
def test_fromrecords(self):
|
| 19 |
+
r = np.rec.fromrecords([[456, 'dbe', 1.2], [2, 'de', 1.3]],
|
| 20 |
+
names='col1,col2,col3')
|
| 21 |
+
assert_equal(r[0].item(), (456, 'dbe', 1.2))
|
| 22 |
+
assert_equal(r['col1'].dtype.kind, 'i')
|
| 23 |
+
assert_equal(r['col2'].dtype.kind, 'U')
|
| 24 |
+
assert_equal(r['col2'].dtype.itemsize, 12)
|
| 25 |
+
assert_equal(r['col3'].dtype.kind, 'f')
|
| 26 |
+
|
| 27 |
+
def test_fromrecords_0len(self):
|
| 28 |
+
""" Verify fromrecords works with a 0-length input """
|
| 29 |
+
dtype = [('a', float), ('b', float)]
|
| 30 |
+
r = np.rec.fromrecords([], dtype=dtype)
|
| 31 |
+
assert_equal(r.shape, (0,))
|
| 32 |
+
|
| 33 |
+
def test_fromrecords_2d(self):
|
| 34 |
+
data = [
|
| 35 |
+
[(1, 2), (3, 4), (5, 6)],
|
| 36 |
+
[(6, 5), (4, 3), (2, 1)]
|
| 37 |
+
]
|
| 38 |
+
expected_a = [[1, 3, 5], [6, 4, 2]]
|
| 39 |
+
expected_b = [[2, 4, 6], [5, 3, 1]]
|
| 40 |
+
|
| 41 |
+
# try with dtype
|
| 42 |
+
r1 = np.rec.fromrecords(data, dtype=[('a', int), ('b', int)])
|
| 43 |
+
assert_equal(r1['a'], expected_a)
|
| 44 |
+
assert_equal(r1['b'], expected_b)
|
| 45 |
+
|
| 46 |
+
# try with names
|
| 47 |
+
r2 = np.rec.fromrecords(data, names=['a', 'b'])
|
| 48 |
+
assert_equal(r2['a'], expected_a)
|
| 49 |
+
assert_equal(r2['b'], expected_b)
|
| 50 |
+
|
| 51 |
+
assert_equal(r1, r2)
|
| 52 |
+
|
| 53 |
+
def test_method_array(self):
|
| 54 |
+
r = np.rec.array(
|
| 55 |
+
b'abcdefg' * 100, formats='i2,S3,i4', shape=3, byteorder='big'
|
| 56 |
+
)
|
| 57 |
+
assert_equal(r[1].item(), (25444, b'efg', 1633837924))
|
| 58 |
+
|
| 59 |
+
def test_method_array2(self):
|
| 60 |
+
r = np.rec.array(
|
| 61 |
+
[
|
| 62 |
+
(1, 11, 'a'), (2, 22, 'b'), (3, 33, 'c'), (4, 44, 'd'),
|
| 63 |
+
(5, 55, 'ex'), (6, 66, 'f'), (7, 77, 'g')
|
| 64 |
+
],
|
| 65 |
+
formats='u1,f4,S1'
|
| 66 |
+
)
|
| 67 |
+
assert_equal(r[1].item(), (2, 22.0, b'b'))
|
| 68 |
+
|
| 69 |
+
def test_recarray_slices(self):
|
| 70 |
+
r = np.rec.array(
|
| 71 |
+
[
|
| 72 |
+
(1, 11, 'a'), (2, 22, 'b'), (3, 33, 'c'), (4, 44, 'd'),
|
| 73 |
+
(5, 55, 'ex'), (6, 66, 'f'), (7, 77, 'g')
|
| 74 |
+
],
|
| 75 |
+
formats='u1,f4,S1'
|
| 76 |
+
)
|
| 77 |
+
assert_equal(r[1::2][1].item(), (4, 44.0, b'd'))
|
| 78 |
+
|
| 79 |
+
def test_recarray_fromarrays(self):
|
| 80 |
+
x1 = np.array([1, 2, 3, 4])
|
| 81 |
+
x2 = np.array(['a', 'dd', 'xyz', '12'])
|
| 82 |
+
x3 = np.array([1.1, 2, 3, 4])
|
| 83 |
+
r = np.rec.fromarrays([x1, x2, x3], names='a,b,c')
|
| 84 |
+
assert_equal(r[1].item(), (2, 'dd', 2.0))
|
| 85 |
+
x1[1] = 34
|
| 86 |
+
assert_equal(r.a, np.array([1, 2, 3, 4]))
|
| 87 |
+
|
| 88 |
+
def test_recarray_fromfile(self):
    """np.rec.fromfile and np.rec.array read identical records from a
    real file object and from an in-memory BytesIO copy of it."""
    data_dir = path.join(path.dirname(__file__), 'data')
    filename = path.join(data_dir, 'recarray_from_file.fits')
    # Use a context manager so the descriptor is closed even when an
    # assertion or read fails (the original leaked the fd on failure).
    with open(filename, 'rb') as fd:
        fd.seek(2880 * 2)  # skip the first two 2880-byte blocks
        r1 = np.rec.fromfile(fd, formats='f8,i4,S5', shape=3,
                             byteorder='big')
        fd.seek(2880 * 2)
        r2 = np.rec.array(fd, formats='f8,i4,S5', shape=3, byteorder='big')
        fd.seek(2880 * 2)
        bytes_array = BytesIO()
        bytes_array.write(fd.read())
        bytes_array.seek(0)
        r3 = np.rec.fromfile(
            bytes_array, formats='f8,i4,S5', shape=3, byteorder='big'
        )
    assert_equal(r1, r2)
    assert_equal(r2, r3)
|
| 106 |
+
|
| 107 |
+
def test_recarray_from_obj(self):
|
| 108 |
+
count = 10
|
| 109 |
+
a = np.zeros(count, dtype='O')
|
| 110 |
+
b = np.zeros(count, dtype='f8')
|
| 111 |
+
c = np.zeros(count, dtype='f8')
|
| 112 |
+
for i in range(len(a)):
|
| 113 |
+
a[i] = list(range(1, 10))
|
| 114 |
+
|
| 115 |
+
mine = np.rec.fromarrays([a, b, c], names='date,data1,data2')
|
| 116 |
+
for i in range(len(a)):
|
| 117 |
+
assert_(mine.date[i] == list(range(1, 10)))
|
| 118 |
+
assert_(mine.data1[i] == 0.0)
|
| 119 |
+
assert_(mine.data2[i] == 0.0)
|
| 120 |
+
|
| 121 |
+
def test_recarray_repr(self):
    """recarray reprs print as ``rec.array`` and never show the
    ``numpy.record`` scalar type in the displayed dtype."""
    a = np.array([(1, 0.1), (2, 0.2)],
                 dtype=[('foo', '<i4'), ('bar', '<f8')])
    a = np.rec.array(a)
    assert_equal(
        repr(a),
        textwrap.dedent("""\
        rec.array([(1, 0.1), (2, 0.2)],
                  dtype=[('foo', '<i4'), ('bar', '<f8')])""")
    )

    # make sure non-structured dtypes also show up as rec.array
    a = np.array(np.ones(4, dtype='f8'))
    assert_(repr(np.rec.array(a)).startswith('rec.array'))

    # check that the 'np.record' part of the dtype isn't shown
    a = np.rec.array(np.ones(3, dtype='i4,i4'))
    assert_equal(repr(a).find('numpy.record'), -1)
    # non-structured recarrays show their plain dtype instead
    a = np.rec.array(np.ones(3, dtype='i4'))
    assert_(repr(a).find('dtype=int32') != -1)
|
| 141 |
+
|
| 142 |
+
def test_0d_recarray_repr(self):
|
| 143 |
+
arr_0d = np.rec.array((1, 2.0, '2003'), dtype='<i4,<f8,<M8[Y]')
|
| 144 |
+
assert_equal(repr(arr_0d), textwrap.dedent("""\
|
| 145 |
+
rec.array((1, 2., '2003'),
|
| 146 |
+
dtype=[('f0', '<i4'), ('f1', '<f8'), ('f2', '<M8[Y]')])"""))
|
| 147 |
+
|
| 148 |
+
record = arr_0d[()]
|
| 149 |
+
assert_equal(repr(record),
|
| 150 |
+
"np.record((1, 2.0, '2003'), "
|
| 151 |
+
"dtype=[('f0', '<i4'), ('f1', '<f8'), ('f2', '<M8[Y]')])")
|
| 152 |
+
# 1.13 converted to python scalars before the repr
|
| 153 |
+
try:
|
| 154 |
+
np.set_printoptions(legacy='1.13')
|
| 155 |
+
assert_equal(repr(record), '(1, 2.0, datetime.date(2003, 1, 1))')
|
| 156 |
+
finally:
|
| 157 |
+
np.set_printoptions(legacy=False)
|
| 158 |
+
|
| 159 |
+
def test_recarray_from_repr(self):
    """Evaluating a recarray's repr reconstructs an equal recarray with
    record dtype; a plain record-dtype ndarray view reprs back to an
    ndarray (not a recarray)."""
    a = np.array([(1,'ABC'), (2, "DEF")],
                 dtype=[('foo', int), ('bar', 'S4')])
    recordarr = np.rec.array(a)
    recarr = a.view(np.recarray)
    # ndarray with a record dtype but NOT the recarray subclass
    recordview = a.view(np.dtype((np.record, a.dtype)))

    recordarr_r = eval("np." + repr(recordarr), {'np': np})
    recarr_r = eval("np." + repr(recarr), {'np': np})
    # Prints the type `numpy.record` as part of the dtype:
    recordview_r = eval("np." + repr(recordview), {'np': np, 'numpy': np})

    assert_equal(type(recordarr_r), np.recarray)
    assert_equal(recordarr_r.dtype.type, np.record)
    assert_equal(recordarr, recordarr_r)

    assert_equal(type(recarr_r), np.recarray)
    assert_equal(recarr_r.dtype.type, np.record)
    assert_equal(recarr, recarr_r)

    # the plain-ndarray view round-trips as an ndarray
    assert_equal(type(recordview_r), np.ndarray)
    assert_equal(recordview.dtype.type, np.record)
    assert_equal(recordview, recordview_r)
|
| 182 |
+
|
| 183 |
+
def test_recarray_views(self):
    """Views, getitem and np.rec.array must preserve the np.recarray
    array type, and use the np.record scalar type only for structured
    dtypes (not for subarrays, plain scalars, or unstructured voids)."""
    a = np.array([(1,'ABC'), (2, "DEF")],
                 dtype=[('foo', int), ('bar', 'S4')])
    b = np.array([1,2,3,4,5], dtype=np.int64)

    #check that np.rec.array gives right dtypes
    assert_equal(np.rec.array(a).dtype.type, np.record)
    assert_equal(type(np.rec.array(a)), np.recarray)
    assert_equal(np.rec.array(b).dtype.type, np.int64)
    assert_equal(type(np.rec.array(b)), np.recarray)

    #check that viewing as recarray does the same
    assert_equal(a.view(np.recarray).dtype.type, np.record)
    assert_equal(type(a.view(np.recarray)), np.recarray)
    assert_equal(b.view(np.recarray).dtype.type, np.int64)
    assert_equal(type(b.view(np.recarray)), np.recarray)

    #check that view to non-structured dtype preserves type=np.recarray
    r = np.rec.array(np.ones(4, dtype="f4,i4"))
    rv = r.view('f8').view('f4,i4')
    assert_equal(type(rv), np.recarray)
    assert_equal(rv.dtype.type, np.record)

    #check that getitem also preserves np.recarray and np.record
    r = np.rec.array(np.ones(4, dtype=[('a', 'i4'), ('b', 'i4'),
                                       ('c', 'i4,i4')]))
    assert_equal(r['c'].dtype.type, np.record)
    assert_equal(type(r['c']), np.recarray)

    #and that it preserves subclasses (gh-6949)
    class C(np.recarray):
        pass

    c = r.view(C)
    assert_equal(type(c['c']), C)

    # check that accessing nested structures keep record type, but
    # not for subarrays, non-void structures, non-structured voids
    test_dtype = [('a', 'f4,f4'), ('b', 'V8'), ('c', ('f4',2)),
                  ('d', ('i8', 'i4,i4'))]
    r = np.rec.array([((1,1), b'11111111', [1,1], 1),
                      ((1,1), b'11111111', [1,1], 1)], dtype=test_dtype)
    assert_equal(r.a.dtype.type, np.record)
    assert_equal(r.b.dtype.type, np.void)
    assert_equal(r.c.dtype.type, np.float32)
    assert_equal(r.d.dtype.type, np.int64)
    # check the same, but for views
    r = np.rec.array(np.ones(4, dtype='i4,i4'))
    assert_equal(r.view('f4,f4').dtype.type, np.record)
    assert_equal(r.view(('i4',2)).dtype.type, np.int32)
    assert_equal(r.view('V8').dtype.type, np.void)
    assert_equal(r.view(('i8', 'i4,i4')).dtype.type, np.int64)

    #check that we can undo the view
    arrs = [np.ones(4, dtype='f4,i4'), np.ones(4, dtype='f8')]
    for arr in arrs:
        rec = np.rec.array(arr)
        # recommended way to view as an ndarray:
        arr2 = rec.view(rec.dtype.fields or rec.dtype, np.ndarray)
        assert_equal(arr2.dtype.type, arr.dtype.type)
        assert_equal(type(arr2), type(arr))
|
| 244 |
+
|
| 245 |
+
def test_recarray_from_names(self):
|
| 246 |
+
ra = np.rec.array([
|
| 247 |
+
(1, 'abc', 3.7000002861022949, 0),
|
| 248 |
+
(2, 'xy', 6.6999998092651367, 1),
|
| 249 |
+
(0, ' ', 0.40000000596046448, 0)],
|
| 250 |
+
names='c1, c2, c3, c4')
|
| 251 |
+
pa = np.rec.fromrecords([
|
| 252 |
+
(1, 'abc', 3.7000002861022949, 0),
|
| 253 |
+
(2, 'xy', 6.6999998092651367, 1),
|
| 254 |
+
(0, ' ', 0.40000000596046448, 0)],
|
| 255 |
+
names='c1, c2, c3, c4')
|
| 256 |
+
assert_(ra.dtype == pa.dtype)
|
| 257 |
+
assert_(ra.shape == pa.shape)
|
| 258 |
+
for k in range(len(ra)):
|
| 259 |
+
assert_(ra[k].item() == pa[k].item())
|
| 260 |
+
|
| 261 |
+
def test_recarray_conflict_fields(self):
    """Field names that collide with ndarray attributes ('field',
    'shape', 'mean'): attribute writes reach the field unless a real
    ndarray attribute accepts the value, while attribute reads of real
    ndarray attributes win over same-named fields."""
    ra = np.rec.array([(1, 'abc', 2.3), (2, 'xyz', 4.2),
                       (3, 'wrs', 1.3)],
                      names='field, shape, mean')
    ra.mean = [1.1, 2.2, 3.3]
    assert_array_almost_equal(ra['mean'], [1.1, 2.2, 3.3])
    # reading .mean still resolves to the ndarray method (same kind of
    # object as the .var method), not the freshly written field
    assert_(type(ra.mean) is type(ra.var))
    # a valid tuple assigned to .shape reshapes the array itself ...
    ra.shape = (1, 3)
    assert_(ra.shape == (1, 3))
    # ... while a non-shape value falls through to the 'shape' field
    ra.shape = ['A', 'B', 'C']
    assert_array_equal(ra['shape'], [['A', 'B', 'C']])
    ra.field = 5
    assert_array_equal(ra['field'], [[5, 5, 5]])
    # reading .field yields a callable (presumably recarray.field) —
    # the data stays reachable only via ra['field']
    assert_(isinstance(ra.field, collections.abc.Callable))
|
| 275 |
+
|
| 276 |
+
def test_fromrecords_with_explicit_dtype(self):
|
| 277 |
+
a = np.rec.fromrecords([(1, 'a'), (2, 'bbb')],
|
| 278 |
+
dtype=[('a', int), ('b', object)])
|
| 279 |
+
assert_equal(a.a, [1, 2])
|
| 280 |
+
assert_equal(a[0].a, 1)
|
| 281 |
+
assert_equal(a.b, ['a', 'bbb'])
|
| 282 |
+
assert_equal(a[-1].b, 'bbb')
|
| 283 |
+
#
|
| 284 |
+
ndtype = np.dtype([('a', int), ('b', object)])
|
| 285 |
+
a = np.rec.fromrecords([(1, 'a'), (2, 'bbb')], dtype=ndtype)
|
| 286 |
+
assert_equal(a.a, [1, 2])
|
| 287 |
+
assert_equal(a[0].a, 1)
|
| 288 |
+
assert_equal(a.b, ['a', 'bbb'])
|
| 289 |
+
assert_equal(a[-1].b, 'bbb')
|
| 290 |
+
|
| 291 |
+
def test_recarray_stringtypes(self):
|
| 292 |
+
# Issue #3993
|
| 293 |
+
a = np.array([('abc ', 1), ('abc', 2)],
|
| 294 |
+
dtype=[('foo', 'S4'), ('bar', int)])
|
| 295 |
+
a = a.view(np.recarray)
|
| 296 |
+
assert_equal(a.foo[0] == a.foo[1], False)
|
| 297 |
+
|
| 298 |
+
def test_recarray_returntypes(self):
    """Access returns np.recarray for structured fields, plain ndarray
    for unstructured fields, and np.record for structured scalars —
    identically for attribute and item access."""
    qux_fields = {'C': (np.dtype('S5'), 0), 'D': (np.dtype('S5'), 6)}
    a = np.rec.array([('abc ', (1,1), 1, ('abcde', 'fgehi')),
                      ('abc', (2,3), 1, ('abcde', 'jklmn'))],
                     dtype=[('foo', 'S4'),
                            ('bar', [('A', int), ('B', int)]),
                            ('baz', int), ('qux', qux_fields)])
    # unstructured fields -> ndarray
    assert_equal(type(a.foo), np.ndarray)
    assert_equal(type(a['foo']), np.ndarray)
    # structured fields -> recarray with record scalar type
    assert_equal(type(a.bar), np.recarray)
    assert_equal(type(a['bar']), np.recarray)
    assert_equal(a.bar.dtype.type, np.record)
    assert_equal(type(a['qux']), np.recarray)
    assert_equal(a.qux.dtype.type, np.record)
    # the offset-dict dtype spec round-trips through the field
    assert_equal(dict(a.qux.dtype.fields), qux_fields)
    assert_equal(type(a.baz), np.ndarray)
    assert_equal(type(a['baz']), np.ndarray)
    # structured scalars -> np.record, nested access works both ways
    assert_equal(type(a[0].bar), np.record)
    assert_equal(type(a[0]['bar']), np.record)
    assert_equal(a[0].bar.A, 1)
    assert_equal(a[0].bar['A'], 1)
    assert_equal(a[0]['bar'].A, 1)
    assert_equal(a[0]['bar']['A'], 1)
    assert_equal(a[0].qux.D, b'fgehi')
    assert_equal(a[0].qux['D'], b'fgehi')
    assert_equal(a[0]['qux'].D, b'fgehi')
    assert_equal(a[0]['qux']['D'], b'fgehi')
|
| 325 |
+
|
| 326 |
+
def test_zero_width_strings(self):
|
| 327 |
+
# Test for #6430, based on the test case from #1901
|
| 328 |
+
|
| 329 |
+
cols = [['test'] * 3, [''] * 3]
|
| 330 |
+
rec = np.rec.fromarrays(cols)
|
| 331 |
+
assert_equal(rec['f0'], ['test', 'test', 'test'])
|
| 332 |
+
assert_equal(rec['f1'], ['', '', ''])
|
| 333 |
+
|
| 334 |
+
dt = np.dtype([('f0', '|S4'), ('f1', '|S')])
|
| 335 |
+
rec = np.rec.fromarrays(cols, dtype=dt)
|
| 336 |
+
assert_equal(rec.itemsize, 4)
|
| 337 |
+
assert_equal(rec['f0'], [b'test', b'test', b'test'])
|
| 338 |
+
assert_equal(rec['f1'], [b'', b'', b''])
|
| 339 |
+
|
| 340 |
+
|
| 341 |
+
class TestPathUsage:
|
| 342 |
+
# Test that pathlib.Path can be used
|
| 343 |
+
def test_tofile_fromfile(self):
|
| 344 |
+
with temppath(suffix='.bin') as path:
|
| 345 |
+
path = Path(path)
|
| 346 |
+
np.random.seed(123)
|
| 347 |
+
a = np.random.rand(10).astype('f8,i4,S5')
|
| 348 |
+
a[5] = (0.5,10,'abcde')
|
| 349 |
+
with path.open("wb") as fd:
|
| 350 |
+
a.tofile(fd)
|
| 351 |
+
x = np._core.records.fromfile(
|
| 352 |
+
path, formats='f8,i4,S5', shape=10
|
| 353 |
+
)
|
| 354 |
+
assert_array_equal(x, a)
|
| 355 |
+
|
| 356 |
+
|
| 357 |
+
class TestRecord:
|
| 358 |
+
def setup_method(self):
    """Create the shared two-row, three-int32-column record fixture."""
    fixture_dtype = [("col1", "<i4"), ("col2", "<i4"), ("col3", "<i4")]
    self.data = np.rec.fromrecords([(1, 2, 3), (4, 5, 6)],
                                   dtype=fixture_dtype)
|
| 363 |
+
|
| 364 |
+
def test_assignment1(self):
|
| 365 |
+
a = self.data
|
| 366 |
+
assert_equal(a.col1[0], 1)
|
| 367 |
+
a[0].col1 = 0
|
| 368 |
+
assert_equal(a.col1[0], 0)
|
| 369 |
+
|
| 370 |
+
def test_assignment2(self):
|
| 371 |
+
a = self.data
|
| 372 |
+
assert_equal(a.col1[0], 1)
|
| 373 |
+
a.col1[0] = 0
|
| 374 |
+
assert_equal(a.col1[0], 0)
|
| 375 |
+
|
| 376 |
+
def test_invalid_assignment(self):
|
| 377 |
+
a = self.data
|
| 378 |
+
|
| 379 |
+
def assign_invalid_column(x):
|
| 380 |
+
x[0].col5 = 1
|
| 381 |
+
|
| 382 |
+
assert_raises(AttributeError, assign_invalid_column, a)
|
| 383 |
+
|
| 384 |
+
def test_nonwriteable_setfield(self):
|
| 385 |
+
# gh-8171
|
| 386 |
+
r = np.rec.array([(0,), (1,)], dtype=[('f', 'i4')])
|
| 387 |
+
r.flags.writeable = False
|
| 388 |
+
with assert_raises(ValueError):
|
| 389 |
+
r.f = [2, 3]
|
| 390 |
+
with assert_raises(ValueError):
|
| 391 |
+
r.setfield([2,3], *r.dtype.fields['f'])
|
| 392 |
+
|
| 393 |
+
def test_out_of_order_fields(self):
|
| 394 |
+
# names in the same order, padding added to descr
|
| 395 |
+
x = self.data[['col1', 'col2']]
|
| 396 |
+
assert_equal(x.dtype.names, ('col1', 'col2'))
|
| 397 |
+
assert_equal(x.dtype.descr,
|
| 398 |
+
[('col1', '<i4'), ('col2', '<i4'), ('', '|V4')])
|
| 399 |
+
|
| 400 |
+
# names change order to match indexing, as of 1.14 - descr can't
|
| 401 |
+
# represent that
|
| 402 |
+
y = self.data[['col2', 'col1']]
|
| 403 |
+
assert_equal(y.dtype.names, ('col2', 'col1'))
|
| 404 |
+
assert_raises(ValueError, lambda: y.dtype.descr)
|
| 405 |
+
|
| 406 |
+
def test_pickle_1(self):
|
| 407 |
+
# Issue #1529
|
| 408 |
+
a = np.array([(1, [])], dtype=[('a', np.int32), ('b', np.int32, 0)])
|
| 409 |
+
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
|
| 410 |
+
assert_equal(a, pickle.loads(pickle.dumps(a, protocol=proto)))
|
| 411 |
+
assert_equal(a[0], pickle.loads(pickle.dumps(a[0],
|
| 412 |
+
protocol=proto)))
|
| 413 |
+
|
| 414 |
+
def test_pickle_2(self):
|
| 415 |
+
a = self.data
|
| 416 |
+
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
|
| 417 |
+
assert_equal(a, pickle.loads(pickle.dumps(a, protocol=proto)))
|
| 418 |
+
assert_equal(a[0], pickle.loads(pickle.dumps(a[0],
|
| 419 |
+
protocol=proto)))
|
| 420 |
+
|
| 421 |
+
def test_pickle_3(self):
|
| 422 |
+
# Issue #7140
|
| 423 |
+
a = self.data
|
| 424 |
+
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
|
| 425 |
+
pa = pickle.loads(pickle.dumps(a[0], protocol=proto))
|
| 426 |
+
assert_(pa.flags.c_contiguous)
|
| 427 |
+
assert_(pa.flags.f_contiguous)
|
| 428 |
+
assert_(pa.flags.writeable)
|
| 429 |
+
assert_(pa.flags.aligned)
|
| 430 |
+
|
| 431 |
+
def test_pickle_void(self):
    # issue gh-13593
    """Pickling a void scalar with object fields must serialize the
    contained objects themselves, never a raw memory address."""
    dt = np.dtype([('obj', 'O'), ('int', 'i')])
    a = np.empty(1, dtype=dt)
    data = (bytearray(b'eman'),)
    a['obj'] = data
    a['int'] = 42
    ctor, args = a[0].__reduce__()
    # check the constructor is what we expect before interpreting the arguments
    assert ctor is np._core.multiarray.scalar
    dtype, obj = args
    # make sure we did not pickle the address
    assert not isinstance(obj, bytes)

    # reconstructing from a non-buffer argument must be rejected
    assert_raises(RuntimeError, ctor, dtype, 13)

    # Test roundtrip:
    dump = pickle.dumps(a[0])
    unpickled = pickle.loads(dump)
    assert a[0] == unpickled

    # Also check the similar (impossible) "object scalar" path:
    with pytest.warns(DeprecationWarning):
        assert ctor(np.dtype("O"), data) is data
|
| 455 |
+
|
| 456 |
+
def test_objview_record(self):
|
| 457 |
+
# https://github.com/numpy/numpy/issues/2599
|
| 458 |
+
dt = np.dtype([('foo', 'i8'), ('bar', 'O')])
|
| 459 |
+
r = np.zeros((1, 3), dtype=dt).view(np.recarray)
|
| 460 |
+
r.foo = np.array([1, 2, 3]) # TypeError?
|
| 461 |
+
|
| 462 |
+
# https://github.com/numpy/numpy/issues/3256
|
| 463 |
+
ra = np.recarray(
|
| 464 |
+
(2,), dtype=[('x', object), ('y', float), ('z', int)]
|
| 465 |
+
)
|
| 466 |
+
ra[['x','y']] # TypeError?
|
| 467 |
+
|
| 468 |
+
def test_record_scalar_setitem(self):
|
| 469 |
+
# https://github.com/numpy/numpy/issues/3561
|
| 470 |
+
rec = np.recarray(1, dtype=[('x', float, 5)])
|
| 471 |
+
rec[0].x = 1
|
| 472 |
+
assert_equal(rec[0].x, np.ones(5))
|
| 473 |
+
|
| 474 |
+
def test_missing_field(self):
|
| 475 |
+
# https://github.com/numpy/numpy/issues/4806
|
| 476 |
+
arr = np.zeros((3,), dtype=[('x', int), ('y', int)])
|
| 477 |
+
assert_raises(KeyError, lambda: arr[['nofield']])
|
| 478 |
+
|
| 479 |
+
def test_fromarrays_nested_structured_arrays(self):
|
| 480 |
+
arrays = [
|
| 481 |
+
np.arange(10),
|
| 482 |
+
np.ones(10, dtype=[('a', '<u2'), ('b', '<f4')]),
|
| 483 |
+
]
|
| 484 |
+
arr = np.rec.fromarrays(arrays) # ValueError?
|
| 485 |
+
|
| 486 |
+
@pytest.mark.parametrize('nfields', [0, 1, 2])
def test_assign_dtype_attribute(self, nfields):
    """Assigning a plain structured dtype to a recarray's .dtype must
    keep the np.record wrapper type."""
    base = np.dtype([('a', np.uint8), ('b', np.uint8), ('c', np.uint8)][:nfields])
    view = np.zeros(3, base).view(np.recarray)

    # the original and resulting dtypes differ on whether they are records
    assert view.dtype.type == np.record
    assert base.type != np.record

    # ensure that the dtype remains a record even when assigned
    view.dtype = base
    assert view.dtype.type == np.record
|
| 498 |
+
|
| 499 |
+
@pytest.mark.parametrize('nfields', [0, 1, 2])
def test_nested_fields_are_records(self, nfields):
    """ Test that nested structured types are treated as records too """
    inner = np.dtype([('a', np.uint8), ('b', np.uint8), ('c', np.uint8)][:nfields])
    outer = np.dtype([('inner', inner)])

    arr = np.zeros(3, outer).view(np.recarray)
    assert isinstance(arr, np.recarray)
    assert isinstance(arr['inner'], np.recarray)

    first = arr[0]
    assert isinstance(first, np.record)
    assert isinstance(first['inner'], np.record)
|
| 512 |
+
|
| 513 |
+
def test_nested_dtype_padding(self):
|
| 514 |
+
""" test that trailing padding is preserved """
|
| 515 |
+
# construct a dtype with padding at the end
|
| 516 |
+
dt = np.dtype([('a', np.uint8), ('b', np.uint8), ('c', np.uint8)])
|
| 517 |
+
dt_padded_end = dt[['a', 'b']]
|
| 518 |
+
assert dt_padded_end.itemsize == dt.itemsize
|
| 519 |
+
|
| 520 |
+
dt_outer = np.dtype([('inner', dt_padded_end)])
|
| 521 |
+
|
| 522 |
+
data = np.zeros(3, dt_outer).view(np.recarray)
|
| 523 |
+
assert_equal(data['inner'].dtype, dt_padded_end)
|
| 524 |
+
|
| 525 |
+
data0 = data[0]
|
| 526 |
+
assert_equal(data0['inner'].dtype, dt_padded_end)
|
| 527 |
+
|
| 528 |
+
|
| 529 |
+
def test_find_duplicate():
    """find_duplicate reports each repeated value once, ordered by its
    first duplicated occurrence."""
    assert_(np.rec.find_duplicate([1, 2, 3, 4, 5, 6]) == [])
    assert_(np.rec.find_duplicate([1, 2, 1, 4, 5, 6]) == [1])
    assert_(np.rec.find_duplicate([1, 2, 1, 4, 1, 6, 2, 3]) == [1, 2])
    # order follows first duplication, not value magnitude
    assert_(np.rec.find_duplicate([2, 2, 1, 4, 1, 6, 2, 3]) == [2, 1])
|
mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_scalar_ctors.py
ADDED
|
@@ -0,0 +1,204 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Test the scalar constructors, which also do type-coercion
|
| 3 |
+
"""
|
| 4 |
+
import pytest
|
| 5 |
+
|
| 6 |
+
import numpy as np
|
| 7 |
+
from numpy.testing import (
|
| 8 |
+
assert_equal, assert_almost_equal, assert_warns,
|
| 9 |
+
)
|
| 10 |
+
|
| 11 |
+
class TestFromString:
    """Float scalar construction from strings (ticket #640)."""

    def test_floating(self):
        # Ticket #640, floats from string
        for ctor in (np.single, np.double, np.longdouble):
            assert_almost_equal(ctor('1.234'), 1.234)

    def test_floating_overflow(self):
        """ Strings containing an unrepresentable float overflow """
        for sign, expected in (('', np.inf), ('-', np.NINF if hasattr(np, 'NINF') else -np.inf)):
            text = sign + '1e10000'
            assert_equal(np.half(text), expected)
            assert_equal(np.single(text), expected)
            assert_equal(np.double(text), expected)
            # np.longdouble additionally warns on overflow
            overflowed = assert_warns(RuntimeWarning, np.longdouble, text)
            assert_equal(overflowed, expected)
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
class TestExtraArgs:
    """Scalar constructors accept extra positional/keyword arguments
    only where they are meaningful, and reject garbage keywords."""

    def test_superclass(self):
        # try both positional and keyword arguments
        decoded = np.str_(b'\\x61', encoding='unicode-escape')
        assert decoded == 'a'
        decoded = np.str_(b'\\x61', 'unicode-escape')
        assert decoded == 'a'

        # previously this would return '\\xx'
        with pytest.raises(UnicodeDecodeError):
            np.str_(b'\\xx', encoding='unicode-escape')
        with pytest.raises(UnicodeDecodeError):
            np.str_(b'\\xx', 'unicode-escape')

        # superclass fails, but numpy succeeds
        assert np.bytes_(-2) == b'-2'

    def test_datetime(self):
        stamp = np.datetime64('2000-01', ('M', 2))
        assert np.datetime_data(stamp) == ('M', 2)

        with pytest.raises(TypeError):
            np.datetime64('2000', garbage=True)

    def test_bool(self):
        with pytest.raises(TypeError):
            np.bool(False, garbage=True)

    def test_void(self):
        with pytest.raises(TypeError):
            np.void(b'test', garbage=True)
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
class TestFromInt:
|
| 76 |
+
def test_intp(self):
|
| 77 |
+
# Ticket #99
|
| 78 |
+
assert_equal(1024, np.intp(1024))
|
| 79 |
+
|
| 80 |
+
def test_uint64_from_negative(self):
|
| 81 |
+
with pytest.raises(OverflowError):
|
| 82 |
+
np.uint64(-2)
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
int_types = [np.byte, np.short, np.intc, np.long, np.longlong]
|
| 86 |
+
uint_types = [np.ubyte, np.ushort, np.uintc, np.ulong, np.ulonglong]
|
| 87 |
+
float_types = [np.half, np.single, np.double, np.longdouble]
|
| 88 |
+
cfloat_types = [np.csingle, np.cdouble, np.clongdouble]
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
class TestArrayFromScalar:
|
| 92 |
+
""" gh-15467 and gh-19125 """
|
| 93 |
+
|
| 94 |
+
def _do_test(self, t1, t2, arg=2):
|
| 95 |
+
if arg is None:
|
| 96 |
+
x = t1()
|
| 97 |
+
elif isinstance(arg, tuple):
|
| 98 |
+
if t1 is np.clongdouble:
|
| 99 |
+
pytest.xfail("creating a clongdouble from real and "
|
| 100 |
+
"imaginary parts isn't supported")
|
| 101 |
+
x = t1(*arg)
|
| 102 |
+
else:
|
| 103 |
+
x = t1(arg)
|
| 104 |
+
arr = np.array(x, dtype=t2)
|
| 105 |
+
# type should be preserved exactly
|
| 106 |
+
if t2 is None:
|
| 107 |
+
assert arr.dtype.type is t1
|
| 108 |
+
else:
|
| 109 |
+
assert arr.dtype.type is t2
|
| 110 |
+
|
| 111 |
+
@pytest.mark.parametrize('t1', int_types + uint_types)
|
| 112 |
+
@pytest.mark.parametrize('t2', int_types + uint_types + [None])
|
| 113 |
+
def test_integers(self, t1, t2):
|
| 114 |
+
return self._do_test(t1, t2)
|
| 115 |
+
|
| 116 |
+
@pytest.mark.parametrize('t1', float_types)
|
| 117 |
+
@pytest.mark.parametrize('t2', float_types + [None])
|
| 118 |
+
def test_reals(self, t1, t2):
|
| 119 |
+
return self._do_test(t1, t2)
|
| 120 |
+
|
| 121 |
+
@pytest.mark.parametrize('t1', cfloat_types)
|
| 122 |
+
@pytest.mark.parametrize('t2', cfloat_types + [None])
|
| 123 |
+
@pytest.mark.parametrize('arg', [2, 1 + 3j, (1, 2), None])
|
| 124 |
+
def test_complex(self, t1, t2, arg):
|
| 125 |
+
self._do_test(t1, t2, arg)
|
| 126 |
+
|
| 127 |
+
@pytest.mark.parametrize('t', cfloat_types)
|
| 128 |
+
def test_complex_errors(self, t):
|
| 129 |
+
with pytest.raises(TypeError):
|
| 130 |
+
t(1j, 1j)
|
| 131 |
+
with pytest.raises(TypeError):
|
| 132 |
+
t(1, None)
|
| 133 |
+
with pytest.raises(TypeError):
|
| 134 |
+
t(None, 1)
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
@pytest.mark.parametrize("length",
|
| 138 |
+
[5, np.int8(5), np.array(5, dtype=np.uint16)])
|
| 139 |
+
def test_void_via_length(length):
|
| 140 |
+
res = np.void(length)
|
| 141 |
+
assert type(res) is np.void
|
| 142 |
+
assert res.item() == b"\0" * 5
|
| 143 |
+
assert res.dtype == "V5"
|
| 144 |
+
|
| 145 |
+
@pytest.mark.parametrize("bytes_",
|
| 146 |
+
[b"spam", np.array(567.)])
|
| 147 |
+
def test_void_from_byteslike(bytes_):
|
| 148 |
+
res = np.void(bytes_)
|
| 149 |
+
expected = bytes(bytes_)
|
| 150 |
+
assert type(res) is np.void
|
| 151 |
+
assert res.item() == expected
|
| 152 |
+
|
| 153 |
+
# Passing dtype can extend it (this is how filling works)
|
| 154 |
+
res = np.void(bytes_, dtype="V100")
|
| 155 |
+
assert type(res) is np.void
|
| 156 |
+
assert res.item()[:len(expected)] == expected
|
| 157 |
+
assert res.item()[len(expected):] == b"\0" * (res.nbytes - len(expected))
|
| 158 |
+
# As well as shorten:
|
| 159 |
+
res = np.void(bytes_, dtype="V4")
|
| 160 |
+
assert type(res) is np.void
|
| 161 |
+
assert res.item() == expected[:4]
|
| 162 |
+
|
| 163 |
+
def test_void_arraylike_trumps_byteslike():
|
| 164 |
+
# The memoryview is converted as an array-like of shape (18,)
|
| 165 |
+
# rather than a single bytes-like of that length.
|
| 166 |
+
m = memoryview(b"just one mintleaf?")
|
| 167 |
+
res = np.void(m)
|
| 168 |
+
assert type(res) is np.ndarray
|
| 169 |
+
assert res.dtype == "V1"
|
| 170 |
+
assert res.shape == (18,)
|
| 171 |
+
|
| 172 |
+
def test_void_dtype_arg():
|
| 173 |
+
# Basic test for the dtype argument (positional and keyword)
|
| 174 |
+
res = np.void((1, 2), dtype="i,i")
|
| 175 |
+
assert res.item() == (1, 2)
|
| 176 |
+
res = np.void((2, 3), "i,i")
|
| 177 |
+
assert res.item() == (2, 3)
|
| 178 |
+
|
| 179 |
+
@pytest.mark.parametrize("data",
|
| 180 |
+
[5, np.int8(5), np.array(5, dtype=np.uint16)])
|
| 181 |
+
def test_void_from_integer_with_dtype(data):
|
| 182 |
+
# The "length" meaning is ignored, rather data is used:
|
| 183 |
+
res = np.void(data, dtype="i,i")
|
| 184 |
+
assert type(res) is np.void
|
| 185 |
+
assert res.dtype == "i,i"
|
| 186 |
+
assert res["f0"] == 5 and res["f1"] == 5
|
| 187 |
+
|
| 188 |
+
def test_void_from_structure():
|
| 189 |
+
dtype = np.dtype([('s', [('f', 'f8'), ('u', 'U1')]), ('i', 'i2')])
|
| 190 |
+
data = np.array(((1., 'a'), 2), dtype=dtype)
|
| 191 |
+
res = np.void(data[()], dtype=dtype)
|
| 192 |
+
assert type(res) is np.void
|
| 193 |
+
assert res.dtype == dtype
|
| 194 |
+
assert res == data[()]
|
| 195 |
+
|
| 196 |
+
def test_void_bad_dtype():
|
| 197 |
+
with pytest.raises(TypeError,
|
| 198 |
+
match="void: descr must be a `void.*int64"):
|
| 199 |
+
np.void(4, dtype="i8")
|
| 200 |
+
|
| 201 |
+
# Subarray dtype (with shape `(4,)` is rejected):
|
| 202 |
+
with pytest.raises(TypeError,
|
| 203 |
+
match=r"void: descr must be a `void.*\(4,\)"):
|
| 204 |
+
np.void(4, dtype="4i")
|
mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_scalarbuffer.py
ADDED
|
@@ -0,0 +1,153 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Test scalar buffer interface adheres to PEP 3118
|
| 3 |
+
"""
|
| 4 |
+
import numpy as np
|
| 5 |
+
from numpy._core._rational_tests import rational
|
| 6 |
+
from numpy._core._multiarray_tests import get_buffer_info
|
| 7 |
+
import pytest
|
| 8 |
+
|
| 9 |
+
from numpy.testing import assert_, assert_equal, assert_raises
|
| 10 |
+
|
| 11 |
+
# PEP3118 format strings for native (standard alignment and byteorder) types
|
| 12 |
+
scalars_and_codes = [
|
| 13 |
+
(np.bool, '?'),
|
| 14 |
+
(np.byte, 'b'),
|
| 15 |
+
(np.short, 'h'),
|
| 16 |
+
(np.intc, 'i'),
|
| 17 |
+
(np.long, 'l'),
|
| 18 |
+
(np.longlong, 'q'),
|
| 19 |
+
(np.ubyte, 'B'),
|
| 20 |
+
(np.ushort, 'H'),
|
| 21 |
+
(np.uintc, 'I'),
|
| 22 |
+
(np.ulong, 'L'),
|
| 23 |
+
(np.ulonglong, 'Q'),
|
| 24 |
+
(np.half, 'e'),
|
| 25 |
+
(np.single, 'f'),
|
| 26 |
+
(np.double, 'd'),
|
| 27 |
+
(np.longdouble, 'g'),
|
| 28 |
+
(np.csingle, 'Zf'),
|
| 29 |
+
(np.cdouble, 'Zd'),
|
| 30 |
+
(np.clongdouble, 'Zg'),
|
| 31 |
+
]
|
| 32 |
+
scalars_only, codes_only = zip(*scalars_and_codes)
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
class TestScalarPEP3118:
|
| 36 |
+
|
| 37 |
+
@pytest.mark.parametrize('scalar', scalars_only, ids=codes_only)
|
| 38 |
+
def test_scalar_match_array(self, scalar):
|
| 39 |
+
x = scalar()
|
| 40 |
+
a = np.array([], dtype=np.dtype(scalar))
|
| 41 |
+
mv_x = memoryview(x)
|
| 42 |
+
mv_a = memoryview(a)
|
| 43 |
+
assert_equal(mv_x.format, mv_a.format)
|
| 44 |
+
|
| 45 |
+
@pytest.mark.parametrize('scalar', scalars_only, ids=codes_only)
|
| 46 |
+
def test_scalar_dim(self, scalar):
|
| 47 |
+
x = scalar()
|
| 48 |
+
mv_x = memoryview(x)
|
| 49 |
+
assert_equal(mv_x.itemsize, np.dtype(scalar).itemsize)
|
| 50 |
+
assert_equal(mv_x.ndim, 0)
|
| 51 |
+
assert_equal(mv_x.shape, ())
|
| 52 |
+
assert_equal(mv_x.strides, ())
|
| 53 |
+
assert_equal(mv_x.suboffsets, ())
|
| 54 |
+
|
| 55 |
+
@pytest.mark.parametrize('scalar, code', scalars_and_codes, ids=codes_only)
|
| 56 |
+
def test_scalar_code_and_properties(self, scalar, code):
|
| 57 |
+
x = scalar()
|
| 58 |
+
expected = dict(strides=(), itemsize=x.dtype.itemsize, ndim=0,
|
| 59 |
+
shape=(), format=code, readonly=True)
|
| 60 |
+
|
| 61 |
+
mv_x = memoryview(x)
|
| 62 |
+
assert self._as_dict(mv_x) == expected
|
| 63 |
+
|
| 64 |
+
@pytest.mark.parametrize('scalar', scalars_only, ids=codes_only)
|
| 65 |
+
def test_scalar_buffers_readonly(self, scalar):
|
| 66 |
+
x = scalar()
|
| 67 |
+
with pytest.raises(BufferError, match="scalar buffer is readonly"):
|
| 68 |
+
get_buffer_info(x, ["WRITABLE"])
|
| 69 |
+
|
| 70 |
+
def test_void_scalar_structured_data(self):
|
| 71 |
+
dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
|
| 72 |
+
x = np.array(('ndarray_scalar', (1.2, 3.0)), dtype=dt)[()]
|
| 73 |
+
assert_(isinstance(x, np.void))
|
| 74 |
+
mv_x = memoryview(x)
|
| 75 |
+
expected_size = 16 * np.dtype((np.str_, 1)).itemsize
|
| 76 |
+
expected_size += 2 * np.dtype(np.float64).itemsize
|
| 77 |
+
assert_equal(mv_x.itemsize, expected_size)
|
| 78 |
+
assert_equal(mv_x.ndim, 0)
|
| 79 |
+
assert_equal(mv_x.shape, ())
|
| 80 |
+
assert_equal(mv_x.strides, ())
|
| 81 |
+
assert_equal(mv_x.suboffsets, ())
|
| 82 |
+
|
| 83 |
+
# check scalar format string against ndarray format string
|
| 84 |
+
a = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt)
|
| 85 |
+
assert_(isinstance(a, np.ndarray))
|
| 86 |
+
mv_a = memoryview(a)
|
| 87 |
+
assert_equal(mv_x.itemsize, mv_a.itemsize)
|
| 88 |
+
assert_equal(mv_x.format, mv_a.format)
|
| 89 |
+
|
| 90 |
+
# Check that we do not allow writeable buffer export (technically
|
| 91 |
+
# we could allow it sometimes here...)
|
| 92 |
+
with pytest.raises(BufferError, match="scalar buffer is readonly"):
|
| 93 |
+
get_buffer_info(x, ["WRITABLE"])
|
| 94 |
+
|
| 95 |
+
def _as_dict(self, m):
|
| 96 |
+
return dict(strides=m.strides, shape=m.shape, itemsize=m.itemsize,
|
| 97 |
+
ndim=m.ndim, format=m.format, readonly=m.readonly)
|
| 98 |
+
|
| 99 |
+
def test_datetime_memoryview(self):
|
| 100 |
+
# gh-11656
|
| 101 |
+
# Values verified with v1.13.3, shape is not () as in test_scalar_dim
|
| 102 |
+
|
| 103 |
+
dt1 = np.datetime64('2016-01-01')
|
| 104 |
+
dt2 = np.datetime64('2017-01-01')
|
| 105 |
+
expected = dict(strides=(1,), itemsize=1, ndim=1, shape=(8,),
|
| 106 |
+
format='B', readonly=True)
|
| 107 |
+
v = memoryview(dt1)
|
| 108 |
+
assert self._as_dict(v) == expected
|
| 109 |
+
|
| 110 |
+
v = memoryview(dt2 - dt1)
|
| 111 |
+
assert self._as_dict(v) == expected
|
| 112 |
+
|
| 113 |
+
dt = np.dtype([('a', 'uint16'), ('b', 'M8[s]')])
|
| 114 |
+
a = np.empty(1, dt)
|
| 115 |
+
# Fails to create a PEP 3118 valid buffer
|
| 116 |
+
assert_raises((ValueError, BufferError), memoryview, a[0])
|
| 117 |
+
|
| 118 |
+
# Check that we do not allow writeable buffer export
|
| 119 |
+
with pytest.raises(BufferError, match="scalar buffer is readonly"):
|
| 120 |
+
get_buffer_info(dt1, ["WRITABLE"])
|
| 121 |
+
|
| 122 |
+
@pytest.mark.parametrize('s', [
|
| 123 |
+
pytest.param("\x32\x32", id="ascii"),
|
| 124 |
+
pytest.param("\uFE0F\uFE0F", id="basic multilingual"),
|
| 125 |
+
pytest.param("\U0001f4bb\U0001f4bb", id="non-BMP"),
|
| 126 |
+
])
|
| 127 |
+
def test_str_ucs4(self, s):
|
| 128 |
+
s = np.str_(s) # only our subclass implements the buffer protocol
|
| 129 |
+
|
| 130 |
+
# all the same, characters always encode as ucs4
|
| 131 |
+
expected = dict(strides=(), itemsize=8, ndim=0, shape=(), format='2w',
|
| 132 |
+
readonly=True)
|
| 133 |
+
|
| 134 |
+
v = memoryview(s)
|
| 135 |
+
assert self._as_dict(v) == expected
|
| 136 |
+
|
| 137 |
+
# integers of the paltform-appropriate endianness
|
| 138 |
+
code_points = np.frombuffer(v, dtype='i4')
|
| 139 |
+
|
| 140 |
+
assert_equal(code_points, [ord(c) for c in s])
|
| 141 |
+
|
| 142 |
+
# Check that we do not allow writeable buffer export
|
| 143 |
+
with pytest.raises(BufferError, match="scalar buffer is readonly"):
|
| 144 |
+
get_buffer_info(s, ["WRITABLE"])
|
| 145 |
+
|
| 146 |
+
def test_user_scalar_fails_buffer(self):
|
| 147 |
+
r = rational(1)
|
| 148 |
+
with assert_raises(TypeError):
|
| 149 |
+
memoryview(r)
|
| 150 |
+
|
| 151 |
+
# Check that we do not allow writeable buffer export
|
| 152 |
+
with pytest.raises(BufferError, match="scalar buffer is readonly"):
|
| 153 |
+
get_buffer_info(r, ["WRITABLE"])
|
mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_scalarprint.py
ADDED
|
@@ -0,0 +1,382 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
""" Test printing of scalar types.
|
| 2 |
+
|
| 3 |
+
"""
|
| 4 |
+
import code
|
| 5 |
+
import platform
|
| 6 |
+
import pytest
|
| 7 |
+
import sys
|
| 8 |
+
|
| 9 |
+
from tempfile import TemporaryFile
|
| 10 |
+
import numpy as np
|
| 11 |
+
from numpy.testing import assert_, assert_equal, assert_raises, IS_MUSL
|
| 12 |
+
|
| 13 |
+
class TestRealScalars:
|
| 14 |
+
def test_str(self):
|
| 15 |
+
svals = [0.0, -0.0, 1, -1, np.inf, -np.inf, np.nan]
|
| 16 |
+
styps = [np.float16, np.float32, np.float64, np.longdouble]
|
| 17 |
+
wanted = [
|
| 18 |
+
['0.0', '0.0', '0.0', '0.0' ],
|
| 19 |
+
['-0.0', '-0.0', '-0.0', '-0.0'],
|
| 20 |
+
['1.0', '1.0', '1.0', '1.0' ],
|
| 21 |
+
['-1.0', '-1.0', '-1.0', '-1.0'],
|
| 22 |
+
['inf', 'inf', 'inf', 'inf' ],
|
| 23 |
+
['-inf', '-inf', '-inf', '-inf'],
|
| 24 |
+
['nan', 'nan', 'nan', 'nan']]
|
| 25 |
+
|
| 26 |
+
for wants, val in zip(wanted, svals):
|
| 27 |
+
for want, styp in zip(wants, styps):
|
| 28 |
+
msg = 'for str({}({}))'.format(np.dtype(styp).name, repr(val))
|
| 29 |
+
assert_equal(str(styp(val)), want, err_msg=msg)
|
| 30 |
+
|
| 31 |
+
def test_scalar_cutoffs(self):
|
| 32 |
+
# test that both the str and repr of np.float64 behaves
|
| 33 |
+
# like python floats in python3.
|
| 34 |
+
def check(v):
|
| 35 |
+
assert_equal(str(np.float64(v)), str(v))
|
| 36 |
+
assert_equal(str(np.float64(v)), repr(v))
|
| 37 |
+
assert_equal(repr(np.float64(v)), f"np.float64({v!r})")
|
| 38 |
+
assert_equal(repr(np.float64(v)), f"np.float64({v})")
|
| 39 |
+
|
| 40 |
+
# check we use the same number of significant digits
|
| 41 |
+
check(1.12345678901234567890)
|
| 42 |
+
check(0.0112345678901234567890)
|
| 43 |
+
|
| 44 |
+
# check switch from scientific output to positional and back
|
| 45 |
+
check(1e-5)
|
| 46 |
+
check(1e-4)
|
| 47 |
+
check(1e15)
|
| 48 |
+
check(1e16)
|
| 49 |
+
|
| 50 |
+
def test_py2_float_print(self):
|
| 51 |
+
# gh-10753
|
| 52 |
+
# In python2, the python float type implements an obsolete method
|
| 53 |
+
# tp_print, which overrides tp_repr and tp_str when using "print" to
|
| 54 |
+
# output to a "real file" (ie, not a StringIO). Make sure we don't
|
| 55 |
+
# inherit it.
|
| 56 |
+
x = np.double(0.1999999999999)
|
| 57 |
+
with TemporaryFile('r+t') as f:
|
| 58 |
+
print(x, file=f)
|
| 59 |
+
f.seek(0)
|
| 60 |
+
output = f.read()
|
| 61 |
+
assert_equal(output, str(x) + '\n')
|
| 62 |
+
# In python2 the value float('0.1999999999999') prints with reduced
|
| 63 |
+
# precision as '0.2', but we want numpy's np.double('0.1999999999999')
|
| 64 |
+
# to print the unique value, '0.1999999999999'.
|
| 65 |
+
|
| 66 |
+
# gh-11031
|
| 67 |
+
# Only in the python2 interactive shell and when stdout is a "real"
|
| 68 |
+
# file, the output of the last command is printed to stdout without
|
| 69 |
+
# Py_PRINT_RAW (unlike the print statement) so `>>> x` and `>>> print
|
| 70 |
+
# x` are potentially different. Make sure they are the same. The only
|
| 71 |
+
# way I found to get prompt-like output is using an actual prompt from
|
| 72 |
+
# the 'code' module. Again, must use tempfile to get a "real" file.
|
| 73 |
+
|
| 74 |
+
# dummy user-input which enters one line and then ctrl-Ds.
|
| 75 |
+
def userinput():
|
| 76 |
+
yield 'np.sqrt(2)'
|
| 77 |
+
raise EOFError
|
| 78 |
+
gen = userinput()
|
| 79 |
+
input_func = lambda prompt="": next(gen)
|
| 80 |
+
|
| 81 |
+
with TemporaryFile('r+t') as fo, TemporaryFile('r+t') as fe:
|
| 82 |
+
orig_stdout, orig_stderr = sys.stdout, sys.stderr
|
| 83 |
+
sys.stdout, sys.stderr = fo, fe
|
| 84 |
+
|
| 85 |
+
code.interact(local={'np': np}, readfunc=input_func, banner='')
|
| 86 |
+
|
| 87 |
+
sys.stdout, sys.stderr = orig_stdout, orig_stderr
|
| 88 |
+
|
| 89 |
+
fo.seek(0)
|
| 90 |
+
capture = fo.read().strip()
|
| 91 |
+
|
| 92 |
+
assert_equal(capture, repr(np.sqrt(2)))
|
| 93 |
+
|
| 94 |
+
def test_dragon4(self):
|
| 95 |
+
# these tests are adapted from Ryan Juckett's dragon4 implementation,
|
| 96 |
+
# see dragon4.c for details.
|
| 97 |
+
|
| 98 |
+
fpos32 = lambda x, **k: np.format_float_positional(np.float32(x), **k)
|
| 99 |
+
fsci32 = lambda x, **k: np.format_float_scientific(np.float32(x), **k)
|
| 100 |
+
fpos64 = lambda x, **k: np.format_float_positional(np.float64(x), **k)
|
| 101 |
+
fsci64 = lambda x, **k: np.format_float_scientific(np.float64(x), **k)
|
| 102 |
+
|
| 103 |
+
preckwd = lambda prec: {'unique': False, 'precision': prec}
|
| 104 |
+
|
| 105 |
+
assert_equal(fpos32('1.0'), "1.")
|
| 106 |
+
assert_equal(fsci32('1.0'), "1.e+00")
|
| 107 |
+
assert_equal(fpos32('10.234'), "10.234")
|
| 108 |
+
assert_equal(fpos32('-10.234'), "-10.234")
|
| 109 |
+
assert_equal(fsci32('10.234'), "1.0234e+01")
|
| 110 |
+
assert_equal(fsci32('-10.234'), "-1.0234e+01")
|
| 111 |
+
assert_equal(fpos32('1000.0'), "1000.")
|
| 112 |
+
assert_equal(fpos32('1.0', precision=0), "1.")
|
| 113 |
+
assert_equal(fsci32('1.0', precision=0), "1.e+00")
|
| 114 |
+
assert_equal(fpos32('10.234', precision=0), "10.")
|
| 115 |
+
assert_equal(fpos32('-10.234', precision=0), "-10.")
|
| 116 |
+
assert_equal(fsci32('10.234', precision=0), "1.e+01")
|
| 117 |
+
assert_equal(fsci32('-10.234', precision=0), "-1.e+01")
|
| 118 |
+
assert_equal(fpos32('10.234', precision=2), "10.23")
|
| 119 |
+
assert_equal(fsci32('-10.234', precision=2), "-1.02e+01")
|
| 120 |
+
assert_equal(fsci64('9.9999999999999995e-08', **preckwd(16)),
|
| 121 |
+
'9.9999999999999995e-08')
|
| 122 |
+
assert_equal(fsci64('9.8813129168249309e-324', **preckwd(16)),
|
| 123 |
+
'9.8813129168249309e-324')
|
| 124 |
+
assert_equal(fsci64('9.9999999999999694e-311', **preckwd(16)),
|
| 125 |
+
'9.9999999999999694e-311')
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
# test rounding
|
| 129 |
+
# 3.1415927410 is closest float32 to np.pi
|
| 130 |
+
assert_equal(fpos32('3.14159265358979323846', **preckwd(10)),
|
| 131 |
+
"3.1415927410")
|
| 132 |
+
assert_equal(fsci32('3.14159265358979323846', **preckwd(10)),
|
| 133 |
+
"3.1415927410e+00")
|
| 134 |
+
assert_equal(fpos64('3.14159265358979323846', **preckwd(10)),
|
| 135 |
+
"3.1415926536")
|
| 136 |
+
assert_equal(fsci64('3.14159265358979323846', **preckwd(10)),
|
| 137 |
+
"3.1415926536e+00")
|
| 138 |
+
# 299792448 is closest float32 to 299792458
|
| 139 |
+
assert_equal(fpos32('299792458.0', **preckwd(5)), "299792448.00000")
|
| 140 |
+
assert_equal(fsci32('299792458.0', **preckwd(5)), "2.99792e+08")
|
| 141 |
+
assert_equal(fpos64('299792458.0', **preckwd(5)), "299792458.00000")
|
| 142 |
+
assert_equal(fsci64('299792458.0', **preckwd(5)), "2.99792e+08")
|
| 143 |
+
|
| 144 |
+
assert_equal(fpos32('3.14159265358979323846', **preckwd(25)),
|
| 145 |
+
"3.1415927410125732421875000")
|
| 146 |
+
assert_equal(fpos64('3.14159265358979323846', **preckwd(50)),
|
| 147 |
+
"3.14159265358979311599796346854418516159057617187500")
|
| 148 |
+
assert_equal(fpos64('3.14159265358979323846'), "3.141592653589793")
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
# smallest numbers
|
| 152 |
+
assert_equal(fpos32(0.5**(126 + 23), unique=False, precision=149),
|
| 153 |
+
"0.00000000000000000000000000000000000000000000140129846432"
|
| 154 |
+
"4817070923729583289916131280261941876515771757068283889791"
|
| 155 |
+
"08268586060148663818836212158203125")
|
| 156 |
+
|
| 157 |
+
assert_equal(fpos64(5e-324, unique=False, precision=1074),
|
| 158 |
+
"0.00000000000000000000000000000000000000000000000000000000"
|
| 159 |
+
"0000000000000000000000000000000000000000000000000000000000"
|
| 160 |
+
"0000000000000000000000000000000000000000000000000000000000"
|
| 161 |
+
"0000000000000000000000000000000000000000000000000000000000"
|
| 162 |
+
"0000000000000000000000000000000000000000000000000000000000"
|
| 163 |
+
"0000000000000000000000000000000000049406564584124654417656"
|
| 164 |
+
"8792868221372365059802614324764425585682500675507270208751"
|
| 165 |
+
"8652998363616359923797965646954457177309266567103559397963"
|
| 166 |
+
"9877479601078187812630071319031140452784581716784898210368"
|
| 167 |
+
"8718636056998730723050006387409153564984387312473397273169"
|
| 168 |
+
"6151400317153853980741262385655911710266585566867681870395"
|
| 169 |
+
"6031062493194527159149245532930545654440112748012970999954"
|
| 170 |
+
"1931989409080416563324524757147869014726780159355238611550"
|
| 171 |
+
"1348035264934720193790268107107491703332226844753335720832"
|
| 172 |
+
"4319360923828934583680601060115061698097530783422773183292"
|
| 173 |
+
"4790498252473077637592724787465608477820373446969953364701"
|
| 174 |
+
"7972677717585125660551199131504891101451037862738167250955"
|
| 175 |
+
"8373897335989936648099411642057026370902792427675445652290"
|
| 176 |
+
"87538682506419718265533447265625")
|
| 177 |
+
|
| 178 |
+
# largest numbers
|
| 179 |
+
f32x = np.finfo(np.float32).max
|
| 180 |
+
assert_equal(fpos32(f32x, **preckwd(0)),
|
| 181 |
+
"340282346638528859811704183484516925440.")
|
| 182 |
+
assert_equal(fpos64(np.finfo(np.float64).max, **preckwd(0)),
|
| 183 |
+
"1797693134862315708145274237317043567980705675258449965989"
|
| 184 |
+
"1747680315726078002853876058955863276687817154045895351438"
|
| 185 |
+
"2464234321326889464182768467546703537516986049910576551282"
|
| 186 |
+
"0762454900903893289440758685084551339423045832369032229481"
|
| 187 |
+
"6580855933212334827479782620414472316873817718091929988125"
|
| 188 |
+
"0404026184124858368.")
|
| 189 |
+
# Warning: In unique mode only the integer digits necessary for
|
| 190 |
+
# uniqueness are computed, the rest are 0.
|
| 191 |
+
assert_equal(fpos32(f32x),
|
| 192 |
+
"340282350000000000000000000000000000000.")
|
| 193 |
+
|
| 194 |
+
# Further tests of zero-padding vs rounding in different combinations
|
| 195 |
+
# of unique, fractional, precision, min_digits
|
| 196 |
+
# precision can only reduce digits, not add them.
|
| 197 |
+
# min_digits can only extend digits, not reduce them.
|
| 198 |
+
assert_equal(fpos32(f32x, unique=True, fractional=True, precision=0),
|
| 199 |
+
"340282350000000000000000000000000000000.")
|
| 200 |
+
assert_equal(fpos32(f32x, unique=True, fractional=True, precision=4),
|
| 201 |
+
"340282350000000000000000000000000000000.")
|
| 202 |
+
assert_equal(fpos32(f32x, unique=True, fractional=True, min_digits=0),
|
| 203 |
+
"340282346638528859811704183484516925440.")
|
| 204 |
+
assert_equal(fpos32(f32x, unique=True, fractional=True, min_digits=4),
|
| 205 |
+
"340282346638528859811704183484516925440.0000")
|
| 206 |
+
assert_equal(fpos32(f32x, unique=True, fractional=True,
|
| 207 |
+
min_digits=4, precision=4),
|
| 208 |
+
"340282346638528859811704183484516925440.0000")
|
| 209 |
+
assert_raises(ValueError, fpos32, f32x, unique=True, fractional=False,
|
| 210 |
+
precision=0)
|
| 211 |
+
assert_equal(fpos32(f32x, unique=True, fractional=False, precision=4),
|
| 212 |
+
"340300000000000000000000000000000000000.")
|
| 213 |
+
assert_equal(fpos32(f32x, unique=True, fractional=False, precision=20),
|
| 214 |
+
"340282350000000000000000000000000000000.")
|
| 215 |
+
assert_equal(fpos32(f32x, unique=True, fractional=False, min_digits=4),
|
| 216 |
+
"340282350000000000000000000000000000000.")
|
| 217 |
+
assert_equal(fpos32(f32x, unique=True, fractional=False,
|
| 218 |
+
min_digits=20),
|
| 219 |
+
"340282346638528859810000000000000000000.")
|
| 220 |
+
assert_equal(fpos32(f32x, unique=True, fractional=False,
|
| 221 |
+
min_digits=15),
|
| 222 |
+
"340282346638529000000000000000000000000.")
|
| 223 |
+
assert_equal(fpos32(f32x, unique=False, fractional=False, precision=4),
|
| 224 |
+
"340300000000000000000000000000000000000.")
|
| 225 |
+
# test that unique rounding is preserved when precision is supplied
|
| 226 |
+
# but no extra digits need to be printed (gh-18609)
|
| 227 |
+
a = np.float64.fromhex('-1p-97')
|
| 228 |
+
assert_equal(fsci64(a, unique=True), '-6.310887241768095e-30')
|
| 229 |
+
assert_equal(fsci64(a, unique=False, precision=15),
|
| 230 |
+
'-6.310887241768094e-30')
|
| 231 |
+
assert_equal(fsci64(a, unique=True, precision=15),
|
| 232 |
+
'-6.310887241768095e-30')
|
| 233 |
+
assert_equal(fsci64(a, unique=True, min_digits=15),
|
| 234 |
+
'-6.310887241768095e-30')
|
| 235 |
+
assert_equal(fsci64(a, unique=True, precision=15, min_digits=15),
|
| 236 |
+
'-6.310887241768095e-30')
|
| 237 |
+
# adds/remove digits in unique mode with unbiased rnding
|
| 238 |
+
assert_equal(fsci64(a, unique=True, precision=14),
|
| 239 |
+
'-6.31088724176809e-30')
|
| 240 |
+
assert_equal(fsci64(a, unique=True, min_digits=16),
|
| 241 |
+
'-6.3108872417680944e-30')
|
| 242 |
+
assert_equal(fsci64(a, unique=True, precision=16),
|
| 243 |
+
'-6.310887241768095e-30')
|
| 244 |
+
assert_equal(fsci64(a, unique=True, min_digits=14),
|
| 245 |
+
'-6.310887241768095e-30')
|
| 246 |
+
# test min_digits in unique mode with different rounding cases
|
| 247 |
+
assert_equal(fsci64('1e120', min_digits=3), '1.000e+120')
|
| 248 |
+
assert_equal(fsci64('1e100', min_digits=3), '1.000e+100')
|
| 249 |
+
|
| 250 |
+
# test trailing zeros
|
| 251 |
+
assert_equal(fpos32('1.0', unique=False, precision=3), "1.000")
|
| 252 |
+
assert_equal(fpos64('1.0', unique=False, precision=3), "1.000")
|
| 253 |
+
assert_equal(fsci32('1.0', unique=False, precision=3), "1.000e+00")
|
| 254 |
+
assert_equal(fsci64('1.0', unique=False, precision=3), "1.000e+00")
|
| 255 |
+
assert_equal(fpos32('1.5', unique=False, precision=3), "1.500")
|
| 256 |
+
assert_equal(fpos64('1.5', unique=False, precision=3), "1.500")
|
| 257 |
+
assert_equal(fsci32('1.5', unique=False, precision=3), "1.500e+00")
|
| 258 |
+
assert_equal(fsci64('1.5', unique=False, precision=3), "1.500e+00")
|
| 259 |
+
# gh-10713
|
| 260 |
+
assert_equal(fpos64('324', unique=False, precision=5,
|
| 261 |
+
fractional=False), "324.00")
|
| 262 |
+
|
| 263 |
+
def test_dragon4_interface(self):
|
| 264 |
+
tps = [np.float16, np.float32, np.float64]
|
| 265 |
+
# test is flaky for musllinux on np.float128
|
| 266 |
+
if hasattr(np, 'float128') and not IS_MUSL:
|
| 267 |
+
tps.append(np.float128)
|
| 268 |
+
|
| 269 |
+
fpos = np.format_float_positional
|
| 270 |
+
fsci = np.format_float_scientific
|
| 271 |
+
|
| 272 |
+
for tp in tps:
|
| 273 |
+
# test padding
|
| 274 |
+
assert_equal(fpos(tp('1.0'), pad_left=4, pad_right=4), " 1. ")
|
| 275 |
+
assert_equal(fpos(tp('-1.0'), pad_left=4, pad_right=4), " -1. ")
|
| 276 |
+
assert_equal(fpos(tp('-10.2'),
|
| 277 |
+
pad_left=4, pad_right=4), " -10.2 ")
|
| 278 |
+
|
| 279 |
+
# test exp_digits
|
| 280 |
+
assert_equal(fsci(tp('1.23e1'), exp_digits=5), "1.23e+00001")
|
| 281 |
+
|
| 282 |
+
# test fixed (non-unique) mode
|
| 283 |
+
assert_equal(fpos(tp('1.0'), unique=False, precision=4), "1.0000")
|
| 284 |
+
assert_equal(fsci(tp('1.0'), unique=False, precision=4),
|
| 285 |
+
"1.0000e+00")
|
| 286 |
+
|
| 287 |
+
# test trimming
|
| 288 |
+
# trim of 'k' or '.' only affects non-unique mode, since unique
|
| 289 |
+
# mode will not output trailing 0s.
|
| 290 |
+
assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='k'),
|
| 291 |
+
"1.0000")
|
| 292 |
+
|
| 293 |
+
assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='.'),
|
| 294 |
+
"1.")
|
| 295 |
+
assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='.'),
|
| 296 |
+
"1.2" if tp != np.float16 else "1.2002")
|
| 297 |
+
|
| 298 |
+
assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='0'),
|
| 299 |
+
"1.0")
|
| 300 |
+
assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='0'),
|
| 301 |
+
"1.2" if tp != np.float16 else "1.2002")
|
| 302 |
+
assert_equal(fpos(tp('1.'), trim='0'), "1.0")
|
| 303 |
+
|
| 304 |
+
assert_equal(fpos(tp('1.'), unique=False, precision=4, trim='-'),
|
| 305 |
+
"1")
|
| 306 |
+
assert_equal(fpos(tp('1.2'), unique=False, precision=4, trim='-'),
|
| 307 |
+
"1.2" if tp != np.float16 else "1.2002")
|
| 308 |
+
assert_equal(fpos(tp('1.'), trim='-'), "1")
|
| 309 |
+
assert_equal(fpos(tp('1.001'), precision=1, trim='-'), "1")
|
| 310 |
+
|
| 311 |
+
@pytest.mark.skipif(not platform.machine().startswith("ppc64"),
|
| 312 |
+
reason="only applies to ppc float128 values")
|
| 313 |
+
def test_ppc64_ibm_double_double128(self):
|
| 314 |
+
# check that the precision decreases once we get into the subnormal
|
| 315 |
+
# range. Unlike float64, this starts around 1e-292 instead of 1e-308,
|
| 316 |
+
# which happens when the first double is normal and the second is
|
| 317 |
+
# subnormal.
|
| 318 |
+
x = np.float128('2.123123123123123123123123123123123e-286')
|
| 319 |
+
got = [str(x/np.float128('2e' + str(i))) for i in range(0,40)]
|
| 320 |
+
expected = [
|
| 321 |
+
"1.06156156156156156156156156156157e-286",
|
| 322 |
+
"1.06156156156156156156156156156158e-287",
|
| 323 |
+
"1.06156156156156156156156156156159e-288",
|
| 324 |
+
"1.0615615615615615615615615615616e-289",
|
| 325 |
+
"1.06156156156156156156156156156157e-290",
|
| 326 |
+
"1.06156156156156156156156156156156e-291",
|
| 327 |
+
"1.0615615615615615615615615615616e-292",
|
| 328 |
+
"1.0615615615615615615615615615615e-293",
|
| 329 |
+
"1.061561561561561561561561561562e-294",
|
| 330 |
+
"1.06156156156156156156156156155e-295",
|
| 331 |
+
"1.0615615615615615615615615616e-296",
|
| 332 |
+
"1.06156156156156156156156156e-297",
|
| 333 |
+
"1.06156156156156156156156157e-298",
|
| 334 |
+
"1.0615615615615615615615616e-299",
|
| 335 |
+
"1.06156156156156156156156e-300",
|
| 336 |
+
"1.06156156156156156156155e-301",
|
| 337 |
+
"1.0615615615615615615616e-302",
|
| 338 |
+
"1.061561561561561561562e-303",
|
| 339 |
+
"1.06156156156156156156e-304",
|
| 340 |
+
"1.0615615615615615618e-305",
|
| 341 |
+
"1.06156156156156156e-306",
|
| 342 |
+
"1.06156156156156157e-307",
|
| 343 |
+
"1.0615615615615616e-308",
|
| 344 |
+
"1.06156156156156e-309",
|
| 345 |
+
"1.06156156156157e-310",
|
| 346 |
+
"1.0615615615616e-311",
|
| 347 |
+
"1.06156156156e-312",
|
| 348 |
+
"1.06156156154e-313",
|
| 349 |
+
"1.0615615616e-314",
|
| 350 |
+
"1.06156156e-315",
|
| 351 |
+
"1.06156155e-316",
|
| 352 |
+
"1.061562e-317",
|
| 353 |
+
"1.06156e-318",
|
| 354 |
+
"1.06155e-319",
|
| 355 |
+
"1.0617e-320",
|
| 356 |
+
"1.06e-321",
|
| 357 |
+
"1.04e-322",
|
| 358 |
+
"1e-323",
|
| 359 |
+
"0.0",
|
| 360 |
+
"0.0"]
|
| 361 |
+
assert_equal(got, expected)
|
| 362 |
+
|
| 363 |
+
# Note: we follow glibc behavior, but it (or gcc) might not be right.
|
| 364 |
+
# In particular we can get two values that print the same but are not
|
| 365 |
+
# equal:
|
| 366 |
+
a = np.float128('2')/np.float128('3')
|
| 367 |
+
b = np.float128(str(a))
|
| 368 |
+
assert_equal(str(a), str(b))
|
| 369 |
+
assert_(a != b)
|
| 370 |
+
|
| 371 |
+
def float32_roundtrip(self):
|
| 372 |
+
# gh-9360
|
| 373 |
+
x = np.float32(1024 - 2**-14)
|
| 374 |
+
y = np.float32(1024 - 2**-13)
|
| 375 |
+
assert_(repr(x) != repr(y))
|
| 376 |
+
assert_equal(np.float32(repr(x)), x)
|
| 377 |
+
assert_equal(np.float32(repr(y)), y)
|
| 378 |
+
|
| 379 |
+
def float64_vs_python(self):
|
| 380 |
+
# gh-2643, gh-6136, gh-6908
|
| 381 |
+
assert_equal(repr(np.float64(0.1)), repr(0.1))
|
| 382 |
+
assert_(repr(np.float64(0.20000000000000004)) != repr(0.2))
|
mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_simd_module.py
ADDED
|
@@ -0,0 +1,101 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pytest
|
| 2 |
+
from numpy._core._simd import targets
|
| 3 |
+
"""
|
| 4 |
+
This testing unit only for checking the sanity of common functionality,
|
| 5 |
+
therefore all we need is just to take one submodule that represents any
|
| 6 |
+
of enabled SIMD extensions to run the test on it and the second submodule
|
| 7 |
+
required to run only one check related to the possibility of mixing
|
| 8 |
+
the data types among each submodule.
|
| 9 |
+
"""
|
| 10 |
+
npyvs = [npyv_mod for npyv_mod in targets.values() if npyv_mod and npyv_mod.simd]
|
| 11 |
+
npyv, npyv2 = (npyvs + [None, None])[:2]
|
| 12 |
+
|
| 13 |
+
unsigned_sfx = ["u8", "u16", "u32", "u64"]
|
| 14 |
+
signed_sfx = ["s8", "s16", "s32", "s64"]
|
| 15 |
+
fp_sfx = []
|
| 16 |
+
if npyv and npyv.simd_f32:
|
| 17 |
+
fp_sfx.append("f32")
|
| 18 |
+
if npyv and npyv.simd_f64:
|
| 19 |
+
fp_sfx.append("f64")
|
| 20 |
+
|
| 21 |
+
int_sfx = unsigned_sfx + signed_sfx
|
| 22 |
+
all_sfx = unsigned_sfx + int_sfx
|
| 23 |
+
|
| 24 |
+
@pytest.mark.skipif(not npyv, reason="could not find any SIMD extension with NPYV support")
|
| 25 |
+
class Test_SIMD_MODULE:
|
| 26 |
+
|
| 27 |
+
@pytest.mark.parametrize('sfx', all_sfx)
|
| 28 |
+
def test_num_lanes(self, sfx):
|
| 29 |
+
nlanes = getattr(npyv, "nlanes_" + sfx)
|
| 30 |
+
vector = getattr(npyv, "setall_" + sfx)(1)
|
| 31 |
+
assert len(vector) == nlanes
|
| 32 |
+
|
| 33 |
+
@pytest.mark.parametrize('sfx', all_sfx)
|
| 34 |
+
def test_type_name(self, sfx):
|
| 35 |
+
vector = getattr(npyv, "setall_" + sfx)(1)
|
| 36 |
+
assert vector.__name__ == "npyv_" + sfx
|
| 37 |
+
|
| 38 |
+
def test_raises(self):
|
| 39 |
+
a, b = [npyv.setall_u32(1)]*2
|
| 40 |
+
for sfx in all_sfx:
|
| 41 |
+
vcb = lambda intrin: getattr(npyv, f"{intrin}_{sfx}")
|
| 42 |
+
pytest.raises(TypeError, vcb("add"), a)
|
| 43 |
+
pytest.raises(TypeError, vcb("add"), a, b, a)
|
| 44 |
+
pytest.raises(TypeError, vcb("setall"))
|
| 45 |
+
pytest.raises(TypeError, vcb("setall"), [1])
|
| 46 |
+
pytest.raises(TypeError, vcb("load"), 1)
|
| 47 |
+
pytest.raises(ValueError, vcb("load"), [1])
|
| 48 |
+
pytest.raises(ValueError, vcb("store"), [1], getattr(npyv, f"reinterpret_{sfx}_u32")(a))
|
| 49 |
+
|
| 50 |
+
@pytest.mark.skipif(not npyv2, reason=(
|
| 51 |
+
"could not find a second SIMD extension with NPYV support"
|
| 52 |
+
))
|
| 53 |
+
def test_nomix(self):
|
| 54 |
+
# mix among submodules isn't allowed
|
| 55 |
+
a = npyv.setall_u32(1)
|
| 56 |
+
a2 = npyv2.setall_u32(1)
|
| 57 |
+
pytest.raises(TypeError, npyv.add_u32, a2, a2)
|
| 58 |
+
pytest.raises(TypeError, npyv2.add_u32, a, a)
|
| 59 |
+
|
| 60 |
+
@pytest.mark.parametrize('sfx', unsigned_sfx)
|
| 61 |
+
def test_unsigned_overflow(self, sfx):
|
| 62 |
+
nlanes = getattr(npyv, "nlanes_" + sfx)
|
| 63 |
+
maxu = (1 << int(sfx[1:])) - 1
|
| 64 |
+
maxu_72 = (1 << 72) - 1
|
| 65 |
+
lane = getattr(npyv, "setall_" + sfx)(maxu_72)[0]
|
| 66 |
+
assert lane == maxu
|
| 67 |
+
lanes = getattr(npyv, "load_" + sfx)([maxu_72] * nlanes)
|
| 68 |
+
assert lanes == [maxu] * nlanes
|
| 69 |
+
lane = getattr(npyv, "setall_" + sfx)(-1)[0]
|
| 70 |
+
assert lane == maxu
|
| 71 |
+
lanes = getattr(npyv, "load_" + sfx)([-1] * nlanes)
|
| 72 |
+
assert lanes == [maxu] * nlanes
|
| 73 |
+
|
| 74 |
+
@pytest.mark.parametrize('sfx', signed_sfx)
|
| 75 |
+
def test_signed_overflow(self, sfx):
|
| 76 |
+
nlanes = getattr(npyv, "nlanes_" + sfx)
|
| 77 |
+
maxs_72 = (1 << 71) - 1
|
| 78 |
+
lane = getattr(npyv, "setall_" + sfx)(maxs_72)[0]
|
| 79 |
+
assert lane == -1
|
| 80 |
+
lanes = getattr(npyv, "load_" + sfx)([maxs_72] * nlanes)
|
| 81 |
+
assert lanes == [-1] * nlanes
|
| 82 |
+
mins_72 = -1 << 71
|
| 83 |
+
lane = getattr(npyv, "setall_" + sfx)(mins_72)[0]
|
| 84 |
+
assert lane == 0
|
| 85 |
+
lanes = getattr(npyv, "load_" + sfx)([mins_72] * nlanes)
|
| 86 |
+
assert lanes == [0] * nlanes
|
| 87 |
+
|
| 88 |
+
def test_truncate_f32(self):
|
| 89 |
+
if not npyv.simd_f32:
|
| 90 |
+
pytest.skip("F32 isn't support by the SIMD extension")
|
| 91 |
+
f32 = npyv.setall_f32(0.1)[0]
|
| 92 |
+
assert f32 != 0.1
|
| 93 |
+
assert round(f32, 1) == 0.1
|
| 94 |
+
|
| 95 |
+
def test_compare(self):
|
| 96 |
+
data_range = range(0, npyv.nlanes_u32)
|
| 97 |
+
vdata = npyv.load_u32(data_range)
|
| 98 |
+
assert vdata == list(data_range)
|
| 99 |
+
assert vdata == tuple(data_range)
|
| 100 |
+
for i in data_range:
|
| 101 |
+
assert vdata[i] == data_range[i]
|
mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_stringdtype.py
ADDED
|
@@ -0,0 +1,1813 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import concurrent.futures
|
| 2 |
+
import itertools
|
| 3 |
+
import os
|
| 4 |
+
import pickle
|
| 5 |
+
import string
|
| 6 |
+
import sys
|
| 7 |
+
import tempfile
|
| 8 |
+
|
| 9 |
+
import numpy as np
|
| 10 |
+
import pytest
|
| 11 |
+
|
| 12 |
+
from numpy.dtypes import StringDType
|
| 13 |
+
from numpy._core.tests._natype import pd_NA
|
| 14 |
+
from numpy.testing import assert_array_equal, IS_WASM, IS_PYPY
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
@pytest.fixture
|
| 18 |
+
def string_list():
|
| 19 |
+
return ["abc", "def", "ghi" * 10, "A¢☃€ 😊" * 100, "Abc" * 1000, "DEF"]
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
@pytest.fixture
|
| 23 |
+
def random_string_list():
|
| 24 |
+
chars = list(string.ascii_letters + string.digits)
|
| 25 |
+
chars = np.array(chars, dtype="U1")
|
| 26 |
+
ret = np.random.choice(chars, size=100 * 10, replace=True)
|
| 27 |
+
return ret.view("U100")
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
@pytest.fixture(params=[True, False])
|
| 31 |
+
def coerce(request):
|
| 32 |
+
return request.param
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
@pytest.fixture(
|
| 36 |
+
params=["unset", None, pd_NA, np.nan, float("nan"), "__nan__"],
|
| 37 |
+
ids=["unset", "None", "pandas.NA", "np.nan", "float('nan')", "string nan"],
|
| 38 |
+
)
|
| 39 |
+
def na_object(request):
|
| 40 |
+
return request.param
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def get_dtype(na_object, coerce=True):
|
| 44 |
+
# explicit is check for pd_NA because != with pd_NA returns pd_NA
|
| 45 |
+
if na_object is pd_NA or na_object != "unset":
|
| 46 |
+
return StringDType(na_object=na_object, coerce=coerce)
|
| 47 |
+
else:
|
| 48 |
+
return StringDType(coerce=coerce)
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
@pytest.fixture()
|
| 52 |
+
def dtype(na_object, coerce):
|
| 53 |
+
return get_dtype(na_object, coerce)
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
# second copy for cast tests to do a cartesian product over dtypes
|
| 57 |
+
@pytest.fixture(params=[True, False])
|
| 58 |
+
def coerce2(request):
|
| 59 |
+
return request.param
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
@pytest.fixture(
|
| 63 |
+
params=["unset", None, pd_NA, np.nan, float("nan"), "__nan__"],
|
| 64 |
+
ids=["unset", "None", "pandas.NA", "np.nan", "float('nan')", "string nan"],
|
| 65 |
+
)
|
| 66 |
+
def na_object2(request):
|
| 67 |
+
return request.param
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
@pytest.fixture()
|
| 71 |
+
def dtype2(na_object2, coerce2):
|
| 72 |
+
# explicit is check for pd_NA because != with pd_NA returns pd_NA
|
| 73 |
+
if na_object2 is pd_NA or na_object2 != "unset":
|
| 74 |
+
return StringDType(na_object=na_object2, coerce=coerce2)
|
| 75 |
+
else:
|
| 76 |
+
return StringDType(coerce=coerce2)
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
def test_dtype_creation():
|
| 80 |
+
hashes = set()
|
| 81 |
+
dt = StringDType()
|
| 82 |
+
assert not hasattr(dt, "na_object") and dt.coerce is True
|
| 83 |
+
hashes.add(hash(dt))
|
| 84 |
+
|
| 85 |
+
dt = StringDType(na_object=None)
|
| 86 |
+
assert dt.na_object is None and dt.coerce is True
|
| 87 |
+
hashes.add(hash(dt))
|
| 88 |
+
|
| 89 |
+
dt = StringDType(coerce=False)
|
| 90 |
+
assert not hasattr(dt, "na_object") and dt.coerce is False
|
| 91 |
+
hashes.add(hash(dt))
|
| 92 |
+
|
| 93 |
+
dt = StringDType(na_object=None, coerce=False)
|
| 94 |
+
assert dt.na_object is None and dt.coerce is False
|
| 95 |
+
hashes.add(hash(dt))
|
| 96 |
+
|
| 97 |
+
assert len(hashes) == 4
|
| 98 |
+
|
| 99 |
+
dt = np.dtype("T")
|
| 100 |
+
assert dt == StringDType()
|
| 101 |
+
assert dt.kind == "T"
|
| 102 |
+
assert dt.char == "T"
|
| 103 |
+
|
| 104 |
+
hashes.add(hash(dt))
|
| 105 |
+
assert len(hashes) == 4
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
def test_dtype_equality(dtype):
|
| 109 |
+
assert dtype == dtype
|
| 110 |
+
for ch in "SU":
|
| 111 |
+
assert dtype != np.dtype(ch)
|
| 112 |
+
assert dtype != np.dtype(f"{ch}8")
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
def test_dtype_repr(dtype):
|
| 116 |
+
if not hasattr(dtype, "na_object") and dtype.coerce:
|
| 117 |
+
assert repr(dtype) == "StringDType()"
|
| 118 |
+
elif dtype.coerce:
|
| 119 |
+
assert repr(dtype) == f"StringDType(na_object={dtype.na_object!r})"
|
| 120 |
+
elif not hasattr(dtype, "na_object"):
|
| 121 |
+
assert repr(dtype) == "StringDType(coerce=False)"
|
| 122 |
+
else:
|
| 123 |
+
assert (
|
| 124 |
+
repr(dtype)
|
| 125 |
+
== f"StringDType(na_object={dtype.na_object!r}, coerce=False)"
|
| 126 |
+
)
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
def test_create_with_na(dtype):
|
| 130 |
+
if not hasattr(dtype, "na_object"):
|
| 131 |
+
pytest.skip("does not have an na object")
|
| 132 |
+
na_val = dtype.na_object
|
| 133 |
+
string_list = ["hello", na_val, "world"]
|
| 134 |
+
arr = np.array(string_list, dtype=dtype)
|
| 135 |
+
assert str(arr) == "[" + " ".join([repr(s) for s in string_list]) + "]"
|
| 136 |
+
assert arr[1] is dtype.na_object
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
@pytest.mark.parametrize("i", list(range(5)))
|
| 140 |
+
def test_set_replace_na(i):
|
| 141 |
+
# Test strings of various lengths can be set to NaN and then replaced.
|
| 142 |
+
s_empty = ""
|
| 143 |
+
s_short = "0123456789"
|
| 144 |
+
s_medium = "abcdefghijklmnopqrstuvwxyz"
|
| 145 |
+
s_long = "-=+" * 100
|
| 146 |
+
strings = [s_medium, s_empty, s_short, s_medium, s_long]
|
| 147 |
+
a = np.array(strings, StringDType(na_object=np.nan))
|
| 148 |
+
for s in [a[i], s_medium+s_short, s_short, s_empty, s_long]:
|
| 149 |
+
a[i] = np.nan
|
| 150 |
+
assert np.isnan(a[i])
|
| 151 |
+
a[i] = s
|
| 152 |
+
assert a[i] == s
|
| 153 |
+
assert_array_equal(a, strings[:i] + [s] + strings[i+1:])
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
def test_null_roundtripping():
|
| 157 |
+
data = ["hello\0world", "ABC\0DEF\0\0"]
|
| 158 |
+
arr = np.array(data, dtype="T")
|
| 159 |
+
assert data[0] == arr[0]
|
| 160 |
+
assert data[1] == arr[1]
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
def test_string_too_large_error():
|
| 164 |
+
arr = np.array(["a", "b", "c"], dtype=StringDType())
|
| 165 |
+
with pytest.raises(MemoryError):
|
| 166 |
+
arr * (2**63 - 2)
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
@pytest.mark.parametrize(
|
| 170 |
+
"data",
|
| 171 |
+
[
|
| 172 |
+
["abc", "def", "ghi"],
|
| 173 |
+
["🤣", "📵", "😰"],
|
| 174 |
+
["🚜", "🙃", "😾"],
|
| 175 |
+
["😹", "🚠", "🚌"],
|
| 176 |
+
],
|
| 177 |
+
)
|
| 178 |
+
def test_array_creation_utf8(dtype, data):
|
| 179 |
+
arr = np.array(data, dtype=dtype)
|
| 180 |
+
assert str(arr) == "[" + " ".join(["'" + str(d) + "'" for d in data]) + "]"
|
| 181 |
+
assert arr.dtype == dtype
|
| 182 |
+
|
| 183 |
+
|
| 184 |
+
@pytest.mark.parametrize(
|
| 185 |
+
"data",
|
| 186 |
+
[
|
| 187 |
+
[1, 2, 3],
|
| 188 |
+
[b"abc", b"def", b"ghi"],
|
| 189 |
+
[object, object, object],
|
| 190 |
+
],
|
| 191 |
+
)
|
| 192 |
+
def test_scalars_string_conversion(data, dtype):
|
| 193 |
+
if dtype.coerce:
|
| 194 |
+
assert_array_equal(
|
| 195 |
+
np.array(data, dtype=dtype),
|
| 196 |
+
np.array([str(d) for d in data], dtype=dtype),
|
| 197 |
+
)
|
| 198 |
+
else:
|
| 199 |
+
with pytest.raises(ValueError):
|
| 200 |
+
np.array(data, dtype=dtype)
|
| 201 |
+
|
| 202 |
+
|
| 203 |
+
@pytest.mark.parametrize(
|
| 204 |
+
("strings"),
|
| 205 |
+
[
|
| 206 |
+
["this", "is", "an", "array"],
|
| 207 |
+
["€", "", "😊"],
|
| 208 |
+
["A¢☃€ 😊", " A☃€¢😊", "☃€😊 A¢", "😊☃A¢ €"],
|
| 209 |
+
],
|
| 210 |
+
)
|
| 211 |
+
def test_self_casts(dtype, dtype2, strings):
|
| 212 |
+
if hasattr(dtype, "na_object"):
|
| 213 |
+
strings = strings + [dtype.na_object]
|
| 214 |
+
elif hasattr(dtype2, "na_object"):
|
| 215 |
+
strings = strings + [""]
|
| 216 |
+
arr = np.array(strings, dtype=dtype)
|
| 217 |
+
newarr = arr.astype(dtype2)
|
| 218 |
+
|
| 219 |
+
if hasattr(dtype, "na_object") and not hasattr(dtype2, "na_object"):
|
| 220 |
+
assert newarr[-1] == str(dtype.na_object)
|
| 221 |
+
with pytest.raises(TypeError):
|
| 222 |
+
arr.astype(dtype2, casting="safe")
|
| 223 |
+
elif hasattr(dtype, "na_object") and hasattr(dtype2, "na_object"):
|
| 224 |
+
assert newarr[-1] is dtype2.na_object
|
| 225 |
+
arr.astype(dtype2, casting="safe")
|
| 226 |
+
elif hasattr(dtype2, "na_object"):
|
| 227 |
+
assert newarr[-1] == ""
|
| 228 |
+
arr.astype(dtype2, casting="safe")
|
| 229 |
+
else:
|
| 230 |
+
arr.astype(dtype2, casting="safe")
|
| 231 |
+
|
| 232 |
+
if hasattr(dtype, "na_object") and hasattr(dtype2, "na_object"):
|
| 233 |
+
na1 = dtype.na_object
|
| 234 |
+
na2 = dtype2.na_object
|
| 235 |
+
if (na1 is not na2 and
|
| 236 |
+
# check for pd_NA first because bool(pd_NA) is an error
|
| 237 |
+
((na1 is pd_NA or na2 is pd_NA) or
|
| 238 |
+
# the second check is a NaN check, spelled this way
|
| 239 |
+
# to avoid errors from math.isnan and np.isnan
|
| 240 |
+
(na1 != na2 and not (na1 != na1 and na2 != na2)))):
|
| 241 |
+
with pytest.raises(TypeError):
|
| 242 |
+
arr[:-1] == newarr[:-1]
|
| 243 |
+
return
|
| 244 |
+
assert_array_equal(arr[:-1], newarr[:-1])
|
| 245 |
+
|
| 246 |
+
|
| 247 |
+
@pytest.mark.parametrize(
|
| 248 |
+
("strings"),
|
| 249 |
+
[
|
| 250 |
+
["this", "is", "an", "array"],
|
| 251 |
+
["€", "", "😊"],
|
| 252 |
+
["A¢☃€ 😊", " A☃€¢😊", "☃€😊 A¢", "😊☃A¢ €"],
|
| 253 |
+
],
|
| 254 |
+
)
|
| 255 |
+
class TestStringLikeCasts:
|
| 256 |
+
def test_unicode_casts(self, dtype, strings):
|
| 257 |
+
arr = np.array(strings, dtype=np.str_).astype(dtype)
|
| 258 |
+
expected = np.array(strings, dtype=dtype)
|
| 259 |
+
assert_array_equal(arr, expected)
|
| 260 |
+
|
| 261 |
+
arr_as_U8 = expected.astype("U8")
|
| 262 |
+
assert_array_equal(arr_as_U8, np.array(strings, dtype="U8"))
|
| 263 |
+
assert_array_equal(arr_as_U8.astype(dtype), arr)
|
| 264 |
+
arr_as_U3 = expected.astype("U3")
|
| 265 |
+
assert_array_equal(arr_as_U3, np.array(strings, dtype="U3"))
|
| 266 |
+
assert_array_equal(
|
| 267 |
+
arr_as_U3.astype(dtype),
|
| 268 |
+
np.array([s[:3] for s in strings], dtype=dtype),
|
| 269 |
+
)
|
| 270 |
+
|
| 271 |
+
def test_void_casts(self, dtype, strings):
|
| 272 |
+
sarr = np.array(strings, dtype=dtype)
|
| 273 |
+
utf8_bytes = [s.encode("utf-8") for s in strings]
|
| 274 |
+
void_dtype = f"V{max([len(s) for s in utf8_bytes])}"
|
| 275 |
+
varr = np.array(utf8_bytes, dtype=void_dtype)
|
| 276 |
+
assert_array_equal(varr, sarr.astype(void_dtype))
|
| 277 |
+
assert_array_equal(varr.astype(dtype), sarr)
|
| 278 |
+
|
| 279 |
+
def test_bytes_casts(self, dtype, strings):
|
| 280 |
+
sarr = np.array(strings, dtype=dtype)
|
| 281 |
+
try:
|
| 282 |
+
utf8_bytes = [s.encode("ascii") for s in strings]
|
| 283 |
+
bytes_dtype = f"S{max([len(s) for s in utf8_bytes])}"
|
| 284 |
+
barr = np.array(utf8_bytes, dtype=bytes_dtype)
|
| 285 |
+
assert_array_equal(barr, sarr.astype(bytes_dtype))
|
| 286 |
+
assert_array_equal(barr.astype(dtype), sarr)
|
| 287 |
+
except UnicodeEncodeError:
|
| 288 |
+
with pytest.raises(UnicodeEncodeError):
|
| 289 |
+
sarr.astype("S20")
|
| 290 |
+
|
| 291 |
+
|
| 292 |
+
def test_additional_unicode_cast(random_string_list, dtype):
|
| 293 |
+
arr = np.array(random_string_list, dtype=dtype)
|
| 294 |
+
# test that this short-circuits correctly
|
| 295 |
+
assert_array_equal(arr, arr.astype(arr.dtype))
|
| 296 |
+
# tests the casts via the comparison promoter
|
| 297 |
+
assert_array_equal(arr, arr.astype(random_string_list.dtype))
|
| 298 |
+
|
| 299 |
+
|
| 300 |
+
def test_insert_scalar(dtype, string_list):
|
| 301 |
+
"""Test that inserting a scalar works."""
|
| 302 |
+
arr = np.array(string_list, dtype=dtype)
|
| 303 |
+
scalar_instance = "what"
|
| 304 |
+
arr[1] = scalar_instance
|
| 305 |
+
assert_array_equal(
|
| 306 |
+
arr,
|
| 307 |
+
np.array(string_list[:1] + ["what"] + string_list[2:], dtype=dtype),
|
| 308 |
+
)
|
| 309 |
+
|
| 310 |
+
|
| 311 |
+
comparison_operators = [
|
| 312 |
+
np.equal,
|
| 313 |
+
np.not_equal,
|
| 314 |
+
np.greater,
|
| 315 |
+
np.greater_equal,
|
| 316 |
+
np.less,
|
| 317 |
+
np.less_equal,
|
| 318 |
+
]
|
| 319 |
+
|
| 320 |
+
|
| 321 |
+
@pytest.mark.parametrize("op", comparison_operators)
|
| 322 |
+
@pytest.mark.parametrize("o_dtype", [np.str_, object, StringDType()])
|
| 323 |
+
def test_comparisons(string_list, dtype, op, o_dtype):
|
| 324 |
+
sarr = np.array(string_list, dtype=dtype)
|
| 325 |
+
oarr = np.array(string_list, dtype=o_dtype)
|
| 326 |
+
|
| 327 |
+
# test that comparison operators work
|
| 328 |
+
res = op(sarr, sarr)
|
| 329 |
+
ores = op(oarr, oarr)
|
| 330 |
+
# test that promotion works as well
|
| 331 |
+
orres = op(sarr, oarr)
|
| 332 |
+
olres = op(oarr, sarr)
|
| 333 |
+
|
| 334 |
+
assert_array_equal(res, ores)
|
| 335 |
+
assert_array_equal(res, orres)
|
| 336 |
+
assert_array_equal(res, olres)
|
| 337 |
+
|
| 338 |
+
# test we get the correct answer for unequal length strings
|
| 339 |
+
sarr2 = np.array([s + "2" for s in string_list], dtype=dtype)
|
| 340 |
+
oarr2 = np.array([s + "2" for s in string_list], dtype=o_dtype)
|
| 341 |
+
|
| 342 |
+
res = op(sarr, sarr2)
|
| 343 |
+
ores = op(oarr, oarr2)
|
| 344 |
+
olres = op(oarr, sarr2)
|
| 345 |
+
orres = op(sarr, oarr2)
|
| 346 |
+
|
| 347 |
+
assert_array_equal(res, ores)
|
| 348 |
+
assert_array_equal(res, olres)
|
| 349 |
+
assert_array_equal(res, orres)
|
| 350 |
+
|
| 351 |
+
res = op(sarr2, sarr)
|
| 352 |
+
ores = op(oarr2, oarr)
|
| 353 |
+
olres = op(oarr2, sarr)
|
| 354 |
+
orres = op(sarr2, oarr)
|
| 355 |
+
|
| 356 |
+
assert_array_equal(res, ores)
|
| 357 |
+
assert_array_equal(res, olres)
|
| 358 |
+
assert_array_equal(res, orres)
|
| 359 |
+
|
| 360 |
+
|
| 361 |
+
def test_isnan(dtype, string_list):
|
| 362 |
+
if not hasattr(dtype, "na_object"):
|
| 363 |
+
pytest.skip("no na support")
|
| 364 |
+
sarr = np.array(string_list + [dtype.na_object], dtype=dtype)
|
| 365 |
+
is_nan = isinstance(dtype.na_object, float) and np.isnan(dtype.na_object)
|
| 366 |
+
bool_errors = 0
|
| 367 |
+
try:
|
| 368 |
+
bool(dtype.na_object)
|
| 369 |
+
except TypeError:
|
| 370 |
+
bool_errors = 1
|
| 371 |
+
if is_nan or bool_errors:
|
| 372 |
+
# isnan is only true when na_object is a NaN
|
| 373 |
+
assert_array_equal(
|
| 374 |
+
np.isnan(sarr),
|
| 375 |
+
np.array([0] * len(string_list) + [1], dtype=np.bool),
|
| 376 |
+
)
|
| 377 |
+
else:
|
| 378 |
+
assert not np.any(np.isnan(sarr))
|
| 379 |
+
|
| 380 |
+
|
| 381 |
+
def test_pickle(dtype, string_list):
|
| 382 |
+
arr = np.array(string_list, dtype=dtype)
|
| 383 |
+
|
| 384 |
+
with tempfile.NamedTemporaryFile("wb", delete=False) as f:
|
| 385 |
+
pickle.dump([arr, dtype], f)
|
| 386 |
+
|
| 387 |
+
with open(f.name, "rb") as f:
|
| 388 |
+
res = pickle.load(f)
|
| 389 |
+
|
| 390 |
+
assert_array_equal(res[0], arr)
|
| 391 |
+
assert res[1] == dtype
|
| 392 |
+
|
| 393 |
+
os.remove(f.name)
|
| 394 |
+
|
| 395 |
+
|
| 396 |
+
@pytest.mark.parametrize(
|
| 397 |
+
"strings",
|
| 398 |
+
[
|
| 399 |
+
["left", "right", "leftovers", "righty", "up", "down"],
|
| 400 |
+
[
|
| 401 |
+
"left" * 10,
|
| 402 |
+
"right" * 10,
|
| 403 |
+
"leftovers" * 10,
|
| 404 |
+
"righty" * 10,
|
| 405 |
+
"up" * 10,
|
| 406 |
+
],
|
| 407 |
+
["🤣🤣", "🤣", "📵", "😰"],
|
| 408 |
+
["🚜", "🙃", "😾"],
|
| 409 |
+
["😹", "🚠", "🚌"],
|
| 410 |
+
["A¢☃€ 😊", " A☃€¢😊", "☃€😊 A¢", "😊☃A¢ €"],
|
| 411 |
+
],
|
| 412 |
+
)
|
| 413 |
+
def test_sort(dtype, strings):
|
| 414 |
+
"""Test that sorting matches python's internal sorting."""
|
| 415 |
+
|
| 416 |
+
def test_sort(strings, arr_sorted):
|
| 417 |
+
arr = np.array(strings, dtype=dtype)
|
| 418 |
+
na_object = getattr(arr.dtype, "na_object", "")
|
| 419 |
+
if na_object is None and None in strings:
|
| 420 |
+
with pytest.raises(
|
| 421 |
+
ValueError,
|
| 422 |
+
match="Cannot compare null that is not a nan-like value",
|
| 423 |
+
):
|
| 424 |
+
np.argsort(arr)
|
| 425 |
+
argsorted = None
|
| 426 |
+
elif na_object is pd_NA or na_object != '':
|
| 427 |
+
argsorted = None
|
| 428 |
+
else:
|
| 429 |
+
argsorted = np.argsort(arr)
|
| 430 |
+
np.random.default_rng().shuffle(arr)
|
| 431 |
+
if na_object is None and None in strings:
|
| 432 |
+
with pytest.raises(
|
| 433 |
+
ValueError,
|
| 434 |
+
match="Cannot compare null that is not a nan-like value",
|
| 435 |
+
):
|
| 436 |
+
arr.sort()
|
| 437 |
+
else:
|
| 438 |
+
arr.sort()
|
| 439 |
+
assert np.array_equal(arr, arr_sorted, equal_nan=True)
|
| 440 |
+
if argsorted is not None:
|
| 441 |
+
assert np.array_equal(argsorted, np.argsort(strings))
|
| 442 |
+
|
| 443 |
+
|
| 444 |
+
# make a copy so we don't mutate the lists in the fixture
|
| 445 |
+
strings = strings.copy()
|
| 446 |
+
arr_sorted = np.array(sorted(strings), dtype=dtype)
|
| 447 |
+
test_sort(strings, arr_sorted)
|
| 448 |
+
|
| 449 |
+
if not hasattr(dtype, "na_object"):
|
| 450 |
+
return
|
| 451 |
+
|
| 452 |
+
# make sure NAs get sorted to the end of the array and string NAs get
|
| 453 |
+
# sorted like normal strings
|
| 454 |
+
strings.insert(0, dtype.na_object)
|
| 455 |
+
strings.insert(2, dtype.na_object)
|
| 456 |
+
# can't use append because doing that with NA converts
|
| 457 |
+
# the result to object dtype
|
| 458 |
+
if not isinstance(dtype.na_object, str):
|
| 459 |
+
arr_sorted = np.array(
|
| 460 |
+
arr_sorted.tolist() + [dtype.na_object, dtype.na_object],
|
| 461 |
+
dtype=dtype,
|
| 462 |
+
)
|
| 463 |
+
else:
|
| 464 |
+
arr_sorted = np.array(sorted(strings), dtype=dtype)
|
| 465 |
+
|
| 466 |
+
test_sort(strings, arr_sorted)
|
| 467 |
+
|
| 468 |
+
|
| 469 |
+
@pytest.mark.parametrize(
|
| 470 |
+
"strings",
|
| 471 |
+
[
|
| 472 |
+
["A¢☃€ 😊", " A☃€¢😊", "☃€😊 A¢", "😊☃A¢ €"],
|
| 473 |
+
["A¢☃€ 😊", "", " ", " "],
|
| 474 |
+
["", "a", "😸", "ááðfáíóåéë"],
|
| 475 |
+
],
|
| 476 |
+
)
|
| 477 |
+
def test_nonzero(strings, na_object):
|
| 478 |
+
dtype = get_dtype(na_object)
|
| 479 |
+
arr = np.array(strings, dtype=dtype)
|
| 480 |
+
is_nonzero = np.array(
|
| 481 |
+
[i for i, item in enumerate(strings) if len(item) != 0])
|
| 482 |
+
assert_array_equal(arr.nonzero()[0], is_nonzero)
|
| 483 |
+
|
| 484 |
+
if na_object is not pd_NA and na_object == 'unset':
|
| 485 |
+
return
|
| 486 |
+
|
| 487 |
+
strings_with_na = np.array(strings + [na_object], dtype=dtype)
|
| 488 |
+
is_nan = np.isnan(np.array([dtype.na_object], dtype=dtype))[0]
|
| 489 |
+
|
| 490 |
+
if is_nan:
|
| 491 |
+
assert strings_with_na.nonzero()[0][-1] == 4
|
| 492 |
+
else:
|
| 493 |
+
assert strings_with_na.nonzero()[0][-1] == 3
|
| 494 |
+
|
| 495 |
+
# check that the casting to bool and nonzero give consistent results
|
| 496 |
+
assert_array_equal(strings_with_na[strings_with_na.nonzero()],
|
| 497 |
+
strings_with_na[strings_with_na.astype(bool)])
|
| 498 |
+
|
| 499 |
+
|
| 500 |
+
def test_where(string_list, na_object):
|
| 501 |
+
dtype = get_dtype(na_object)
|
| 502 |
+
a = np.array(string_list, dtype=dtype)
|
| 503 |
+
b = a[::-1]
|
| 504 |
+
res = np.where([True, False, True, False, True, False], a, b)
|
| 505 |
+
assert_array_equal(res, [a[0], b[1], a[2], b[3], a[4], b[5]])
|
| 506 |
+
|
| 507 |
+
|
| 508 |
+
def test_fancy_indexing(string_list):
|
| 509 |
+
sarr = np.array(string_list, dtype="T")
|
| 510 |
+
assert_array_equal(sarr, sarr[np.arange(sarr.shape[0])])
|
| 511 |
+
|
| 512 |
+
inds = [
|
| 513 |
+
[True, True],
|
| 514 |
+
[0, 1],
|
| 515 |
+
...,
|
| 516 |
+
np.array([0, 1], dtype='uint8'),
|
| 517 |
+
]
|
| 518 |
+
|
| 519 |
+
lops = [
|
| 520 |
+
['a'*25, 'b'*25],
|
| 521 |
+
['', ''],
|
| 522 |
+
['hello', 'world'],
|
| 523 |
+
['hello', 'world'*25],
|
| 524 |
+
]
|
| 525 |
+
|
| 526 |
+
# see gh-27003 and gh-27053
|
| 527 |
+
for ind in inds:
|
| 528 |
+
for lop in lops:
|
| 529 |
+
a = np.array(lop, dtype="T")
|
| 530 |
+
assert_array_equal(a[ind], a)
|
| 531 |
+
rop = ['d'*25, 'e'*25]
|
| 532 |
+
for b in [rop, np.array(rop, dtype="T")]:
|
| 533 |
+
a[ind] = b
|
| 534 |
+
assert_array_equal(a, b)
|
| 535 |
+
assert a[0] == 'd'*25
|
| 536 |
+
|
| 537 |
+
|
| 538 |
+
def test_creation_functions():
|
| 539 |
+
assert_array_equal(np.zeros(3, dtype="T"), ["", "", ""])
|
| 540 |
+
assert_array_equal(np.empty(3, dtype="T"), ["", "", ""])
|
| 541 |
+
|
| 542 |
+
assert np.zeros(3, dtype="T")[0] == ""
|
| 543 |
+
assert np.empty(3, dtype="T")[0] == ""
|
| 544 |
+
|
| 545 |
+
|
| 546 |
+
def test_concatenate(string_list):
|
| 547 |
+
sarr = np.array(string_list, dtype="T")
|
| 548 |
+
sarr_cat = np.array(string_list + string_list, dtype="T")
|
| 549 |
+
|
| 550 |
+
assert_array_equal(np.concatenate([sarr], axis=0), sarr)
|
| 551 |
+
|
| 552 |
+
|
| 553 |
+
def test_resize_method(string_list):
|
| 554 |
+
sarr = np.array(string_list, dtype="T")
|
| 555 |
+
if IS_PYPY:
|
| 556 |
+
sarr.resize(len(string_list)+3, refcheck=False)
|
| 557 |
+
else:
|
| 558 |
+
sarr.resize(len(string_list)+3)
|
| 559 |
+
assert_array_equal(sarr, np.array(string_list + ['']*3, dtype="T"))
|
| 560 |
+
|
| 561 |
+
|
| 562 |
+
def test_create_with_copy_none(string_list):
|
| 563 |
+
arr = np.array(string_list, dtype=StringDType())
|
| 564 |
+
# create another stringdtype array with an arena that has a different
|
| 565 |
+
# in-memory layout than the first array
|
| 566 |
+
arr_rev = np.array(string_list[::-1], dtype=StringDType())
|
| 567 |
+
|
| 568 |
+
# this should create a copy and the resulting array
|
| 569 |
+
# shouldn't share an allocator or arena with arr_rev, despite
|
| 570 |
+
# explicitly passing arr_rev.dtype
|
| 571 |
+
arr_copy = np.array(arr, copy=None, dtype=arr_rev.dtype)
|
| 572 |
+
np.testing.assert_array_equal(arr, arr_copy)
|
| 573 |
+
assert arr_copy.base is None
|
| 574 |
+
|
| 575 |
+
with pytest.raises(ValueError, match="Unable to avoid copy"):
|
| 576 |
+
np.array(arr, copy=False, dtype=arr_rev.dtype)
|
| 577 |
+
|
| 578 |
+
# because we're using arr's dtype instance, the view is safe
|
| 579 |
+
arr_view = np.array(arr, copy=None, dtype=arr.dtype)
|
| 580 |
+
np.testing.assert_array_equal(arr, arr)
|
| 581 |
+
np.testing.assert_array_equal(arr_view[::-1], arr_rev)
|
| 582 |
+
assert arr_view is arr
|
| 583 |
+
|
| 584 |
+
|
| 585 |
+
def test_astype_copy_false():
|
| 586 |
+
orig_dt = StringDType()
|
| 587 |
+
arr = np.array(["hello", "world"], dtype=StringDType())
|
| 588 |
+
assert not arr.astype(StringDType(coerce=False), copy=False).dtype.coerce
|
| 589 |
+
|
| 590 |
+
assert arr.astype(orig_dt, copy=False).dtype is orig_dt
|
| 591 |
+
|
| 592 |
+
@pytest.mark.parametrize(
|
| 593 |
+
"strings",
|
| 594 |
+
[
|
| 595 |
+
["left", "right", "leftovers", "righty", "up", "down"],
|
| 596 |
+
["🤣🤣", "🤣", "📵", "😰"],
|
| 597 |
+
["🚜", "🙃", "😾"],
|
| 598 |
+
["😹", "🚠", "🚌"],
|
| 599 |
+
["A¢☃€ 😊", " A☃€¢😊", "☃€😊 A¢", "😊☃A¢ €"],
|
| 600 |
+
],
|
| 601 |
+
)
|
| 602 |
+
def test_argmax(strings):
|
| 603 |
+
"""Test that argmax/argmin matches what python calculates."""
|
| 604 |
+
arr = np.array(strings, dtype="T")
|
| 605 |
+
assert np.argmax(arr) == strings.index(max(strings))
|
| 606 |
+
assert np.argmin(arr) == strings.index(min(strings))
|
| 607 |
+
|
| 608 |
+
|
| 609 |
+
@pytest.mark.parametrize(
|
| 610 |
+
"arrfunc,expected",
|
| 611 |
+
[
|
| 612 |
+
[np.sort, None],
|
| 613 |
+
[np.nonzero, (np.array([], dtype=np.int_),)],
|
| 614 |
+
[np.argmax, 0],
|
| 615 |
+
[np.argmin, 0],
|
| 616 |
+
],
|
| 617 |
+
)
|
| 618 |
+
def test_arrfuncs_zeros(arrfunc, expected):
|
| 619 |
+
arr = np.zeros(10, dtype="T")
|
| 620 |
+
result = arrfunc(arr)
|
| 621 |
+
if expected is None:
|
| 622 |
+
expected = arr
|
| 623 |
+
assert_array_equal(result, expected, strict=True)
|
| 624 |
+
|
| 625 |
+
|
| 626 |
+
@pytest.mark.parametrize(
|
| 627 |
+
("strings", "cast_answer", "any_answer", "all_answer"),
|
| 628 |
+
[
|
| 629 |
+
[["hello", "world"], [True, True], True, True],
|
| 630 |
+
[["", ""], [False, False], False, False],
|
| 631 |
+
[["hello", ""], [True, False], True, False],
|
| 632 |
+
[["", "world"], [False, True], True, False],
|
| 633 |
+
],
|
| 634 |
+
)
|
| 635 |
+
def test_cast_to_bool(strings, cast_answer, any_answer, all_answer):
|
| 636 |
+
sarr = np.array(strings, dtype="T")
|
| 637 |
+
assert_array_equal(sarr.astype("bool"), cast_answer)
|
| 638 |
+
|
| 639 |
+
assert np.any(sarr) == any_answer
|
| 640 |
+
assert np.all(sarr) == all_answer
|
| 641 |
+
|
| 642 |
+
|
| 643 |
+
@pytest.mark.parametrize(
|
| 644 |
+
("strings", "cast_answer"),
|
| 645 |
+
[
|
| 646 |
+
[[True, True], ["True", "True"]],
|
| 647 |
+
[[False, False], ["False", "False"]],
|
| 648 |
+
[[True, False], ["True", "False"]],
|
| 649 |
+
[[False, True], ["False", "True"]],
|
| 650 |
+
],
|
| 651 |
+
)
|
| 652 |
+
def test_cast_from_bool(strings, cast_answer):
|
| 653 |
+
barr = np.array(strings, dtype=bool)
|
| 654 |
+
assert_array_equal(barr.astype("T"), np.array(cast_answer, dtype="T"))
|
| 655 |
+
|
| 656 |
+
|
| 657 |
+
@pytest.mark.parametrize("bitsize", [8, 16, 32, 64])
|
| 658 |
+
@pytest.mark.parametrize("signed", [True, False])
|
| 659 |
+
def test_sized_integer_casts(bitsize, signed):
|
| 660 |
+
idtype = f"int{bitsize}"
|
| 661 |
+
if signed:
|
| 662 |
+
inp = [-(2**p - 1) for p in reversed(range(bitsize - 1))]
|
| 663 |
+
inp += [2**p - 1 for p in range(1, bitsize - 1)]
|
| 664 |
+
else:
|
| 665 |
+
idtype = "u" + idtype
|
| 666 |
+
inp = [2**p - 1 for p in range(bitsize)]
|
| 667 |
+
ainp = np.array(inp, dtype=idtype)
|
| 668 |
+
assert_array_equal(ainp, ainp.astype("T").astype(idtype))
|
| 669 |
+
|
| 670 |
+
# safe casting works
|
| 671 |
+
ainp.astype("T", casting="safe")
|
| 672 |
+
|
| 673 |
+
with pytest.raises(TypeError):
|
| 674 |
+
ainp.astype("T").astype(idtype, casting="safe")
|
| 675 |
+
|
| 676 |
+
oob = [str(2**bitsize), str(-(2**bitsize))]
|
| 677 |
+
with pytest.raises(OverflowError):
|
| 678 |
+
np.array(oob, dtype="T").astype(idtype)
|
| 679 |
+
|
| 680 |
+
with pytest.raises(ValueError):
|
| 681 |
+
np.array(["1", np.nan, "3"],
|
| 682 |
+
dtype=StringDType(na_object=np.nan)).astype(idtype)
|
| 683 |
+
|
| 684 |
+
|
| 685 |
+
@pytest.mark.parametrize("typename", ["byte", "short", "int", "longlong"])
|
| 686 |
+
@pytest.mark.parametrize("signed", ["", "u"])
|
| 687 |
+
def test_unsized_integer_casts(typename, signed):
|
| 688 |
+
idtype = f"{signed}{typename}"
|
| 689 |
+
|
| 690 |
+
inp = [1, 2, 3, 4]
|
| 691 |
+
ainp = np.array(inp, dtype=idtype)
|
| 692 |
+
assert_array_equal(ainp, ainp.astype("T").astype(idtype))
|
| 693 |
+
|
| 694 |
+
|
| 695 |
+
@pytest.mark.parametrize(
|
| 696 |
+
"typename",
|
| 697 |
+
[
|
| 698 |
+
pytest.param(
|
| 699 |
+
"longdouble",
|
| 700 |
+
marks=pytest.mark.xfail(
|
| 701 |
+
np.dtypes.LongDoubleDType() != np.dtypes.Float64DType(),
|
| 702 |
+
reason="numpy lacks an ld2a implementation",
|
| 703 |
+
strict=True,
|
| 704 |
+
),
|
| 705 |
+
),
|
| 706 |
+
"float64",
|
| 707 |
+
"float32",
|
| 708 |
+
"float16",
|
| 709 |
+
],
|
| 710 |
+
)
|
| 711 |
+
def test_float_casts(typename):
|
| 712 |
+
inp = [1.1, 2.8, -3.2, 2.7e4]
|
| 713 |
+
ainp = np.array(inp, dtype=typename)
|
| 714 |
+
assert_array_equal(ainp, ainp.astype("T").astype(typename))
|
| 715 |
+
|
| 716 |
+
inp = [0.1]
|
| 717 |
+
sres = np.array(inp, dtype=typename).astype("T")
|
| 718 |
+
res = sres.astype(typename)
|
| 719 |
+
assert_array_equal(np.array(inp, dtype=typename), res)
|
| 720 |
+
assert sres[0] == "0.1"
|
| 721 |
+
|
| 722 |
+
if typename == "longdouble":
|
| 723 |
+
# let's not worry about platform-dependent rounding of longdouble
|
| 724 |
+
return
|
| 725 |
+
|
| 726 |
+
fi = np.finfo(typename)
|
| 727 |
+
|
| 728 |
+
inp = [1e-324, fi.smallest_subnormal, -1e-324, -fi.smallest_subnormal]
|
| 729 |
+
eres = [0, fi.smallest_subnormal, -0, -fi.smallest_subnormal]
|
| 730 |
+
res = np.array(inp, dtype=typename).astype("T").astype(typename)
|
| 731 |
+
assert_array_equal(eres, res)
|
| 732 |
+
|
| 733 |
+
inp = [2e308, fi.max, -2e308, fi.min]
|
| 734 |
+
eres = [np.inf, fi.max, -np.inf, fi.min]
|
| 735 |
+
res = np.array(inp, dtype=typename).astype("T").astype(typename)
|
| 736 |
+
assert_array_equal(eres, res)
|
| 737 |
+
|
| 738 |
+
|
| 739 |
+
@pytest.mark.parametrize(
|
| 740 |
+
"typename",
|
| 741 |
+
[
|
| 742 |
+
"csingle",
|
| 743 |
+
"cdouble",
|
| 744 |
+
pytest.param(
|
| 745 |
+
"clongdouble",
|
| 746 |
+
marks=pytest.mark.xfail(
|
| 747 |
+
np.dtypes.CLongDoubleDType() != np.dtypes.Complex128DType(),
|
| 748 |
+
reason="numpy lacks an ld2a implementation",
|
| 749 |
+
strict=True,
|
| 750 |
+
),
|
| 751 |
+
),
|
| 752 |
+
],
|
| 753 |
+
)
|
| 754 |
+
def test_cfloat_casts(typename):
|
| 755 |
+
inp = [1.1 + 1.1j, 2.8 + 2.8j, -3.2 - 3.2j, 2.7e4 + 2.7e4j]
|
| 756 |
+
ainp = np.array(inp, dtype=typename)
|
| 757 |
+
assert_array_equal(ainp, ainp.astype("T").astype(typename))
|
| 758 |
+
|
| 759 |
+
inp = [0.1 + 0.1j]
|
| 760 |
+
sres = np.array(inp, dtype=typename).astype("T")
|
| 761 |
+
res = sres.astype(typename)
|
| 762 |
+
assert_array_equal(np.array(inp, dtype=typename), res)
|
| 763 |
+
assert sres[0] == "(0.1+0.1j)"
|
| 764 |
+
|
| 765 |
+
|
| 766 |
+
def test_take(string_list):
|
| 767 |
+
sarr = np.array(string_list, dtype="T")
|
| 768 |
+
res = sarr.take(np.arange(len(string_list)))
|
| 769 |
+
assert_array_equal(sarr, res)
|
| 770 |
+
|
| 771 |
+
# make sure it also works for out
|
| 772 |
+
out = np.empty(len(string_list), dtype="T")
|
| 773 |
+
out[0] = "hello"
|
| 774 |
+
res = sarr.take(np.arange(len(string_list)), out=out)
|
| 775 |
+
assert res is out
|
| 776 |
+
assert_array_equal(sarr, res)
|
| 777 |
+
|
| 778 |
+
|
| 779 |
+
@pytest.mark.parametrize("use_out", [True, False])
|
| 780 |
+
@pytest.mark.parametrize(
|
| 781 |
+
"ufunc_name,func",
|
| 782 |
+
[
|
| 783 |
+
("min", min),
|
| 784 |
+
("max", max),
|
| 785 |
+
],
|
| 786 |
+
)
|
| 787 |
+
def test_ufuncs_minmax(string_list, ufunc_name, func, use_out):
|
| 788 |
+
"""Test that the min/max ufuncs match Python builtin min/max behavior."""
|
| 789 |
+
arr = np.array(string_list, dtype="T")
|
| 790 |
+
uarr = np.array(string_list, dtype=str)
|
| 791 |
+
res = np.array(func(string_list), dtype="T")
|
| 792 |
+
assert_array_equal(getattr(arr, ufunc_name)(), res)
|
| 793 |
+
|
| 794 |
+
ufunc = getattr(np, ufunc_name + "imum")
|
| 795 |
+
|
| 796 |
+
if use_out:
|
| 797 |
+
res = ufunc(arr, arr, out=arr)
|
| 798 |
+
else:
|
| 799 |
+
res = ufunc(arr, arr)
|
| 800 |
+
|
| 801 |
+
assert_array_equal(uarr, res)
|
| 802 |
+
assert_array_equal(getattr(arr, ufunc_name)(), func(string_list))
|
| 803 |
+
|
| 804 |
+
|
| 805 |
+
def test_max_regression():
|
| 806 |
+
arr = np.array(['y', 'y', 'z'], dtype="T")
|
| 807 |
+
assert arr.max() == 'z'
|
| 808 |
+
|
| 809 |
+
|
| 810 |
+
@pytest.mark.parametrize("use_out", [True, False])
|
| 811 |
+
@pytest.mark.parametrize(
|
| 812 |
+
"other_strings",
|
| 813 |
+
[
|
| 814 |
+
["abc", "def" * 500, "ghi" * 16, "🤣" * 100, "📵", "😰"],
|
| 815 |
+
["🚜", "🙃", "😾", "😹", "🚠", "🚌"],
|
| 816 |
+
["🥦", "¨", "⨯", "∰ ", "⨌ ", "⎶ "],
|
| 817 |
+
],
|
| 818 |
+
)
|
| 819 |
+
def test_ufunc_add(dtype, string_list, other_strings, use_out):
|
| 820 |
+
arr1 = np.array(string_list, dtype=dtype)
|
| 821 |
+
arr2 = np.array(other_strings, dtype=dtype)
|
| 822 |
+
result = np.array([a + b for a, b in zip(arr1, arr2)], dtype=dtype)
|
| 823 |
+
|
| 824 |
+
if use_out:
|
| 825 |
+
res = np.add(arr1, arr2, out=arr1)
|
| 826 |
+
else:
|
| 827 |
+
res = np.add(arr1, arr2)
|
| 828 |
+
|
| 829 |
+
assert_array_equal(res, result)
|
| 830 |
+
|
| 831 |
+
if not hasattr(dtype, "na_object"):
|
| 832 |
+
return
|
| 833 |
+
|
| 834 |
+
is_nan = isinstance(dtype.na_object, float) and np.isnan(dtype.na_object)
|
| 835 |
+
is_str = isinstance(dtype.na_object, str)
|
| 836 |
+
bool_errors = 0
|
| 837 |
+
try:
|
| 838 |
+
bool(dtype.na_object)
|
| 839 |
+
except TypeError:
|
| 840 |
+
bool_errors = 1
|
| 841 |
+
|
| 842 |
+
arr1 = np.array([dtype.na_object] + string_list, dtype=dtype)
|
| 843 |
+
arr2 = np.array(other_strings + [dtype.na_object], dtype=dtype)
|
| 844 |
+
|
| 845 |
+
if is_nan or bool_errors or is_str:
|
| 846 |
+
res = np.add(arr1, arr2)
|
| 847 |
+
assert_array_equal(res[1:-1], arr1[1:-1] + arr2[1:-1])
|
| 848 |
+
if not is_str:
|
| 849 |
+
assert res[0] is dtype.na_object and res[-1] is dtype.na_object
|
| 850 |
+
else:
|
| 851 |
+
assert res[0] == dtype.na_object + arr2[0]
|
| 852 |
+
assert res[-1] == arr1[-1] + dtype.na_object
|
| 853 |
+
else:
|
| 854 |
+
with pytest.raises(ValueError):
|
| 855 |
+
np.add(arr1, arr2)
|
| 856 |
+
|
| 857 |
+
|
| 858 |
+
def test_ufunc_add_reduce(dtype):
|
| 859 |
+
values = ["a", "this is a long string", "c"]
|
| 860 |
+
arr = np.array(values, dtype=dtype)
|
| 861 |
+
out = np.empty((), dtype=dtype)
|
| 862 |
+
|
| 863 |
+
expected = np.array("".join(values), dtype=dtype)
|
| 864 |
+
assert_array_equal(np.add.reduce(arr), expected)
|
| 865 |
+
|
| 866 |
+
np.add.reduce(arr, out=out)
|
| 867 |
+
assert_array_equal(out, expected)
|
| 868 |
+
|
| 869 |
+
|
| 870 |
+
def test_add_promoter(string_list):
|
| 871 |
+
arr = np.array(string_list, dtype=StringDType())
|
| 872 |
+
lresult = np.array(["hello" + s for s in string_list], dtype=StringDType())
|
| 873 |
+
rresult = np.array([s + "hello" for s in string_list], dtype=StringDType())
|
| 874 |
+
|
| 875 |
+
for op in ["hello", np.str_("hello"), np.array(["hello"])]:
|
| 876 |
+
assert_array_equal(op + arr, lresult)
|
| 877 |
+
assert_array_equal(arr + op, rresult)
|
| 878 |
+
|
| 879 |
+
# The promoter should be able to handle things if users pass `dtype=`
|
| 880 |
+
res = np.add("hello", string_list, dtype=StringDType)
|
| 881 |
+
assert res.dtype == StringDType()
|
| 882 |
+
|
| 883 |
+
# The promoter should not kick in if users override the input,
|
| 884 |
+
# which means arr is cast, this fails because of the unknown length.
|
| 885 |
+
with pytest.raises(TypeError, match="cannot cast dtype"):
|
| 886 |
+
np.add(arr, "add", signature=("U", "U", None), casting="unsafe")
|
| 887 |
+
|
| 888 |
+
# But it must simply reject the following:
|
| 889 |
+
with pytest.raises(TypeError, match=".*did not contain a loop"):
|
| 890 |
+
np.add(arr, "add", signature=(None, "U", None))
|
| 891 |
+
|
| 892 |
+
with pytest.raises(TypeError, match=".*did not contain a loop"):
|
| 893 |
+
np.add("a", "b", signature=("U", "U", StringDType))
|
| 894 |
+
|
| 895 |
+
|
| 896 |
+
def test_add_no_legacy_promote_with_signature():
|
| 897 |
+
# Possibly misplaced, but useful to test with string DType. We check that
|
| 898 |
+
# if there is clearly no loop found, a stray `dtype=` doesn't break things
|
| 899 |
+
# Regression test for the bad error in gh-26735
|
| 900 |
+
# (If legacy promotion is gone, this can be deleted...)
|
| 901 |
+
with pytest.raises(TypeError, match=".*did not contain a loop"):
|
| 902 |
+
np.add("3", 6, dtype=StringDType)
|
| 903 |
+
|
| 904 |
+
|
| 905 |
+
def test_add_promoter_reduce():
|
| 906 |
+
# Exact TypeError could change, but ensure StringDtype doesn't match
|
| 907 |
+
with pytest.raises(TypeError, match="the resolved dtypes are not"):
|
| 908 |
+
np.add.reduce(np.array(["a", "b"], dtype="U"))
|
| 909 |
+
|
| 910 |
+
# On the other hand, using `dtype=T` in the *ufunc* should work.
|
| 911 |
+
np.add.reduce(np.array(["a", "b"], dtype="U"), dtype=np.dtypes.StringDType)
|
| 912 |
+
|
| 913 |
+
|
| 914 |
+
def test_multiply_reduce():
|
| 915 |
+
# At the time of writing (NumPy 2.0) this is very limited (and rather
|
| 916 |
+
# ridiculous anyway). But it works and actually makes some sense...
|
| 917 |
+
# (NumPy does not allow non-scalar initial values)
|
| 918 |
+
repeats = np.array([2, 3, 4])
|
| 919 |
+
val = "school-🚌"
|
| 920 |
+
res = np.multiply.reduce(repeats, initial=val, dtype=np.dtypes.StringDType)
|
| 921 |
+
assert res == val * np.prod(repeats)
|
| 922 |
+
|
| 923 |
+
|
| 924 |
+
def test_multiply_two_string_raises():
|
| 925 |
+
arr = np.array(["hello", "world"], dtype="T")
|
| 926 |
+
with pytest.raises(np._core._exceptions._UFuncNoLoopError):
|
| 927 |
+
np.multiply(arr, arr)
|
| 928 |
+
|
| 929 |
+
|
| 930 |
+
@pytest.mark.parametrize("use_out", [True, False])
|
| 931 |
+
@pytest.mark.parametrize("other", [2, [2, 1, 3, 4, 1, 3]])
|
| 932 |
+
@pytest.mark.parametrize(
|
| 933 |
+
"other_dtype",
|
| 934 |
+
[
|
| 935 |
+
None,
|
| 936 |
+
"int8",
|
| 937 |
+
"int16",
|
| 938 |
+
"int32",
|
| 939 |
+
"int64",
|
| 940 |
+
"uint8",
|
| 941 |
+
"uint16",
|
| 942 |
+
"uint32",
|
| 943 |
+
"uint64",
|
| 944 |
+
"short",
|
| 945 |
+
"int",
|
| 946 |
+
"intp",
|
| 947 |
+
"long",
|
| 948 |
+
"longlong",
|
| 949 |
+
"ushort",
|
| 950 |
+
"uint",
|
| 951 |
+
"uintp",
|
| 952 |
+
"ulong",
|
| 953 |
+
"ulonglong",
|
| 954 |
+
],
|
| 955 |
+
)
|
| 956 |
+
def test_ufunc_multiply(dtype, string_list, other, other_dtype, use_out):
|
| 957 |
+
"""Test the two-argument ufuncs match python builtin behavior."""
|
| 958 |
+
arr = np.array(string_list, dtype=dtype)
|
| 959 |
+
if other_dtype is not None:
|
| 960 |
+
other_dtype = np.dtype(other_dtype)
|
| 961 |
+
try:
|
| 962 |
+
len(other)
|
| 963 |
+
result = [s * o for s, o in zip(string_list, other)]
|
| 964 |
+
other = np.array(other)
|
| 965 |
+
if other_dtype is not None:
|
| 966 |
+
other = other.astype(other_dtype)
|
| 967 |
+
except TypeError:
|
| 968 |
+
if other_dtype is not None:
|
| 969 |
+
other = other_dtype.type(other)
|
| 970 |
+
result = [s * other for s in string_list]
|
| 971 |
+
|
| 972 |
+
if use_out:
|
| 973 |
+
arr_cache = arr.copy()
|
| 974 |
+
lres = np.multiply(arr, other, out=arr)
|
| 975 |
+
assert_array_equal(lres, result)
|
| 976 |
+
arr[:] = arr_cache
|
| 977 |
+
assert lres is arr
|
| 978 |
+
arr *= other
|
| 979 |
+
assert_array_equal(arr, result)
|
| 980 |
+
arr[:] = arr_cache
|
| 981 |
+
rres = np.multiply(other, arr, out=arr)
|
| 982 |
+
assert rres is arr
|
| 983 |
+
assert_array_equal(rres, result)
|
| 984 |
+
else:
|
| 985 |
+
lres = arr * other
|
| 986 |
+
assert_array_equal(lres, result)
|
| 987 |
+
rres = other * arr
|
| 988 |
+
assert_array_equal(rres, result)
|
| 989 |
+
|
| 990 |
+
if not hasattr(dtype, "na_object"):
|
| 991 |
+
return
|
| 992 |
+
|
| 993 |
+
is_nan = np.isnan(np.array([dtype.na_object], dtype=dtype))[0]
|
| 994 |
+
is_str = isinstance(dtype.na_object, str)
|
| 995 |
+
bool_errors = 0
|
| 996 |
+
try:
|
| 997 |
+
bool(dtype.na_object)
|
| 998 |
+
except TypeError:
|
| 999 |
+
bool_errors = 1
|
| 1000 |
+
|
| 1001 |
+
arr = np.array(string_list + [dtype.na_object], dtype=dtype)
|
| 1002 |
+
|
| 1003 |
+
try:
|
| 1004 |
+
len(other)
|
| 1005 |
+
other = np.append(other, 3)
|
| 1006 |
+
if other_dtype is not None:
|
| 1007 |
+
other = other.astype(other_dtype)
|
| 1008 |
+
except TypeError:
|
| 1009 |
+
pass
|
| 1010 |
+
|
| 1011 |
+
if is_nan or bool_errors or is_str:
|
| 1012 |
+
for res in [arr * other, other * arr]:
|
| 1013 |
+
assert_array_equal(res[:-1], result)
|
| 1014 |
+
if not is_str:
|
| 1015 |
+
assert res[-1] is dtype.na_object
|
| 1016 |
+
else:
|
| 1017 |
+
try:
|
| 1018 |
+
assert res[-1] == dtype.na_object * other[-1]
|
| 1019 |
+
except (IndexError, TypeError):
|
| 1020 |
+
assert res[-1] == dtype.na_object * other
|
| 1021 |
+
else:
|
| 1022 |
+
with pytest.raises(TypeError):
|
| 1023 |
+
arr * other
|
| 1024 |
+
with pytest.raises(TypeError):
|
| 1025 |
+
other * arr
|
| 1026 |
+
|
| 1027 |
+
|
| 1028 |
+
def test_findlike_promoters():
|
| 1029 |
+
r = "Wally"
|
| 1030 |
+
l = "Where's Wally?"
|
| 1031 |
+
s = np.int32(3)
|
| 1032 |
+
e = np.int8(13)
|
| 1033 |
+
for dtypes in [("T", "U"), ("U", "T")]:
|
| 1034 |
+
for function, answer in [
|
| 1035 |
+
(np.strings.index, 8),
|
| 1036 |
+
(np.strings.endswith, True),
|
| 1037 |
+
]:
|
| 1038 |
+
assert answer == function(
|
| 1039 |
+
np.array(l, dtype=dtypes[0]), np.array(r, dtype=dtypes[1]), s, e
|
| 1040 |
+
)
|
| 1041 |
+
|
| 1042 |
+
|
| 1043 |
+
def test_strip_promoter():
|
| 1044 |
+
arg = ["Hello!!!!", "Hello??!!"]
|
| 1045 |
+
strip_char = "!"
|
| 1046 |
+
answer = ["Hello", "Hello??"]
|
| 1047 |
+
for dtypes in [("T", "U"), ("U", "T")]:
|
| 1048 |
+
result = np.strings.strip(
|
| 1049 |
+
np.array(arg, dtype=dtypes[0]),
|
| 1050 |
+
np.array(strip_char, dtype=dtypes[1])
|
| 1051 |
+
)
|
| 1052 |
+
assert_array_equal(result, answer)
|
| 1053 |
+
assert result.dtype.char == "T"
|
| 1054 |
+
|
| 1055 |
+
|
| 1056 |
+
def test_replace_promoter():
|
| 1057 |
+
arg = ["Hello, planet!", "planet, Hello!"]
|
| 1058 |
+
old = "planet"
|
| 1059 |
+
new = "world"
|
| 1060 |
+
answer = ["Hello, world!", "world, Hello!"]
|
| 1061 |
+
for dtypes in itertools.product("TU", repeat=3):
|
| 1062 |
+
if dtypes == ("U", "U", "U"):
|
| 1063 |
+
continue
|
| 1064 |
+
answer_arr = np.strings.replace(
|
| 1065 |
+
np.array(arg, dtype=dtypes[0]),
|
| 1066 |
+
np.array(old, dtype=dtypes[1]),
|
| 1067 |
+
np.array(new, dtype=dtypes[2]),
|
| 1068 |
+
)
|
| 1069 |
+
assert_array_equal(answer_arr, answer)
|
| 1070 |
+
assert answer_arr.dtype.char == "T"
|
| 1071 |
+
|
| 1072 |
+
|
| 1073 |
+
def test_center_promoter():
|
| 1074 |
+
arg = ["Hello", "planet!"]
|
| 1075 |
+
fillchar = "/"
|
| 1076 |
+
for dtypes in [("T", "U"), ("U", "T")]:
|
| 1077 |
+
answer = np.strings.center(
|
| 1078 |
+
np.array(arg, dtype=dtypes[0]), 9, np.array(fillchar, dtype=dtypes[1])
|
| 1079 |
+
)
|
| 1080 |
+
assert_array_equal(answer, ["//Hello//", "/planet!/"])
|
| 1081 |
+
assert answer.dtype.char == "T"
|
| 1082 |
+
|
| 1083 |
+
|
| 1084 |
+
DATETIME_INPUT = [
|
| 1085 |
+
np.datetime64("1923-04-14T12:43:12"),
|
| 1086 |
+
np.datetime64("1994-06-21T14:43:15"),
|
| 1087 |
+
np.datetime64("2001-10-15T04:10:32"),
|
| 1088 |
+
np.datetime64("NaT"),
|
| 1089 |
+
np.datetime64("1995-11-25T16:02:16"),
|
| 1090 |
+
np.datetime64("2005-01-04T03:14:12"),
|
| 1091 |
+
np.datetime64("2041-12-03T14:05:03"),
|
| 1092 |
+
]
|
| 1093 |
+
|
| 1094 |
+
|
| 1095 |
+
TIMEDELTA_INPUT = [
|
| 1096 |
+
np.timedelta64(12358, "s"),
|
| 1097 |
+
np.timedelta64(23, "s"),
|
| 1098 |
+
np.timedelta64(74, "s"),
|
| 1099 |
+
np.timedelta64("NaT"),
|
| 1100 |
+
np.timedelta64(23, "s"),
|
| 1101 |
+
np.timedelta64(73, "s"),
|
| 1102 |
+
np.timedelta64(7, "s"),
|
| 1103 |
+
]
|
| 1104 |
+
|
| 1105 |
+
|
| 1106 |
+
@pytest.mark.parametrize(
|
| 1107 |
+
"input_data, input_dtype",
|
| 1108 |
+
[
|
| 1109 |
+
(DATETIME_INPUT, "M8[s]"),
|
| 1110 |
+
(TIMEDELTA_INPUT, "m8[s]")
|
| 1111 |
+
]
|
| 1112 |
+
)
|
| 1113 |
+
def test_datetime_timedelta_cast(dtype, input_data, input_dtype):
|
| 1114 |
+
|
| 1115 |
+
a = np.array(input_data, dtype=input_dtype)
|
| 1116 |
+
|
| 1117 |
+
has_na = hasattr(dtype, "na_object")
|
| 1118 |
+
is_str = isinstance(getattr(dtype, "na_object", None), str)
|
| 1119 |
+
|
| 1120 |
+
if not has_na or is_str:
|
| 1121 |
+
a = np.delete(a, 3)
|
| 1122 |
+
|
| 1123 |
+
sa = a.astype(dtype)
|
| 1124 |
+
ra = sa.astype(a.dtype)
|
| 1125 |
+
|
| 1126 |
+
if has_na and not is_str:
|
| 1127 |
+
assert sa[3] is dtype.na_object
|
| 1128 |
+
assert np.isnat(ra[3])
|
| 1129 |
+
|
| 1130 |
+
assert_array_equal(a, ra)
|
| 1131 |
+
|
| 1132 |
+
if has_na and not is_str:
|
| 1133 |
+
# don't worry about comparing how NaT is converted
|
| 1134 |
+
sa = np.delete(sa, 3)
|
| 1135 |
+
a = np.delete(a, 3)
|
| 1136 |
+
|
| 1137 |
+
if input_dtype.startswith("M"):
|
| 1138 |
+
assert_array_equal(sa, a.astype("U"))
|
| 1139 |
+
else:
|
| 1140 |
+
# The timedelta to unicode cast produces strings
|
| 1141 |
+
# that aren't round-trippable and we don't want to
|
| 1142 |
+
# reproduce that behavior in stringdtype
|
| 1143 |
+
assert_array_equal(sa, a.astype("int64").astype("U"))
|
| 1144 |
+
|
| 1145 |
+
|
| 1146 |
+
def test_nat_casts():
|
| 1147 |
+
s = 'nat'
|
| 1148 |
+
all_nats = itertools.product(*zip(s.upper(), s.lower()))
|
| 1149 |
+
all_nats = list(map(''.join, all_nats))
|
| 1150 |
+
NaT_dt = np.datetime64('NaT')
|
| 1151 |
+
NaT_td = np.timedelta64('NaT')
|
| 1152 |
+
for na_object in [np._NoValue, None, np.nan, 'nat', '']:
|
| 1153 |
+
# numpy treats empty string and all case combinations of 'nat' as NaT
|
| 1154 |
+
dtype = StringDType(na_object=na_object)
|
| 1155 |
+
arr = np.array([''] + all_nats, dtype=dtype)
|
| 1156 |
+
dt_array = arr.astype('M8[s]')
|
| 1157 |
+
td_array = arr.astype('m8[s]')
|
| 1158 |
+
assert_array_equal(dt_array, NaT_dt)
|
| 1159 |
+
assert_array_equal(td_array, NaT_td)
|
| 1160 |
+
|
| 1161 |
+
if na_object is np._NoValue:
|
| 1162 |
+
output_object = 'NaT'
|
| 1163 |
+
else:
|
| 1164 |
+
output_object = na_object
|
| 1165 |
+
|
| 1166 |
+
for arr in [dt_array, td_array]:
|
| 1167 |
+
assert_array_equal(
|
| 1168 |
+
arr.astype(dtype),
|
| 1169 |
+
np.array([output_object]*arr.size, dtype=dtype))
|
| 1170 |
+
|
| 1171 |
+
|
| 1172 |
+
def test_nat_conversion():
    # With coercion disabled, implicitly stringifying a NaT scalar during
    # array construction must raise instead of silently coercing.
    for nat in (np.datetime64("NaT", "s"), np.timedelta64("NaT", "s")):
        with pytest.raises(ValueError, match="string coercion is disabled"):
            np.array(["a", nat], dtype=StringDType(coerce=False))
|
| 1177 |
+
|
| 1178 |
+
def test_growing_strings(dtype):
|
| 1179 |
+
# growing a string leads to a heap allocation, this tests to make sure
|
| 1180 |
+
# we do that bookkeeping correctly for all possible starting cases
|
| 1181 |
+
data = [
|
| 1182 |
+
"hello", # a short string
|
| 1183 |
+
"abcdefghijklmnopqestuvwxyz", # a medium heap-allocated string
|
| 1184 |
+
"hello" * 200, # a long heap-allocated string
|
| 1185 |
+
]
|
| 1186 |
+
|
| 1187 |
+
arr = np.array(data, dtype=dtype)
|
| 1188 |
+
uarr = np.array(data, dtype=str)
|
| 1189 |
+
|
| 1190 |
+
for _ in range(5):
|
| 1191 |
+
arr = arr + arr
|
| 1192 |
+
uarr = uarr + uarr
|
| 1193 |
+
|
| 1194 |
+
assert_array_equal(arr, uarr)
|
| 1195 |
+
|
| 1196 |
+
|
| 1197 |
+
@pytest.mark.skipif(IS_WASM, reason="no threading support in wasm")
def test_threaded_access_and_mutation(dtype, random_string_list):
    # this test uses an RNG and may crash or cause deadlocks if there is a
    # threading bug
    rng = np.random.default_rng(0x4D3D3D3)

    def worker(arr):
        roll = rng.random()
        # Randomly either mutate one element, run a ufunc (with and
        # without an output array), or re-initialize the whole array.
        if roll < 0.25:
            idx = np.random.randint(0, arr.size)
            arr[idx] = arr[idx] + "hello"
        elif roll < 0.5:
            if roll < 0.375:
                np.add(arr, arr)
            else:
                np.add(arr, arr, out=arr)
        elif roll < 0.75:
            if roll < 0.875:
                np.multiply(arr, np.int64(2))
            else:
                np.multiply(arr, np.int64(2), out=arr)
        else:
            arr[:] = random_string_list

    # Hammer a single shared array from many threads; success is simply
    # not crashing or deadlocking.
    with concurrent.futures.ThreadPoolExecutor(max_workers=8) as pool:
        arr = np.array(random_string_list, dtype=dtype)
        futures = [pool.submit(worker, arr) for _ in range(500)]

        for fut in futures:
            fut.result()
|
| 1230 |
+
|
| 1231 |
+
UFUNC_TEST_DATA = [
|
| 1232 |
+
"hello" * 10,
|
| 1233 |
+
"Ae¢☃€ 😊" * 20,
|
| 1234 |
+
"entry\nwith\nnewlines",
|
| 1235 |
+
"entry\twith\ttabs",
|
| 1236 |
+
]
|
| 1237 |
+
|
| 1238 |
+
|
| 1239 |
+
@pytest.fixture
|
| 1240 |
+
def string_array(dtype):
|
| 1241 |
+
return np.array(UFUNC_TEST_DATA, dtype=dtype)
|
| 1242 |
+
|
| 1243 |
+
|
| 1244 |
+
@pytest.fixture
|
| 1245 |
+
def unicode_array():
|
| 1246 |
+
return np.array(UFUNC_TEST_DATA, dtype=np.str_)
|
| 1247 |
+
|
| 1248 |
+
|
| 1249 |
+
NAN_PRESERVING_FUNCTIONS = [
|
| 1250 |
+
"capitalize",
|
| 1251 |
+
"expandtabs",
|
| 1252 |
+
"lower",
|
| 1253 |
+
"lstrip",
|
| 1254 |
+
"rstrip",
|
| 1255 |
+
"splitlines",
|
| 1256 |
+
"strip",
|
| 1257 |
+
"swapcase",
|
| 1258 |
+
"title",
|
| 1259 |
+
"upper",
|
| 1260 |
+
]
|
| 1261 |
+
|
| 1262 |
+
BOOL_OUTPUT_FUNCTIONS = [
|
| 1263 |
+
"isalnum",
|
| 1264 |
+
"isalpha",
|
| 1265 |
+
"isdigit",
|
| 1266 |
+
"islower",
|
| 1267 |
+
"isspace",
|
| 1268 |
+
"istitle",
|
| 1269 |
+
"isupper",
|
| 1270 |
+
"isnumeric",
|
| 1271 |
+
"isdecimal",
|
| 1272 |
+
]
|
| 1273 |
+
|
| 1274 |
+
# All unary string functions exercised by test_unary. The original list
# repeated "isalnum", "islower", "istitle" and "isupper", which only
# re-ran identical parametrized cases; the duplicates are removed here.
UNARY_FUNCTIONS = [
    "str_len",
    "capitalize",
    "expandtabs",
    "isalnum",
    "isalpha",
    "isdigit",
    "islower",
    "isspace",
    "istitle",
    "isupper",
    "lower",
    "lstrip",
    "rstrip",
    "splitlines",
    "strip",
    "swapcase",
    "title",
    "upper",
    "isnumeric",
    "isdecimal",
]
|
| 1301 |
+
UNIMPLEMENTED_VEC_STRING_FUNCTIONS = [
|
| 1302 |
+
"capitalize",
|
| 1303 |
+
"expandtabs",
|
| 1304 |
+
"lower",
|
| 1305 |
+
"splitlines",
|
| 1306 |
+
"swapcase",
|
| 1307 |
+
"title",
|
| 1308 |
+
"upper",
|
| 1309 |
+
]
|
| 1310 |
+
|
| 1311 |
+
ONLY_IN_NP_CHAR = [
|
| 1312 |
+
"join",
|
| 1313 |
+
"split",
|
| 1314 |
+
"rsplit",
|
| 1315 |
+
"splitlines"
|
| 1316 |
+
]
|
| 1317 |
+
|
| 1318 |
+
|
| 1319 |
+
@pytest.mark.parametrize("function_name", UNARY_FUNCTIONS)
|
| 1320 |
+
def test_unary(string_array, unicode_array, function_name):
|
| 1321 |
+
if function_name in ONLY_IN_NP_CHAR:
|
| 1322 |
+
func = getattr(np.char, function_name)
|
| 1323 |
+
else:
|
| 1324 |
+
func = getattr(np.strings, function_name)
|
| 1325 |
+
dtype = string_array.dtype
|
| 1326 |
+
sres = func(string_array)
|
| 1327 |
+
ures = func(unicode_array)
|
| 1328 |
+
if sres.dtype == StringDType():
|
| 1329 |
+
ures = ures.astype(StringDType())
|
| 1330 |
+
assert_array_equal(sres, ures)
|
| 1331 |
+
|
| 1332 |
+
if not hasattr(dtype, "na_object"):
|
| 1333 |
+
return
|
| 1334 |
+
|
| 1335 |
+
is_nan = np.isnan(np.array([dtype.na_object], dtype=dtype))[0]
|
| 1336 |
+
is_str = isinstance(dtype.na_object, str)
|
| 1337 |
+
na_arr = np.insert(string_array, 0, dtype.na_object)
|
| 1338 |
+
|
| 1339 |
+
if function_name in UNIMPLEMENTED_VEC_STRING_FUNCTIONS:
|
| 1340 |
+
if not is_str:
|
| 1341 |
+
# to avoid these errors we'd need to add NA support to _vec_string
|
| 1342 |
+
with pytest.raises((ValueError, TypeError)):
|
| 1343 |
+
func(na_arr)
|
| 1344 |
+
else:
|
| 1345 |
+
if function_name == "splitlines":
|
| 1346 |
+
assert func(na_arr)[0] == func(dtype.na_object)[()]
|
| 1347 |
+
else:
|
| 1348 |
+
assert func(na_arr)[0] == func(dtype.na_object)
|
| 1349 |
+
return
|
| 1350 |
+
if function_name == "str_len" and not is_str:
|
| 1351 |
+
# str_len always errors for any non-string null, even NA ones because
|
| 1352 |
+
# it has an integer result
|
| 1353 |
+
with pytest.raises(ValueError):
|
| 1354 |
+
func(na_arr)
|
| 1355 |
+
return
|
| 1356 |
+
if function_name in BOOL_OUTPUT_FUNCTIONS:
|
| 1357 |
+
if is_nan:
|
| 1358 |
+
assert func(na_arr)[0] is np.False_
|
| 1359 |
+
elif is_str:
|
| 1360 |
+
assert func(na_arr)[0] == func(dtype.na_object)
|
| 1361 |
+
else:
|
| 1362 |
+
with pytest.raises(ValueError):
|
| 1363 |
+
func(na_arr)
|
| 1364 |
+
return
|
| 1365 |
+
if not (is_nan or is_str):
|
| 1366 |
+
with pytest.raises(ValueError):
|
| 1367 |
+
func(na_arr)
|
| 1368 |
+
return
|
| 1369 |
+
res = func(na_arr)
|
| 1370 |
+
if is_nan and function_name in NAN_PRESERVING_FUNCTIONS:
|
| 1371 |
+
assert res[0] is dtype.na_object
|
| 1372 |
+
elif is_str:
|
| 1373 |
+
assert res[0] == func(dtype.na_object)
|
| 1374 |
+
|
| 1375 |
+
|
| 1376 |
+
unicode_bug_fail = pytest.mark.xfail(
|
| 1377 |
+
reason="unicode output width is buggy", strict=True
|
| 1378 |
+
)
|
| 1379 |
+
|
| 1380 |
+
# None means that the argument is a string array
|
| 1381 |
+
BINARY_FUNCTIONS = [
|
| 1382 |
+
("add", (None, None)),
|
| 1383 |
+
("multiply", (None, 2)),
|
| 1384 |
+
("mod", ("format: %s", None)),
|
| 1385 |
+
("center", (None, 25)),
|
| 1386 |
+
("count", (None, "A")),
|
| 1387 |
+
("encode", (None, "UTF-8")),
|
| 1388 |
+
("endswith", (None, "lo")),
|
| 1389 |
+
("find", (None, "A")),
|
| 1390 |
+
("index", (None, "e")),
|
| 1391 |
+
("join", ("-", None)),
|
| 1392 |
+
("ljust", (None, 12)),
|
| 1393 |
+
("lstrip", (None, "A")),
|
| 1394 |
+
("partition", (None, "A")),
|
| 1395 |
+
("replace", (None, "A", "B")),
|
| 1396 |
+
("rfind", (None, "A")),
|
| 1397 |
+
("rindex", (None, "e")),
|
| 1398 |
+
("rjust", (None, 12)),
|
| 1399 |
+
("rsplit", (None, "A")),
|
| 1400 |
+
("rstrip", (None, "A")),
|
| 1401 |
+
("rpartition", (None, "A")),
|
| 1402 |
+
("split", (None, "A")),
|
| 1403 |
+
("strip", (None, "A")),
|
| 1404 |
+
("startswith", (None, "A")),
|
| 1405 |
+
("zfill", (None, 12)),
|
| 1406 |
+
]
|
| 1407 |
+
|
| 1408 |
+
# Binary string functions that propagate a NaN-like null through to the
# result. BUG FIX: a missing comma after "replace" made Python's implicit
# string concatenation fuse it with the next entry into "replacezfill",
# silently dropping "zfill" from the list (and "replace" was duplicated).
PASSES_THROUGH_NAN_NULLS = [
    "add",
    "center",
    "ljust",
    "multiply",
    "replace",
    "rjust",
    "strip",
    "lstrip",
    "rstrip",
    "zfill",
]
|
| 1422 |
+
NULLS_ARE_FALSEY = [
|
| 1423 |
+
"startswith",
|
| 1424 |
+
"endswith",
|
| 1425 |
+
]
|
| 1426 |
+
|
| 1427 |
+
NULLS_ALWAYS_ERROR = [
|
| 1428 |
+
"count",
|
| 1429 |
+
"find",
|
| 1430 |
+
"rfind",
|
| 1431 |
+
]
|
| 1432 |
+
|
| 1433 |
+
SUPPORTS_NULLS = (
|
| 1434 |
+
PASSES_THROUGH_NAN_NULLS +
|
| 1435 |
+
NULLS_ARE_FALSEY +
|
| 1436 |
+
NULLS_ALWAYS_ERROR
|
| 1437 |
+
)
|
| 1438 |
+
|
| 1439 |
+
|
| 1440 |
+
def call_func(func, args, array, sanitize=True):
    """Invoke ``func``, substituting ``array`` for the ``None`` placeholder
    in ``args``; when ``sanitize`` is true, string arguments are first cast
    to ``array.dtype``."""
    if args == (None, None):
        # both operands are the string array itself
        return func(array, array)
    if args[0] is None:
        if sanitize:
            extra = tuple(
                np.array(a, dtype=array.dtype) if isinstance(a, str) else a
                for a in args[1:]
            )
        else:
            extra = args[1:]
        return func(array, *extra)
    if args[1] is None:
        return func(args[0], array)
    # shouldn't ever happen
    assert 0
|
| 1457 |
+
|
| 1458 |
+
@pytest.mark.parametrize("function_name, args", BINARY_FUNCTIONS)
|
| 1459 |
+
def test_binary(string_array, unicode_array, function_name, args):
|
| 1460 |
+
if function_name in ONLY_IN_NP_CHAR:
|
| 1461 |
+
func = getattr(np.char, function_name)
|
| 1462 |
+
else:
|
| 1463 |
+
func = getattr(np.strings, function_name)
|
| 1464 |
+
sres = call_func(func, args, string_array)
|
| 1465 |
+
ures = call_func(func, args, unicode_array, sanitize=False)
|
| 1466 |
+
if not isinstance(sres, tuple) and sres.dtype == StringDType():
|
| 1467 |
+
ures = ures.astype(StringDType())
|
| 1468 |
+
assert_array_equal(sres, ures)
|
| 1469 |
+
|
| 1470 |
+
dtype = string_array.dtype
|
| 1471 |
+
if function_name not in SUPPORTS_NULLS or not hasattr(dtype, "na_object"):
|
| 1472 |
+
return
|
| 1473 |
+
|
| 1474 |
+
na_arr = np.insert(string_array, 0, dtype.na_object)
|
| 1475 |
+
is_nan = np.isnan(np.array([dtype.na_object], dtype=dtype))[0]
|
| 1476 |
+
is_str = isinstance(dtype.na_object, str)
|
| 1477 |
+
should_error = not (is_nan or is_str)
|
| 1478 |
+
|
| 1479 |
+
if (
|
| 1480 |
+
(function_name in NULLS_ALWAYS_ERROR and not is_str)
|
| 1481 |
+
or (function_name in PASSES_THROUGH_NAN_NULLS and should_error)
|
| 1482 |
+
or (function_name in NULLS_ARE_FALSEY and should_error)
|
| 1483 |
+
):
|
| 1484 |
+
with pytest.raises((ValueError, TypeError)):
|
| 1485 |
+
call_func(func, args, na_arr)
|
| 1486 |
+
return
|
| 1487 |
+
|
| 1488 |
+
res = call_func(func, args, na_arr)
|
| 1489 |
+
|
| 1490 |
+
if is_str:
|
| 1491 |
+
assert res[0] == call_func(func, args, na_arr[:1])
|
| 1492 |
+
elif function_name in NULLS_ARE_FALSEY:
|
| 1493 |
+
assert res[0] is np.False_
|
| 1494 |
+
elif function_name in PASSES_THROUGH_NAN_NULLS:
|
| 1495 |
+
assert res[0] is dtype.na_object
|
| 1496 |
+
else:
|
| 1497 |
+
# shouldn't ever get here
|
| 1498 |
+
assert 0
|
| 1499 |
+
|
| 1500 |
+
|
| 1501 |
+
@pytest.mark.parametrize("function, expected", [
    (np.strings.find, [[2, -1], [1, -1]]),
    (np.strings.startswith, [[False, False], [True, False]])])
@pytest.mark.parametrize("start, stop", [
    (1, 4),
    (np.int8(1), np.int8(4)),
    (np.array([1, 1], dtype='u2'), np.array([4, 4], dtype='u2'))])
def test_non_default_start_stop(function, start, stop, expected):
    # start/stop may be python ints, numpy scalars, or arrays; the string
    # ufuncs must honor them all.
    data = np.array([["--🐍--", "--🦜--"],
                     ["-🐍---", "-🦜---"]], "T")
    result = function(data, "🐍", start, stop)
    assert_array_equal(result, expected)
|
| 1514 |
+
|
| 1515 |
+
@pytest.mark.parametrize("count", [2, np.int8(2), np.array([2, 2], 'u2')])
def test_replace_non_default_repeat(count):
    # the replacement count may be a python int, a numpy scalar, or an array
    arr = np.array(["🐍--", "🦜-🦜-"], "T")
    replaced = np.strings.replace(arr, "🦜-", "🦜†", count)
    assert_array_equal(replaced, np.array(["🐍--", "🦜†🦜†"], "T"))
|
| 1521 |
+
|
| 1522 |
+
def test_strip_ljust_rjust_consistency(string_array, unicode_array):
|
| 1523 |
+
rjs = np.char.rjust(string_array, 1000)
|
| 1524 |
+
rju = np.char.rjust(unicode_array, 1000)
|
| 1525 |
+
|
| 1526 |
+
ljs = np.char.ljust(string_array, 1000)
|
| 1527 |
+
lju = np.char.ljust(unicode_array, 1000)
|
| 1528 |
+
|
| 1529 |
+
assert_array_equal(
|
| 1530 |
+
np.char.lstrip(rjs),
|
| 1531 |
+
np.char.lstrip(rju).astype(StringDType()),
|
| 1532 |
+
)
|
| 1533 |
+
|
| 1534 |
+
assert_array_equal(
|
| 1535 |
+
np.char.rstrip(ljs),
|
| 1536 |
+
np.char.rstrip(lju).astype(StringDType()),
|
| 1537 |
+
)
|
| 1538 |
+
|
| 1539 |
+
assert_array_equal(
|
| 1540 |
+
np.char.strip(ljs),
|
| 1541 |
+
np.char.strip(lju).astype(StringDType()),
|
| 1542 |
+
)
|
| 1543 |
+
|
| 1544 |
+
assert_array_equal(
|
| 1545 |
+
np.char.strip(rjs),
|
| 1546 |
+
np.char.strip(rju).astype(StringDType()),
|
| 1547 |
+
)
|
| 1548 |
+
|
| 1549 |
+
|
| 1550 |
+
def test_unset_na_coercion():
    # a dtype instance with an unset na object is compatible
    # with a dtype that has one set

    # this test uses the "add" and "equal" ufunc but all ufuncs that
    # accept more than one string argument and produce a string should
    # behave this way
    # TODO: generalize to more ufuncs
    words = ["hello", "world"]
    arr = np.array(words, dtype=StringDType(na_object=None))

    # addition works with bare python strings and with operands whose
    # dtype leaves the NA unset or matches arr's
    for other_dtype in [None, StringDType(), StringDType(coerce=False),
                        StringDType(na_object=None)]:
        if other_dtype is None:
            operand = "2"
        else:
            operand = np.array("2", dtype=other_dtype)
        assert_array_equal(arr + operand, ["hello2", "world2"])

    # dtype instances with distinct explicitly set NA objects are incompatible
    for other_dtype in [StringDType(na_object=pd_NA), StringDType(na_object="")]:
        operand = np.array("2", dtype=other_dtype)
        with pytest.raises(TypeError):
            arr + operand

    # comparisons only consider the na_object
    for other_dtype in [None, StringDType(), StringDType(coerce=True),
                        StringDType(na_object=None)]:
        if other_dtype is None:
            operand = words
        else:
            operand = np.array(words, dtype=other_dtype)
        assert_array_equal(arr, operand)

    for other_dtype in [StringDType(na_object=pd_NA),
                        StringDType(na_object=np.nan)]:
        operand = np.array(words, dtype=other_dtype)
        with pytest.raises(TypeError):
            arr == operand
|
| 1590 |
+
|
| 1591 |
+
def test_repeat(string_array):
|
| 1592 |
+
res = string_array.repeat(1000)
|
| 1593 |
+
# Create an empty array with expanded dimension, and fill it. Then,
|
| 1594 |
+
# reshape it to the expected result.
|
| 1595 |
+
expected = np.empty_like(string_array, shape=string_array.shape + (1000,))
|
| 1596 |
+
expected[...] = string_array[:, np.newaxis]
|
| 1597 |
+
expected = expected.reshape(-1)
|
| 1598 |
+
|
| 1599 |
+
assert_array_equal(res, expected, strict=True)
|
| 1600 |
+
|
| 1601 |
+
|
| 1602 |
+
@pytest.mark.parametrize("tile", [1, 6, (2, 5)])
def test_accumulation(string_array, tile):
    """Accumulation is odd for StringDType but tests dtypes with references.
    """
    # Fill with mostly empty strings to not create absurdly big strings
    arr = np.zeros_like(string_array, shape=(100,))
    arr[:len(string_array)] = string_array
    arr[-len(string_array):] = string_array

    # Bloat size a bit (get above thresholds and test >1 ndim).
    # BUG FIX: previously this tiled ``string_array`` instead of ``arr``,
    # which discarded the padded 100-element array built above and never
    # got above the intended size thresholds.
    arr = np.tile(arr, tile)

    # accumulate must agree with the object-dtype reference result
    res = np.add.accumulate(arr, axis=0)
    res_obj = np.add.accumulate(arr.astype(object), axis=0)
    assert_array_equal(res, res_obj.astype(arr.dtype), strict=True)

    if arr.ndim > 1:
        res = np.add.accumulate(arr, axis=-1)
        res_obj = np.add.accumulate(arr.astype(object), axis=-1)

        assert_array_equal(res, res_obj.astype(arr.dtype), strict=True)
|
| 1624 |
+
|
| 1625 |
+
class TestImplementation:
|
| 1626 |
+
"""Check that strings are stored in the arena when possible.
|
| 1627 |
+
|
| 1628 |
+
This tests implementation details, so should be adjusted if
|
| 1629 |
+
the implementation changes.
|
| 1630 |
+
"""
|
| 1631 |
+
|
| 1632 |
+
@classmethod
|
| 1633 |
+
def setup_class(self):
|
| 1634 |
+
self.MISSING = 0x80
|
| 1635 |
+
self.INITIALIZED = 0x40
|
| 1636 |
+
self.OUTSIDE_ARENA = 0x20
|
| 1637 |
+
self.LONG = 0x10
|
| 1638 |
+
self.dtype = StringDType(na_object=np.nan)
|
| 1639 |
+
self.sizeofstr = self.dtype.itemsize
|
| 1640 |
+
sp = self.dtype.itemsize // 2 # pointer size = sizeof(size_t)
|
| 1641 |
+
# Below, size is not strictly correct, since it really uses
|
| 1642 |
+
# 7 (or 3) bytes, but good enough for the tests here.
|
| 1643 |
+
self.view_dtype = np.dtype([
|
| 1644 |
+
('offset', f'u{sp}'),
|
| 1645 |
+
('size', f'u{sp // 2}'),
|
| 1646 |
+
('xsiz', f'V{sp // 2 - 1}'),
|
| 1647 |
+
('size_and_flags', 'u1'),
|
| 1648 |
+
] if sys.byteorder == 'little' else [
|
| 1649 |
+
('size_and_flags', 'u1'),
|
| 1650 |
+
('xsiz', f'V{sp // 2 - 1}'),
|
| 1651 |
+
('size', f'u{sp // 2}'),
|
| 1652 |
+
('offset', f'u{sp}'),
|
| 1653 |
+
])
|
| 1654 |
+
self.s_empty = ""
|
| 1655 |
+
self.s_short = "01234"
|
| 1656 |
+
self.s_medium = "abcdefghijklmnopqrstuvwxyz"
|
| 1657 |
+
self.s_long = "-=+" * 100
|
| 1658 |
+
self.a = np.array(
|
| 1659 |
+
[self.s_empty, self.s_short, self.s_medium, self.s_long],
|
| 1660 |
+
self.dtype)
|
| 1661 |
+
|
| 1662 |
+
def get_view(self, a):
|
| 1663 |
+
# Cannot view a StringDType as anything else directly, since
|
| 1664 |
+
# it has references. So, we use a stride trick hack.
|
| 1665 |
+
from numpy.lib._stride_tricks_impl import DummyArray
|
| 1666 |
+
interface = dict(a.__array_interface__)
|
| 1667 |
+
interface['descr'] = self.view_dtype.descr
|
| 1668 |
+
interface['typestr'] = self.view_dtype.str
|
| 1669 |
+
return np.asarray(DummyArray(interface, base=a))
|
| 1670 |
+
|
| 1671 |
+
def get_flags(self, a):
|
| 1672 |
+
return self.get_view(a)['size_and_flags'] & 0xf0
|
| 1673 |
+
|
| 1674 |
+
def is_short(self, a):
|
| 1675 |
+
return self.get_flags(a) == self.INITIALIZED | self.OUTSIDE_ARENA
|
| 1676 |
+
|
| 1677 |
+
def is_on_heap(self, a):
|
| 1678 |
+
return self.get_flags(a) == (self.INITIALIZED
|
| 1679 |
+
| self.OUTSIDE_ARENA
|
| 1680 |
+
| self.LONG)
|
| 1681 |
+
|
| 1682 |
+
def is_missing(self, a):
|
| 1683 |
+
return self.get_flags(a) & self.MISSING == self.MISSING
|
| 1684 |
+
|
| 1685 |
+
def in_arena(self, a):
|
| 1686 |
+
return (self.get_flags(a) & (self.INITIALIZED | self.OUTSIDE_ARENA)
|
| 1687 |
+
== self.INITIALIZED)
|
| 1688 |
+
|
| 1689 |
+
def test_setup(self):
|
| 1690 |
+
is_short = self.is_short(self.a)
|
| 1691 |
+
length = np.strings.str_len(self.a)
|
| 1692 |
+
assert_array_equal(is_short, (length > 0) & (length <= 15))
|
| 1693 |
+
assert_array_equal(self.in_arena(self.a), [False, False, True, True])
|
| 1694 |
+
assert_array_equal(self.is_on_heap(self.a), False)
|
| 1695 |
+
assert_array_equal(self.is_missing(self.a), False)
|
| 1696 |
+
view = self.get_view(self.a)
|
| 1697 |
+
sizes = np.where(is_short, view['size_and_flags'] & 0xf,
|
| 1698 |
+
view['size'])
|
| 1699 |
+
assert_array_equal(sizes, np.strings.str_len(self.a))
|
| 1700 |
+
assert_array_equal(view['xsiz'][2:],
|
| 1701 |
+
np.void(b'\x00' * (self.sizeofstr // 4 - 1)))
|
| 1702 |
+
# Check that the medium string uses only 1 byte for its length
|
| 1703 |
+
# in the arena, while the long string takes 8 (or 4).
|
| 1704 |
+
offsets = view['offset']
|
| 1705 |
+
assert offsets[2] == 1
|
| 1706 |
+
assert offsets[3] == 1 + len(self.s_medium) + self.sizeofstr // 2
|
| 1707 |
+
|
| 1708 |
+
def test_empty(self):
|
| 1709 |
+
e = np.empty((3,), self.dtype)
|
| 1710 |
+
assert_array_equal(self.get_flags(e), 0)
|
| 1711 |
+
assert_array_equal(e, "")
|
| 1712 |
+
|
| 1713 |
+
def test_zeros(self):
|
| 1714 |
+
z = np.zeros((2,), self.dtype)
|
| 1715 |
+
assert_array_equal(self.get_flags(z), 0)
|
| 1716 |
+
assert_array_equal(z, "")
|
| 1717 |
+
|
| 1718 |
+
def test_copy(self):
|
| 1719 |
+
c = self.a.copy()
|
| 1720 |
+
assert_array_equal(self.get_flags(c), self.get_flags(self.a))
|
| 1721 |
+
assert_array_equal(c, self.a)
|
| 1722 |
+
offsets = self.get_view(c)['offset']
|
| 1723 |
+
assert offsets[2] == 1
|
| 1724 |
+
assert offsets[3] == 1 + len(self.s_medium) + self.sizeofstr // 2
|
| 1725 |
+
|
| 1726 |
+
def test_arena_use_with_setting(self):
|
| 1727 |
+
c = np.zeros_like(self.a)
|
| 1728 |
+
assert_array_equal(self.get_flags(c), 0)
|
| 1729 |
+
c[:] = self.a
|
| 1730 |
+
assert_array_equal(self.get_flags(c), self.get_flags(self.a))
|
| 1731 |
+
assert_array_equal(c, self.a)
|
| 1732 |
+
|
| 1733 |
+
def test_arena_reuse_with_setting(self):
|
| 1734 |
+
c = self.a.copy()
|
| 1735 |
+
c[:] = self.a
|
| 1736 |
+
assert_array_equal(self.get_flags(c), self.get_flags(self.a))
|
| 1737 |
+
assert_array_equal(c, self.a)
|
| 1738 |
+
|
| 1739 |
+
def test_arena_reuse_after_missing(self):
|
| 1740 |
+
c = self.a.copy()
|
| 1741 |
+
c[:] = np.nan
|
| 1742 |
+
assert np.all(self.is_missing(c))
|
| 1743 |
+
# Replacing with the original strings, the arena should be reused.
|
| 1744 |
+
c[:] = self.a
|
| 1745 |
+
assert_array_equal(self.get_flags(c), self.get_flags(self.a))
|
| 1746 |
+
assert_array_equal(c, self.a)
|
| 1747 |
+
|
| 1748 |
+
def test_arena_reuse_after_empty(self):
|
| 1749 |
+
c = self.a.copy()
|
| 1750 |
+
c[:] = ""
|
| 1751 |
+
assert_array_equal(c, "")
|
| 1752 |
+
# Replacing with the original strings, the arena should be reused.
|
| 1753 |
+
c[:] = self.a
|
| 1754 |
+
assert_array_equal(self.get_flags(c), self.get_flags(self.a))
|
| 1755 |
+
assert_array_equal(c, self.a)
|
| 1756 |
+
|
| 1757 |
+
def test_arena_reuse_for_shorter(self):
|
| 1758 |
+
c = self.a.copy()
|
| 1759 |
+
# A string slightly shorter than the shortest in the arena
|
| 1760 |
+
# should be used for all strings in the arena.
|
| 1761 |
+
c[:] = self.s_medium[:-1]
|
| 1762 |
+
assert_array_equal(c, self.s_medium[:-1])
|
| 1763 |
+
# first empty string in original was never initialized, so
|
| 1764 |
+
# filling it in now leaves it initialized inside the arena.
|
| 1765 |
+
# second string started as a short string so it can never live
|
| 1766 |
+
# in the arena.
|
| 1767 |
+
in_arena = np.array([True, False, True, True])
|
| 1768 |
+
assert_array_equal(self.in_arena(c), in_arena)
|
| 1769 |
+
# But when a short string is replaced, it will go on the heap.
|
| 1770 |
+
assert_array_equal(self.is_short(c), False)
|
| 1771 |
+
assert_array_equal(self.is_on_heap(c), ~in_arena)
|
| 1772 |
+
# We can put the originals back, and they'll still fit,
|
| 1773 |
+
# and short strings are back as short strings
|
| 1774 |
+
c[:] = self.a
|
| 1775 |
+
assert_array_equal(c, self.a)
|
| 1776 |
+
assert_array_equal(self.in_arena(c), in_arena)
|
| 1777 |
+
assert_array_equal(self.is_short(c), self.is_short(self.a))
|
| 1778 |
+
assert_array_equal(self.is_on_heap(c), False)
|
| 1779 |
+
|
| 1780 |
+
def test_arena_reuse_if_possible(self):
|
| 1781 |
+
c = self.a.copy()
|
| 1782 |
+
# A slightly longer string will not fit in the arena for
|
| 1783 |
+
# the medium string, but will fit for the longer one.
|
| 1784 |
+
c[:] = self.s_medium + "±"
|
| 1785 |
+
assert_array_equal(c, self.s_medium + "±")
|
| 1786 |
+
in_arena_exp = np.strings.str_len(self.a) >= len(self.s_medium) + 1
|
| 1787 |
+
# first entry started uninitialized and empty, so filling it leaves
|
| 1788 |
+
# it in the arena
|
| 1789 |
+
in_arena_exp[0] = True
|
| 1790 |
+
assert not np.all(in_arena_exp == self.in_arena(self.a))
|
| 1791 |
+
assert_array_equal(self.in_arena(c), in_arena_exp)
|
| 1792 |
+
assert_array_equal(self.is_short(c), False)
|
| 1793 |
+
assert_array_equal(self.is_on_heap(c), ~in_arena_exp)
|
| 1794 |
+
# And once outside arena, it stays outside, since offset is lost.
|
| 1795 |
+
# But short strings are used again.
|
| 1796 |
+
c[:] = self.a
|
| 1797 |
+
is_short_exp = self.is_short(self.a)
|
| 1798 |
+
assert_array_equal(c, self.a)
|
| 1799 |
+
assert_array_equal(self.in_arena(c), in_arena_exp)
|
| 1800 |
+
assert_array_equal(self.is_short(c), is_short_exp)
|
| 1801 |
+
assert_array_equal(self.is_on_heap(c), ~in_arena_exp & ~is_short_exp)
|
| 1802 |
+
|
| 1803 |
+
def test_arena_no_reuse_after_short(self):
|
| 1804 |
+
c = self.a.copy()
|
| 1805 |
+
# If we replace a string with a short string, it cannot
|
| 1806 |
+
# go into the arena after because the offset is lost.
|
| 1807 |
+
c[:] = self.s_short
|
| 1808 |
+
assert_array_equal(c, self.s_short)
|
| 1809 |
+
assert_array_equal(self.in_arena(c), False)
|
| 1810 |
+
c[:] = self.a
|
| 1811 |
+
assert_array_equal(c, self.a)
|
| 1812 |
+
assert_array_equal(self.in_arena(c), False)
|
| 1813 |
+
assert_array_equal(self.is_on_heap(c), self.in_arena(self.a))
|
mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_strings.py
ADDED
|
@@ -0,0 +1,1287 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sys
|
| 2 |
+
import pytest
|
| 3 |
+
|
| 4 |
+
import operator
|
| 5 |
+
import numpy as np
|
| 6 |
+
|
| 7 |
+
from numpy.testing import assert_array_equal, assert_raises, IS_PYPY
|
| 8 |
+
from numpy.testing._private.utils import requires_memory
|
| 9 |
+
|
| 10 |
+
COMPARISONS = [
|
| 11 |
+
(operator.eq, np.equal, "=="),
|
| 12 |
+
(operator.ne, np.not_equal, "!="),
|
| 13 |
+
(operator.lt, np.less, "<"),
|
| 14 |
+
(operator.le, np.less_equal, "<="),
|
| 15 |
+
(operator.gt, np.greater, ">"),
|
| 16 |
+
(operator.ge, np.greater_equal, ">="),
|
| 17 |
+
]
|
| 18 |
+
|
| 19 |
+
MAX = np.iinfo(np.int64).max
|
| 20 |
+
|
| 21 |
+
IS_PYPY_LT_7_3_16 = IS_PYPY and sys.implementation.version < (7, 3, 16)
|
| 22 |
+
|
| 23 |
+
@pytest.mark.parametrize(["op", "ufunc", "sym"], COMPARISONS)
|
| 24 |
+
def test_mixed_string_comparison_ufuncs_fail(op, ufunc, sym):
|
| 25 |
+
arr_string = np.array(["a", "b"], dtype="S")
|
| 26 |
+
arr_unicode = np.array(["a", "c"], dtype="U")
|
| 27 |
+
|
| 28 |
+
with pytest.raises(TypeError, match="did not contain a loop"):
|
| 29 |
+
ufunc(arr_string, arr_unicode)
|
| 30 |
+
|
| 31 |
+
with pytest.raises(TypeError, match="did not contain a loop"):
|
| 32 |
+
ufunc(arr_unicode, arr_string)
|
| 33 |
+
|
| 34 |
+
@pytest.mark.parametrize(["op", "ufunc", "sym"], COMPARISONS)
|
| 35 |
+
def test_mixed_string_comparisons_ufuncs_with_cast(op, ufunc, sym):
|
| 36 |
+
arr_string = np.array(["a", "b"], dtype="S")
|
| 37 |
+
arr_unicode = np.array(["a", "c"], dtype="U")
|
| 38 |
+
|
| 39 |
+
# While there is no loop, manual casting is acceptable:
|
| 40 |
+
res1 = ufunc(arr_string, arr_unicode, signature="UU->?", casting="unsafe")
|
| 41 |
+
res2 = ufunc(arr_string, arr_unicode, signature="SS->?", casting="unsafe")
|
| 42 |
+
|
| 43 |
+
expected = op(arr_string.astype("U"), arr_unicode)
|
| 44 |
+
assert_array_equal(res1, expected)
|
| 45 |
+
assert_array_equal(res2, expected)
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
@pytest.mark.parametrize(["op", "ufunc", "sym"], COMPARISONS)
|
| 49 |
+
@pytest.mark.parametrize("dtypes", [
|
| 50 |
+
("S2", "S2"), ("S2", "S10"),
|
| 51 |
+
("<U1", "<U1"), ("<U1", ">U1"), (">U1", ">U1"),
|
| 52 |
+
("<U1", "<U10"), ("<U1", ">U10")])
|
| 53 |
+
@pytest.mark.parametrize("aligned", [True, False])
|
| 54 |
+
def test_string_comparisons(op, ufunc, sym, dtypes, aligned):
|
| 55 |
+
# ensure native byte-order for the first view to stay within unicode range
|
| 56 |
+
native_dt = np.dtype(dtypes[0]).newbyteorder("=")
|
| 57 |
+
arr = np.arange(2**15).view(native_dt).astype(dtypes[0])
|
| 58 |
+
if not aligned:
|
| 59 |
+
# Make `arr` unaligned:
|
| 60 |
+
new = np.zeros(arr.nbytes + 1, dtype=np.uint8)[1:].view(dtypes[0])
|
| 61 |
+
new[...] = arr
|
| 62 |
+
arr = new
|
| 63 |
+
|
| 64 |
+
arr2 = arr.astype(dtypes[1], copy=True)
|
| 65 |
+
np.random.shuffle(arr2)
|
| 66 |
+
arr[0] = arr2[0] # make sure one matches
|
| 67 |
+
|
| 68 |
+
expected = [op(d1, d2) for d1, d2 in zip(arr.tolist(), arr2.tolist())]
|
| 69 |
+
assert_array_equal(op(arr, arr2), expected)
|
| 70 |
+
assert_array_equal(ufunc(arr, arr2), expected)
|
| 71 |
+
assert_array_equal(
|
| 72 |
+
np.char.compare_chararrays(arr, arr2, sym, False), expected
|
| 73 |
+
)
|
| 74 |
+
|
| 75 |
+
expected = [op(d2, d1) for d1, d2 in zip(arr.tolist(), arr2.tolist())]
|
| 76 |
+
assert_array_equal(op(arr2, arr), expected)
|
| 77 |
+
assert_array_equal(ufunc(arr2, arr), expected)
|
| 78 |
+
assert_array_equal(
|
| 79 |
+
np.char.compare_chararrays(arr2, arr, sym, False), expected
|
| 80 |
+
)
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
@pytest.mark.parametrize(["op", "ufunc", "sym"], COMPARISONS)
|
| 84 |
+
@pytest.mark.parametrize("dtypes", [
|
| 85 |
+
("S2", "S2"), ("S2", "S10"), ("<U1", "<U1"), ("<U1", ">U10")])
|
| 86 |
+
def test_string_comparisons_empty(op, ufunc, sym, dtypes):
|
| 87 |
+
arr = np.empty((1, 0, 1, 5), dtype=dtypes[0])
|
| 88 |
+
arr2 = np.empty((100, 1, 0, 1), dtype=dtypes[1])
|
| 89 |
+
|
| 90 |
+
expected = np.empty(np.broadcast_shapes(arr.shape, arr2.shape), dtype=bool)
|
| 91 |
+
assert_array_equal(op(arr, arr2), expected)
|
| 92 |
+
assert_array_equal(ufunc(arr, arr2), expected)
|
| 93 |
+
assert_array_equal(
|
| 94 |
+
np.char.compare_chararrays(arr, arr2, sym, False), expected
|
| 95 |
+
)
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
@pytest.mark.parametrize("str_dt", ["S", "U"])
|
| 99 |
+
@pytest.mark.parametrize("float_dt", np.typecodes["AllFloat"])
|
| 100 |
+
def test_float_to_string_cast(str_dt, float_dt):
|
| 101 |
+
float_dt = np.dtype(float_dt)
|
| 102 |
+
fi = np.finfo(float_dt)
|
| 103 |
+
arr = np.array([np.nan, np.inf, -np.inf, fi.max, fi.min], dtype=float_dt)
|
| 104 |
+
expected = ["nan", "inf", "-inf", str(fi.max), str(fi.min)]
|
| 105 |
+
if float_dt.kind == "c":
|
| 106 |
+
expected = [f"({r}+0j)" for r in expected]
|
| 107 |
+
|
| 108 |
+
res = arr.astype(str_dt)
|
| 109 |
+
assert_array_equal(res, np.array(expected, dtype=str_dt))
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
@pytest.mark.parametrize("str_dt", "US")
|
| 113 |
+
@pytest.mark.parametrize("size", [-1, np.iinfo(np.intc).max])
|
| 114 |
+
def test_string_size_dtype_errors(str_dt, size):
|
| 115 |
+
if size > 0:
|
| 116 |
+
size = size // np.dtype(f"{str_dt}1").itemsize + 1
|
| 117 |
+
|
| 118 |
+
with pytest.raises(ValueError):
|
| 119 |
+
np.dtype((str_dt, size))
|
| 120 |
+
with pytest.raises(TypeError):
|
| 121 |
+
np.dtype(f"{str_dt}{size}")
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
@pytest.mark.parametrize("str_dt", "US")
|
| 125 |
+
def test_string_size_dtype_large_repr(str_dt):
|
| 126 |
+
size = np.iinfo(np.intc).max // np.dtype(f"{str_dt}1").itemsize
|
| 127 |
+
size_str = str(size)
|
| 128 |
+
|
| 129 |
+
dtype = np.dtype((str_dt, size))
|
| 130 |
+
assert size_str in dtype.str
|
| 131 |
+
assert size_str in str(dtype)
|
| 132 |
+
assert size_str in repr(dtype)
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
@pytest.mark.slow
|
| 136 |
+
@requires_memory(2 * np.iinfo(np.intc).max)
|
| 137 |
+
@pytest.mark.parametrize("str_dt", "US")
|
| 138 |
+
def test_large_string_coercion_error(str_dt):
|
| 139 |
+
very_large = np.iinfo(np.intc).max // np.dtype(f"{str_dt}1").itemsize
|
| 140 |
+
try:
|
| 141 |
+
large_string = "A" * (very_large + 1)
|
| 142 |
+
except Exception:
|
| 143 |
+
# We may not be able to create this Python string on 32bit.
|
| 144 |
+
pytest.skip("python failed to create huge string")
|
| 145 |
+
|
| 146 |
+
class MyStr:
|
| 147 |
+
def __str__(self):
|
| 148 |
+
return large_string
|
| 149 |
+
|
| 150 |
+
try:
|
| 151 |
+
# TypeError from NumPy, or OverflowError from 32bit Python.
|
| 152 |
+
with pytest.raises((TypeError, OverflowError)):
|
| 153 |
+
np.array([large_string], dtype=str_dt)
|
| 154 |
+
|
| 155 |
+
# Same as above, but input has to be converted to a string.
|
| 156 |
+
with pytest.raises((TypeError, OverflowError)):
|
| 157 |
+
np.array([MyStr()], dtype=str_dt)
|
| 158 |
+
except MemoryError:
|
| 159 |
+
# Catch memory errors, because `requires_memory` would do so.
|
| 160 |
+
raise AssertionError("Ops should raise before any large allocation.")
|
| 161 |
+
|
| 162 |
+
@pytest.mark.slow
|
| 163 |
+
@requires_memory(2 * np.iinfo(np.intc).max)
|
| 164 |
+
@pytest.mark.parametrize("str_dt", "US")
|
| 165 |
+
def test_large_string_addition_error(str_dt):
|
| 166 |
+
very_large = np.iinfo(np.intc).max // np.dtype(f"{str_dt}1").itemsize
|
| 167 |
+
|
| 168 |
+
a = np.array(["A" * very_large], dtype=str_dt)
|
| 169 |
+
b = np.array("B", dtype=str_dt)
|
| 170 |
+
try:
|
| 171 |
+
with pytest.raises(TypeError):
|
| 172 |
+
np.add(a, b)
|
| 173 |
+
with pytest.raises(TypeError):
|
| 174 |
+
np.add(a, a)
|
| 175 |
+
except MemoryError:
|
| 176 |
+
# Catch memory errors, because `requires_memory` would do so.
|
| 177 |
+
raise AssertionError("Ops should raise before any large allocation.")
|
| 178 |
+
|
| 179 |
+
|
| 180 |
+
def test_large_string_cast():
|
| 181 |
+
very_large = np.iinfo(np.intc).max // 4
|
| 182 |
+
# Could be nice to test very large path, but it makes too many huge
|
| 183 |
+
# allocations right now (need non-legacy cast loops for this).
|
| 184 |
+
# a = np.array([], dtype=np.dtype(("S", very_large)))
|
| 185 |
+
# assert a.astype("U").dtype.itemsize == very_large * 4
|
| 186 |
+
|
| 187 |
+
a = np.array([], dtype=np.dtype(("S", very_large + 1)))
|
| 188 |
+
# It is not perfect but OK if this raises a MemoryError during setup
|
| 189 |
+
# (this happens due clunky code and/or buffer setup.)
|
| 190 |
+
with pytest.raises((TypeError, MemoryError)):
|
| 191 |
+
a.astype("U")
|
| 192 |
+
|
| 193 |
+
|
| 194 |
+
@pytest.mark.parametrize("dt", ["S", "U", "T"])
|
| 195 |
+
class TestMethods:
|
| 196 |
+
|
| 197 |
+
@pytest.mark.parametrize("in1,in2,out", [
|
| 198 |
+
("", "", ""),
|
| 199 |
+
("abc", "abc", "abcabc"),
|
| 200 |
+
("12345", "12345", "1234512345"),
|
| 201 |
+
("MixedCase", "MixedCase", "MixedCaseMixedCase"),
|
| 202 |
+
("12345 \0 ", "12345 \0 ", "12345 \0 12345 \0 "),
|
| 203 |
+
("UPPER", "UPPER", "UPPERUPPER"),
|
| 204 |
+
(["abc", "def"], ["hello", "world"], ["abchello", "defworld"]),
|
| 205 |
+
])
|
| 206 |
+
def test_add(self, in1, in2, out, dt):
|
| 207 |
+
in1 = np.array(in1, dtype=dt)
|
| 208 |
+
in2 = np.array(in2, dtype=dt)
|
| 209 |
+
out = np.array(out, dtype=dt)
|
| 210 |
+
assert_array_equal(np.strings.add(in1, in2), out)
|
| 211 |
+
|
| 212 |
+
@pytest.mark.parametrize("in1,in2,out", [
|
| 213 |
+
("abc", 3, "abcabcabc"),
|
| 214 |
+
("abc", 0, ""),
|
| 215 |
+
("abc", -1, ""),
|
| 216 |
+
(["abc", "def"], [1, 4], ["abc", "defdefdefdef"]),
|
| 217 |
+
])
|
| 218 |
+
def test_multiply(self, in1, in2, out, dt):
|
| 219 |
+
in1 = np.array(in1, dtype=dt)
|
| 220 |
+
out = np.array(out, dtype=dt)
|
| 221 |
+
assert_array_equal(np.strings.multiply(in1, in2), out)
|
| 222 |
+
|
| 223 |
+
def test_multiply_raises(self, dt):
|
| 224 |
+
with pytest.raises(TypeError, match="unsupported type"):
|
| 225 |
+
np.strings.multiply(np.array("abc", dtype=dt), 3.14)
|
| 226 |
+
|
| 227 |
+
with pytest.raises(MemoryError):
|
| 228 |
+
np.strings.multiply(np.array("abc", dtype=dt), sys.maxsize)
|
| 229 |
+
|
| 230 |
+
@pytest.mark.parametrize("i_dt", [np.int8, np.int16, np.int32,
|
| 231 |
+
np.int64, np.int_])
|
| 232 |
+
def test_multiply_integer_dtypes(self, i_dt, dt):
|
| 233 |
+
a = np.array("abc", dtype=dt)
|
| 234 |
+
i = np.array(3, dtype=i_dt)
|
| 235 |
+
res = np.array("abcabcabc", dtype=dt)
|
| 236 |
+
assert_array_equal(np.strings.multiply(a, i), res)
|
| 237 |
+
|
| 238 |
+
@pytest.mark.parametrize("in_,out", [
|
| 239 |
+
("", False),
|
| 240 |
+
("a", True),
|
| 241 |
+
("A", True),
|
| 242 |
+
("\n", False),
|
| 243 |
+
("abc", True),
|
| 244 |
+
("aBc123", False),
|
| 245 |
+
("abc\n", False),
|
| 246 |
+
(["abc", "aBc123"], [True, False]),
|
| 247 |
+
])
|
| 248 |
+
def test_isalpha(self, in_, out, dt):
|
| 249 |
+
in_ = np.array(in_, dtype=dt)
|
| 250 |
+
assert_array_equal(np.strings.isalpha(in_), out)
|
| 251 |
+
|
| 252 |
+
@pytest.mark.parametrize("in_,out", [
|
| 253 |
+
('', False),
|
| 254 |
+
('a', True),
|
| 255 |
+
('A', True),
|
| 256 |
+
('\n', False),
|
| 257 |
+
('123abc456', True),
|
| 258 |
+
('a1b3c', True),
|
| 259 |
+
('aBc000 ', False),
|
| 260 |
+
('abc\n', False),
|
| 261 |
+
])
|
| 262 |
+
def test_isalnum(self, in_, out, dt):
|
| 263 |
+
in_ = np.array(in_, dtype=dt)
|
| 264 |
+
assert_array_equal(np.strings.isalnum(in_), out)
|
| 265 |
+
|
| 266 |
+
@pytest.mark.parametrize("in_,out", [
|
| 267 |
+
("", False),
|
| 268 |
+
("a", False),
|
| 269 |
+
("0", True),
|
| 270 |
+
("012345", True),
|
| 271 |
+
("012345a", False),
|
| 272 |
+
(["a", "012345"], [False, True]),
|
| 273 |
+
])
|
| 274 |
+
def test_isdigit(self, in_, out, dt):
|
| 275 |
+
in_ = np.array(in_, dtype=dt)
|
| 276 |
+
assert_array_equal(np.strings.isdigit(in_), out)
|
| 277 |
+
|
| 278 |
+
@pytest.mark.parametrize("in_,out", [
|
| 279 |
+
("", False),
|
| 280 |
+
("a", False),
|
| 281 |
+
("1", False),
|
| 282 |
+
(" ", True),
|
| 283 |
+
("\t", True),
|
| 284 |
+
("\r", True),
|
| 285 |
+
("\n", True),
|
| 286 |
+
(" \t\r \n", True),
|
| 287 |
+
(" \t\r\na", False),
|
| 288 |
+
(["\t1", " \t\r \n"], [False, True])
|
| 289 |
+
])
|
| 290 |
+
def test_isspace(self, in_, out, dt):
|
| 291 |
+
in_ = np.array(in_, dtype=dt)
|
| 292 |
+
assert_array_equal(np.strings.isspace(in_), out)
|
| 293 |
+
|
| 294 |
+
@pytest.mark.parametrize("in_,out", [
|
| 295 |
+
('', False),
|
| 296 |
+
('a', True),
|
| 297 |
+
('A', False),
|
| 298 |
+
('\n', False),
|
| 299 |
+
('abc', True),
|
| 300 |
+
('aBc', False),
|
| 301 |
+
('abc\n', True),
|
| 302 |
+
])
|
| 303 |
+
def test_islower(self, in_, out, dt):
|
| 304 |
+
in_ = np.array(in_, dtype=dt)
|
| 305 |
+
assert_array_equal(np.strings.islower(in_), out)
|
| 306 |
+
|
| 307 |
+
@pytest.mark.parametrize("in_,out", [
|
| 308 |
+
('', False),
|
| 309 |
+
('a', False),
|
| 310 |
+
('A', True),
|
| 311 |
+
('\n', False),
|
| 312 |
+
('ABC', True),
|
| 313 |
+
('AbC', False),
|
| 314 |
+
('ABC\n', True),
|
| 315 |
+
])
|
| 316 |
+
def test_isupper(self, in_, out, dt):
|
| 317 |
+
in_ = np.array(in_, dtype=dt)
|
| 318 |
+
assert_array_equal(np.strings.isupper(in_), out)
|
| 319 |
+
|
| 320 |
+
@pytest.mark.parametrize("in_,out", [
|
| 321 |
+
('', False),
|
| 322 |
+
('a', False),
|
| 323 |
+
('A', True),
|
| 324 |
+
('\n', False),
|
| 325 |
+
('A Titlecased Line', True),
|
| 326 |
+
('A\nTitlecased Line', True),
|
| 327 |
+
('A Titlecased, Line', True),
|
| 328 |
+
('Not a capitalized String', False),
|
| 329 |
+
('Not\ta Titlecase String', False),
|
| 330 |
+
('Not--a Titlecase String', False),
|
| 331 |
+
('NOT', False),
|
| 332 |
+
])
|
| 333 |
+
def test_istitle(self, in_, out, dt):
|
| 334 |
+
in_ = np.array(in_, dtype=dt)
|
| 335 |
+
assert_array_equal(np.strings.istitle(in_), out)
|
| 336 |
+
|
| 337 |
+
@pytest.mark.parametrize("in_,out", [
|
| 338 |
+
("", 0),
|
| 339 |
+
("abc", 3),
|
| 340 |
+
("12345", 5),
|
| 341 |
+
("MixedCase", 9),
|
| 342 |
+
("12345 \x00 ", 8),
|
| 343 |
+
("UPPER", 5),
|
| 344 |
+
(["abc", "12345 \x00 "], [3, 8]),
|
| 345 |
+
])
|
| 346 |
+
def test_str_len(self, in_, out, dt):
|
| 347 |
+
in_ = np.array(in_, dtype=dt)
|
| 348 |
+
assert_array_equal(np.strings.str_len(in_), out)
|
| 349 |
+
|
| 350 |
+
@pytest.mark.parametrize("a,sub,start,end,out", [
|
| 351 |
+
("abcdefghiabc", "abc", 0, None, 0),
|
| 352 |
+
("abcdefghiabc", "abc", 1, None, 9),
|
| 353 |
+
("abcdefghiabc", "def", 4, None, -1),
|
| 354 |
+
("abc", "", 0, None, 0),
|
| 355 |
+
("abc", "", 3, None, 3),
|
| 356 |
+
("abc", "", 4, None, -1),
|
| 357 |
+
("rrarrrrrrrrra", "a", 0, None, 2),
|
| 358 |
+
("rrarrrrrrrrra", "a", 4, None, 12),
|
| 359 |
+
("rrarrrrrrrrra", "a", 4, 6, -1),
|
| 360 |
+
("", "", 0, None, 0),
|
| 361 |
+
("", "", 1, 1, -1),
|
| 362 |
+
("", "", MAX, 0, -1),
|
| 363 |
+
("", "xx", 0, None, -1),
|
| 364 |
+
("", "xx", 1, 1, -1),
|
| 365 |
+
("", "xx", MAX, 0, -1),
|
| 366 |
+
pytest.param(99*"a" + "b", "b", 0, None, 99,
|
| 367 |
+
id="99*a+b-b-0-None-99"),
|
| 368 |
+
pytest.param(98*"a" + "ba", "ba", 0, None, 98,
|
| 369 |
+
id="98*a+ba-ba-0-None-98"),
|
| 370 |
+
pytest.param(100*"a", "b", 0, None, -1,
|
| 371 |
+
id="100*a-b-0-None--1"),
|
| 372 |
+
pytest.param(30000*"a" + 100*"b", 100*"b", 0, None, 30000,
|
| 373 |
+
id="30000*a+100*b-100*b-0-None-30000"),
|
| 374 |
+
pytest.param(30000*"a", 100*"b", 0, None, -1,
|
| 375 |
+
id="30000*a-100*b-0-None--1"),
|
| 376 |
+
pytest.param(15000*"a" + 15000*"b", 15000*"b", 0, None, 15000,
|
| 377 |
+
id="15000*a+15000*b-15000*b-0-None-15000"),
|
| 378 |
+
pytest.param(15000*"a" + 15000*"b", 15000*"c", 0, None, -1,
|
| 379 |
+
id="15000*a+15000*b-15000*c-0-None--1"),
|
| 380 |
+
(["abcdefghiabc", "rrarrrrrrrrra"], ["def", "arr"], [0, 3],
|
| 381 |
+
None, [3, -1]),
|
| 382 |
+
("Ae¢☃€ 😊" * 2, "😊", 0, None, 6),
|
| 383 |
+
("Ae¢☃€ 😊" * 2, "😊", 7, None, 13),
|
| 384 |
+
])
|
| 385 |
+
def test_find(self, a, sub, start, end, out, dt):
|
| 386 |
+
if "😊" in a and dt == "S":
|
| 387 |
+
pytest.skip("Bytes dtype does not support non-ascii input")
|
| 388 |
+
a = np.array(a, dtype=dt)
|
| 389 |
+
sub = np.array(sub, dtype=dt)
|
| 390 |
+
assert_array_equal(np.strings.find(a, sub, start, end), out)
|
| 391 |
+
|
| 392 |
+
@pytest.mark.parametrize("a,sub,start,end,out", [
|
| 393 |
+
("abcdefghiabc", "abc", 0, None, 9),
|
| 394 |
+
("abcdefghiabc", "", 0, None, 12),
|
| 395 |
+
("abcdefghiabc", "abcd", 0, None, 0),
|
| 396 |
+
("abcdefghiabc", "abcz", 0, None, -1),
|
| 397 |
+
("abc", "", 0, None, 3),
|
| 398 |
+
("abc", "", 3, None, 3),
|
| 399 |
+
("abc", "", 4, None, -1),
|
| 400 |
+
("rrarrrrrrrrra", "a", 0, None, 12),
|
| 401 |
+
("rrarrrrrrrrra", "a", 4, None, 12),
|
| 402 |
+
("rrarrrrrrrrra", "a", 4, 6, -1),
|
| 403 |
+
(["abcdefghiabc", "rrarrrrrrrrra"], ["abc", "a"], [0, 0],
|
| 404 |
+
None, [9, 12]),
|
| 405 |
+
("Ae¢☃€ 😊" * 2, "😊", 0, None, 13),
|
| 406 |
+
("Ae¢☃€ 😊" * 2, "😊", 0, 7, 6),
|
| 407 |
+
])
|
| 408 |
+
def test_rfind(self, a, sub, start, end, out, dt):
|
| 409 |
+
if "😊" in a and dt == "S":
|
| 410 |
+
pytest.skip("Bytes dtype does not support non-ascii input")
|
| 411 |
+
a = np.array(a, dtype=dt)
|
| 412 |
+
sub = np.array(sub, dtype=dt)
|
| 413 |
+
assert_array_equal(np.strings.rfind(a, sub, start, end), out)
|
| 414 |
+
|
| 415 |
+
@pytest.mark.parametrize("a,sub,start,end,out", [
|
| 416 |
+
("aaa", "a", 0, None, 3),
|
| 417 |
+
("aaa", "b", 0, None, 0),
|
| 418 |
+
("aaa", "a", 1, None, 2),
|
| 419 |
+
("aaa", "a", 10, None, 0),
|
| 420 |
+
("aaa", "a", -1, None, 1),
|
| 421 |
+
("aaa", "a", -10, None, 3),
|
| 422 |
+
("aaa", "a", 0, 1, 1),
|
| 423 |
+
("aaa", "a", 0, 10, 3),
|
| 424 |
+
("aaa", "a", 0, -1, 2),
|
| 425 |
+
("aaa", "a", 0, -10, 0),
|
| 426 |
+
("aaa", "", 1, None, 3),
|
| 427 |
+
("aaa", "", 3, None, 1),
|
| 428 |
+
("aaa", "", 10, None, 0),
|
| 429 |
+
("aaa", "", -1, None, 2),
|
| 430 |
+
("aaa", "", -10, None, 4),
|
| 431 |
+
("aaa", "aaaa", 0, None, 0),
|
| 432 |
+
pytest.param(98*"a" + "ba", "ba", 0, None, 1,
|
| 433 |
+
id="98*a+ba-ba-0-None-1"),
|
| 434 |
+
pytest.param(30000*"a" + 100*"b", 100*"b", 0, None, 1,
|
| 435 |
+
id="30000*a+100*b-100*b-0-None-1"),
|
| 436 |
+
pytest.param(30000*"a", 100*"b", 0, None, 0,
|
| 437 |
+
id="30000*a-100*b-0-None-0"),
|
| 438 |
+
pytest.param(30000*"a" + 100*"ab", "ab", 0, None, 100,
|
| 439 |
+
id="30000*a+100*ab-ab-0-None-100"),
|
| 440 |
+
pytest.param(15000*"a" + 15000*"b", 15000*"b", 0, None, 1,
|
| 441 |
+
id="15000*a+15000*b-15000*b-0-None-1"),
|
| 442 |
+
pytest.param(15000*"a" + 15000*"b", 15000*"c", 0, None, 0,
|
| 443 |
+
id="15000*a+15000*b-15000*c-0-None-0"),
|
| 444 |
+
("", "", 0, None, 1),
|
| 445 |
+
("", "", 1, 1, 0),
|
| 446 |
+
("", "", MAX, 0, 0),
|
| 447 |
+
("", "xx", 0, None, 0),
|
| 448 |
+
("", "xx", 1, 1, 0),
|
| 449 |
+
("", "xx", MAX, 0, 0),
|
| 450 |
+
(["aaa", ""], ["a", ""], [0, 0], None, [3, 1]),
|
| 451 |
+
("Ae¢☃€ 😊" * 100, "😊", 0, None, 100),
|
| 452 |
+
])
|
| 453 |
+
def test_count(self, a, sub, start, end, out, dt):
|
| 454 |
+
if "😊" in a and dt == "S":
|
| 455 |
+
pytest.skip("Bytes dtype does not support non-ascii input")
|
| 456 |
+
a = np.array(a, dtype=dt)
|
| 457 |
+
sub = np.array(sub, dtype=dt)
|
| 458 |
+
assert_array_equal(np.strings.count(a, sub, start, end), out)
|
| 459 |
+
|
| 460 |
+
@pytest.mark.parametrize("a,prefix,start,end,out", [
|
| 461 |
+
("hello", "he", 0, None, True),
|
| 462 |
+
("hello", "hello", 0, None, True),
|
| 463 |
+
("hello", "hello world", 0, None, False),
|
| 464 |
+
("hello", "", 0, None, True),
|
| 465 |
+
("hello", "ello", 0, None, False),
|
| 466 |
+
("hello", "ello", 1, None, True),
|
| 467 |
+
("hello", "o", 4, None, True),
|
| 468 |
+
("hello", "o", 5, None, False),
|
| 469 |
+
("hello", "", 5, None, True),
|
| 470 |
+
("hello", "lo", 6, None, False),
|
| 471 |
+
("helloworld", "lowo", 3, None, True),
|
| 472 |
+
("helloworld", "lowo", 3, 7, True),
|
| 473 |
+
("helloworld", "lowo", 3, 6, False),
|
| 474 |
+
("", "", 0, 1, True),
|
| 475 |
+
("", "", 0, 0, True),
|
| 476 |
+
("", "", 1, 0, False),
|
| 477 |
+
("hello", "he", 0, -1, True),
|
| 478 |
+
("hello", "he", -53, -1, True),
|
| 479 |
+
("hello", "hello", 0, -1, False),
|
| 480 |
+
("hello", "hello world", -1, -10, False),
|
| 481 |
+
("hello", "ello", -5, None, False),
|
| 482 |
+
("hello", "ello", -4, None, True),
|
| 483 |
+
("hello", "o", -2, None, False),
|
| 484 |
+
("hello", "o", -1, None, True),
|
| 485 |
+
("hello", "", -3, -3, True),
|
| 486 |
+
("hello", "lo", -9, None, False),
|
| 487 |
+
(["hello", ""], ["he", ""], [0, 0], None, [True, True]),
|
| 488 |
+
])
|
| 489 |
+
def test_startswith(self, a, prefix, start, end, out, dt):
|
| 490 |
+
a = np.array(a, dtype=dt)
|
| 491 |
+
prefix = np.array(prefix, dtype=dt)
|
| 492 |
+
assert_array_equal(np.strings.startswith(a, prefix, start, end), out)
|
| 493 |
+
|
| 494 |
+
@pytest.mark.parametrize("a,suffix,start,end,out", [
|
| 495 |
+
("hello", "lo", 0, None, True),
|
| 496 |
+
("hello", "he", 0, None, False),
|
| 497 |
+
("hello", "", 0, None, True),
|
| 498 |
+
("hello", "hello world", 0, None, False),
|
| 499 |
+
("helloworld", "worl", 0, None, False),
|
| 500 |
+
("helloworld", "worl", 3, 9, True),
|
| 501 |
+
("helloworld", "world", 3, 12, True),
|
| 502 |
+
("helloworld", "lowo", 1, 7, True),
|
| 503 |
+
("helloworld", "lowo", 2, 7, True),
|
| 504 |
+
("helloworld", "lowo", 3, 7, True),
|
| 505 |
+
("helloworld", "lowo", 4, 7, False),
|
| 506 |
+
("helloworld", "lowo", 3, 8, False),
|
| 507 |
+
("ab", "ab", 0, 1, False),
|
| 508 |
+
("ab", "ab", 0, 0, False),
|
| 509 |
+
("", "", 0, 1, True),
|
| 510 |
+
("", "", 0, 0, True),
|
| 511 |
+
("", "", 1, 0, False),
|
| 512 |
+
("hello", "lo", -2, None, True),
|
| 513 |
+
("hello", "he", -2, None, False),
|
| 514 |
+
("hello", "", -3, -3, True),
|
| 515 |
+
("hello", "hello world", -10, -2, False),
|
| 516 |
+
("helloworld", "worl", -6, None, False),
|
| 517 |
+
("helloworld", "worl", -5, -1, True),
|
| 518 |
+
("helloworld", "worl", -5, 9, True),
|
| 519 |
+
("helloworld", "world", -7, 12, True),
|
| 520 |
+
("helloworld", "lowo", -99, -3, True),
|
| 521 |
+
("helloworld", "lowo", -8, -3, True),
|
| 522 |
+
("helloworld", "lowo", -7, -3, True),
|
| 523 |
+
("helloworld", "lowo", 3, -4, False),
|
| 524 |
+
("helloworld", "lowo", -8, -2, False),
|
| 525 |
+
(["hello", "helloworld"], ["lo", "worl"], [0, -6], None,
|
| 526 |
+
[True, False]),
|
| 527 |
+
])
|
| 528 |
+
def test_endswith(self, a, suffix, start, end, out, dt):
|
| 529 |
+
a = np.array(a, dtype=dt)
|
| 530 |
+
suffix = np.array(suffix, dtype=dt)
|
| 531 |
+
assert_array_equal(np.strings.endswith(a, suffix, start, end), out)
|
| 532 |
+
|
| 533 |
+
@pytest.mark.parametrize("a,chars,out", [
|
| 534 |
+
("", None, ""),
|
| 535 |
+
(" hello ", None, "hello "),
|
| 536 |
+
("hello", None, "hello"),
|
| 537 |
+
(" \t\n\r\f\vabc \t\n\r\f\v", None, "abc \t\n\r\f\v"),
|
| 538 |
+
([" hello ", "hello"], None, ["hello ", "hello"]),
|
| 539 |
+
("", "", ""),
|
| 540 |
+
("", "xyz", ""),
|
| 541 |
+
("hello", "", "hello"),
|
| 542 |
+
("xyzzyhelloxyzzy", "xyz", "helloxyzzy"),
|
| 543 |
+
("hello", "xyz", "hello"),
|
| 544 |
+
("xyxz", "xyxz", ""),
|
| 545 |
+
("xyxzx", "x", "yxzx"),
|
| 546 |
+
(["xyzzyhelloxyzzy", "hello"], ["xyz", "xyz"],
|
| 547 |
+
["helloxyzzy", "hello"]),
|
| 548 |
+
(["ba", "ac", "baa", "bba"], "b", ["a", "ac", "aa", "a"]),
|
| 549 |
+
])
|
| 550 |
+
def test_lstrip(self, a, chars, out, dt):
|
| 551 |
+
a = np.array(a, dtype=dt)
|
| 552 |
+
out = np.array(out, dtype=dt)
|
| 553 |
+
if chars is not None:
|
| 554 |
+
chars = np.array(chars, dtype=dt)
|
| 555 |
+
assert_array_equal(np.strings.lstrip(a, chars), out)
|
| 556 |
+
else:
|
| 557 |
+
assert_array_equal(np.strings.lstrip(a), out)
|
| 558 |
+
|
| 559 |
+
@pytest.mark.parametrize("a,chars,out", [
|
| 560 |
+
("", None, ""),
|
| 561 |
+
(" hello ", None, " hello"),
|
| 562 |
+
("hello", None, "hello"),
|
| 563 |
+
(" \t\n\r\f\vabc \t\n\r\f\v", None, " \t\n\r\f\vabc"),
|
| 564 |
+
([" hello ", "hello"], None, [" hello", "hello"]),
|
| 565 |
+
("", "", ""),
|
| 566 |
+
("", "xyz", ""),
|
| 567 |
+
("hello", "", "hello"),
|
| 568 |
+
(["hello ", "abcdefghijklmnop"], None,
|
| 569 |
+
["hello", "abcdefghijklmnop"]),
|
| 570 |
+
("xyzzyhelloxyzzy", "xyz", "xyzzyhello"),
|
| 571 |
+
("hello", "xyz", "hello"),
|
| 572 |
+
("xyxz", "xyxz", ""),
|
| 573 |
+
(" ", None, ""),
|
| 574 |
+
("xyxzx", "x", "xyxz"),
|
| 575 |
+
(["xyzzyhelloxyzzy", "hello"], ["xyz", "xyz"],
|
| 576 |
+
["xyzzyhello", "hello"]),
|
| 577 |
+
(["ab", "ac", "aab", "abb"], "b", ["a", "ac", "aa", "a"]),
|
| 578 |
+
])
|
| 579 |
+
def test_rstrip(self, a, chars, out, dt):
|
| 580 |
+
a = np.array(a, dtype=dt)
|
| 581 |
+
out = np.array(out, dtype=dt)
|
| 582 |
+
if chars is not None:
|
| 583 |
+
chars = np.array(chars, dtype=dt)
|
| 584 |
+
assert_array_equal(np.strings.rstrip(a, chars), out)
|
| 585 |
+
else:
|
| 586 |
+
assert_array_equal(np.strings.rstrip(a), out)
|
| 587 |
+
|
| 588 |
+
@pytest.mark.parametrize("a,chars,out", [
|
| 589 |
+
("", None, ""),
|
| 590 |
+
(" hello ", None, "hello"),
|
| 591 |
+
("hello", None, "hello"),
|
| 592 |
+
(" \t\n\r\f\vabc \t\n\r\f\v", None, "abc"),
|
| 593 |
+
([" hello ", "hello"], None, ["hello", "hello"]),
|
| 594 |
+
("", "", ""),
|
| 595 |
+
("", "xyz", ""),
|
| 596 |
+
("hello", "", "hello"),
|
| 597 |
+
("xyzzyhelloxyzzy", "xyz", "hello"),
|
| 598 |
+
("hello", "xyz", "hello"),
|
| 599 |
+
("xyxz", "xyxz", ""),
|
| 600 |
+
("xyxzx", "x", "yxz"),
|
| 601 |
+
(["xyzzyhelloxyzzy", "hello"], ["xyz", "xyz"],
|
| 602 |
+
["hello", "hello"]),
|
| 603 |
+
(["bab", "ac", "baab", "bbabb"], "b", ["a", "ac", "aa", "a"]),
|
| 604 |
+
])
|
| 605 |
+
def test_strip(self, a, chars, out, dt):
|
| 606 |
+
a = np.array(a, dtype=dt)
|
| 607 |
+
if chars is not None:
|
| 608 |
+
chars = np.array(chars, dtype=dt)
|
| 609 |
+
out = np.array(out, dtype=dt)
|
| 610 |
+
assert_array_equal(np.strings.strip(a, chars), out)
|
| 611 |
+
|
| 612 |
+
@pytest.mark.parametrize("buf,old,new,count,res", [
|
| 613 |
+
("", "", "", -1, ""),
|
| 614 |
+
("", "", "A", -1, "A"),
|
| 615 |
+
("", "A", "", -1, ""),
|
| 616 |
+
("", "A", "A", -1, ""),
|
| 617 |
+
("", "", "", 100, ""),
|
| 618 |
+
("", "", "A", 100, "A"),
|
| 619 |
+
("A", "", "", -1, "A"),
|
| 620 |
+
("A", "", "*", -1, "*A*"),
|
| 621 |
+
("A", "", "*1", -1, "*1A*1"),
|
| 622 |
+
("A", "", "*-#", -1, "*-#A*-#"),
|
| 623 |
+
("AA", "", "*-", -1, "*-A*-A*-"),
|
| 624 |
+
("AA", "", "*-", -1, "*-A*-A*-"),
|
| 625 |
+
("AA", "", "*-", 4, "*-A*-A*-"),
|
| 626 |
+
("AA", "", "*-", 3, "*-A*-A*-"),
|
| 627 |
+
("AA", "", "*-", 2, "*-A*-A"),
|
| 628 |
+
("AA", "", "*-", 1, "*-AA"),
|
| 629 |
+
("AA", "", "*-", 0, "AA"),
|
| 630 |
+
("A", "A", "", -1, ""),
|
| 631 |
+
("AAA", "A", "", -1, ""),
|
| 632 |
+
("AAA", "A", "", -1, ""),
|
| 633 |
+
("AAA", "A", "", 4, ""),
|
| 634 |
+
("AAA", "A", "", 3, ""),
|
| 635 |
+
("AAA", "A", "", 2, "A"),
|
| 636 |
+
("AAA", "A", "", 1, "AA"),
|
| 637 |
+
("AAA", "A", "", 0, "AAA"),
|
| 638 |
+
("AAAAAAAAAA", "A", "", -1, ""),
|
| 639 |
+
("ABACADA", "A", "", -1, "BCD"),
|
| 640 |
+
("ABACADA", "A", "", -1, "BCD"),
|
| 641 |
+
("ABACADA", "A", "", 5, "BCD"),
|
| 642 |
+
("ABACADA", "A", "", 4, "BCD"),
|
| 643 |
+
("ABACADA", "A", "", 3, "BCDA"),
|
| 644 |
+
("ABACADA", "A", "", 2, "BCADA"),
|
| 645 |
+
("ABACADA", "A", "", 1, "BACADA"),
|
| 646 |
+
("ABACADA", "A", "", 0, "ABACADA"),
|
| 647 |
+
("ABCAD", "A", "", -1, "BCD"),
|
| 648 |
+
("ABCADAA", "A", "", -1, "BCD"),
|
| 649 |
+
("BCD", "A", "", -1, "BCD"),
|
| 650 |
+
("*************", "A", "", -1, "*************"),
|
| 651 |
+
("^"+"A"*1000+"^", "A", "", 999, "^A^"),
|
| 652 |
+
("the", "the", "", -1, ""),
|
| 653 |
+
("theater", "the", "", -1, "ater"),
|
| 654 |
+
("thethe", "the", "", -1, ""),
|
| 655 |
+
("thethethethe", "the", "", -1, ""),
|
| 656 |
+
("theatheatheathea", "the", "", -1, "aaaa"),
|
| 657 |
+
("that", "the", "", -1, "that"),
|
| 658 |
+
("thaet", "the", "", -1, "thaet"),
|
| 659 |
+
("here and there", "the", "", -1, "here and re"),
|
| 660 |
+
("here and there and there", "the", "", -1, "here and re and re"),
|
| 661 |
+
("here and there and there", "the", "", 3, "here and re and re"),
|
| 662 |
+
("here and there and there", "the", "", 2, "here and re and re"),
|
| 663 |
+
("here and there and there", "the", "", 1, "here and re and there"),
|
| 664 |
+
("here and there and there", "the", "", 0, "here and there and there"),
|
| 665 |
+
("here and there and there", "the", "", -1, "here and re and re"),
|
| 666 |
+
("abc", "the", "", -1, "abc"),
|
| 667 |
+
("abcdefg", "the", "", -1, "abcdefg"),
|
| 668 |
+
("bbobob", "bob", "", -1, "bob"),
|
| 669 |
+
("bbobobXbbobob", "bob", "", -1, "bobXbob"),
|
| 670 |
+
("aaaaaaabob", "bob", "", -1, "aaaaaaa"),
|
| 671 |
+
("aaaaaaa", "bob", "", -1, "aaaaaaa"),
|
| 672 |
+
("Who goes there?", "o", "o", -1, "Who goes there?"),
|
| 673 |
+
("Who goes there?", "o", "O", -1, "WhO gOes there?"),
|
| 674 |
+
("Who goes there?", "o", "O", -1, "WhO gOes there?"),
|
| 675 |
+
("Who goes there?", "o", "O", 3, "WhO gOes there?"),
|
| 676 |
+
("Who goes there?", "o", "O", 2, "WhO gOes there?"),
|
| 677 |
+
("Who goes there?", "o", "O", 1, "WhO goes there?"),
|
| 678 |
+
("Who goes there?", "o", "O", 0, "Who goes there?"),
|
| 679 |
+
("Who goes there?", "a", "q", -1, "Who goes there?"),
|
| 680 |
+
("Who goes there?", "W", "w", -1, "who goes there?"),
|
| 681 |
+
("WWho goes there?WW", "W", "w", -1, "wwho goes there?ww"),
|
| 682 |
+
("Who goes there?", "?", "!", -1, "Who goes there!"),
|
| 683 |
+
("Who goes there??", "?", "!", -1, "Who goes there!!"),
|
| 684 |
+
("Who goes there?", ".", "!", -1, "Who goes there?"),
|
| 685 |
+
("This is a tissue", "is", "**", -1, "Th** ** a t**sue"),
|
| 686 |
+
("This is a tissue", "is", "**", -1, "Th** ** a t**sue"),
|
| 687 |
+
("This is a tissue", "is", "**", 4, "Th** ** a t**sue"),
|
| 688 |
+
("This is a tissue", "is", "**", 3, "Th** ** a t**sue"),
|
| 689 |
+
("This is a tissue", "is", "**", 2, "Th** ** a tissue"),
|
| 690 |
+
("This is a tissue", "is", "**", 1, "Th** is a tissue"),
|
| 691 |
+
("This is a tissue", "is", "**", 0, "This is a tissue"),
|
| 692 |
+
("bobob", "bob", "cob", -1, "cobob"),
|
| 693 |
+
("bobobXbobobob", "bob", "cob", -1, "cobobXcobocob"),
|
| 694 |
+
("bobob", "bot", "bot", -1, "bobob"),
|
| 695 |
+
("Reykjavik", "k", "KK", -1, "ReyKKjaviKK"),
|
| 696 |
+
("Reykjavik", "k", "KK", -1, "ReyKKjaviKK"),
|
| 697 |
+
("Reykjavik", "k", "KK", 2, "ReyKKjaviKK"),
|
| 698 |
+
("Reykjavik", "k", "KK", 1, "ReyKKjavik"),
|
| 699 |
+
("Reykjavik", "k", "KK", 0, "Reykjavik"),
|
| 700 |
+
("A.B.C.", ".", "----", -1, "A----B----C----"),
|
| 701 |
+
("Reykjavik", "q", "KK", -1, "Reykjavik"),
|
| 702 |
+
("spam, spam, eggs and spam", "spam", "ham", -1,
|
| 703 |
+
"ham, ham, eggs and ham"),
|
| 704 |
+
("spam, spam, eggs and spam", "spam", "ham", -1,
|
| 705 |
+
"ham, ham, eggs and ham"),
|
| 706 |
+
("spam, spam, eggs and spam", "spam", "ham", 4,
|
| 707 |
+
"ham, ham, eggs and ham"),
|
| 708 |
+
("spam, spam, eggs and spam", "spam", "ham", 3,
|
| 709 |
+
"ham, ham, eggs and ham"),
|
| 710 |
+
("spam, spam, eggs and spam", "spam", "ham", 2,
|
| 711 |
+
"ham, ham, eggs and spam"),
|
| 712 |
+
("spam, spam, eggs and spam", "spam", "ham", 1,
|
| 713 |
+
"ham, spam, eggs and spam"),
|
| 714 |
+
("spam, spam, eggs and spam", "spam", "ham", 0,
|
| 715 |
+
"spam, spam, eggs and spam"),
|
| 716 |
+
("bobobob", "bobob", "bob", -1, "bobob"),
|
| 717 |
+
("bobobobXbobobob", "bobob", "bob", -1, "bobobXbobob"),
|
| 718 |
+
("BOBOBOB", "bob", "bobby", -1, "BOBOBOB"),
|
| 719 |
+
("one!two!three!", "!", "@", 1, "one@two!three!"),
|
| 720 |
+
("one!two!three!", "!", "", -1, "onetwothree"),
|
| 721 |
+
("one!two!three!", "!", "@", 2, "one@two@three!"),
|
| 722 |
+
("one!two!three!", "!", "@", 3, "one@two@three@"),
|
| 723 |
+
("one!two!three!", "!", "@", 4, "one@two@three@"),
|
| 724 |
+
("one!two!three!", "!", "@", 0, "one!two!three!"),
|
| 725 |
+
("one!two!three!", "!", "@", -1, "one@two@three@"),
|
| 726 |
+
("one!two!three!", "x", "@", -1, "one!two!three!"),
|
| 727 |
+
("one!two!three!", "x", "@", 2, "one!two!three!"),
|
| 728 |
+
("abc", "", "-", -1, "-a-b-c-"),
|
| 729 |
+
("abc", "", "-", 3, "-a-b-c"),
|
| 730 |
+
("abc", "", "-", 0, "abc"),
|
| 731 |
+
("abc", "ab", "--", 0, "abc"),
|
| 732 |
+
("abc", "xy", "--", -1, "abc"),
|
| 733 |
+
(["abbc", "abbd"], "b", "z", [1, 2], ["azbc", "azzd"]),
|
| 734 |
+
])
|
| 735 |
+
def test_replace(self, buf, old, new, count, res, dt):
|
| 736 |
+
if "😊" in buf and dt == "S":
|
| 737 |
+
pytest.skip("Bytes dtype does not support non-ascii input")
|
| 738 |
+
buf = np.array(buf, dtype=dt)
|
| 739 |
+
old = np.array(old, dtype=dt)
|
| 740 |
+
new = np.array(new, dtype=dt)
|
| 741 |
+
res = np.array(res, dtype=dt)
|
| 742 |
+
assert_array_equal(np.strings.replace(buf, old, new, count), res)
|
| 743 |
+
|
| 744 |
+
@pytest.mark.parametrize("buf,sub,start,end,res", [
|
| 745 |
+
("abcdefghiabc", "", 0, None, 0),
|
| 746 |
+
("abcdefghiabc", "def", 0, None, 3),
|
| 747 |
+
("abcdefghiabc", "abc", 0, None, 0),
|
| 748 |
+
("abcdefghiabc", "abc", 1, None, 9),
|
| 749 |
+
])
|
| 750 |
+
def test_index(self, buf, sub, start, end, res, dt):
|
| 751 |
+
buf = np.array(buf, dtype=dt)
|
| 752 |
+
sub = np.array(sub, dtype=dt)
|
| 753 |
+
assert_array_equal(np.strings.index(buf, sub, start, end), res)
|
| 754 |
+
|
| 755 |
+
@pytest.mark.parametrize("buf,sub,start,end", [
|
| 756 |
+
("abcdefghiabc", "hib", 0, None),
|
| 757 |
+
("abcdefghiab", "abc", 1, None),
|
| 758 |
+
("abcdefghi", "ghi", 8, None),
|
| 759 |
+
("abcdefghi", "ghi", -1, None),
|
| 760 |
+
("rrarrrrrrrrra", "a", 4, 6),
|
| 761 |
+
])
|
| 762 |
+
def test_index_raises(self, buf, sub, start, end, dt):
|
| 763 |
+
buf = np.array(buf, dtype=dt)
|
| 764 |
+
sub = np.array(sub, dtype=dt)
|
| 765 |
+
with pytest.raises(ValueError, match="substring not found"):
|
| 766 |
+
np.strings.index(buf, sub, start, end)
|
| 767 |
+
|
| 768 |
+
@pytest.mark.parametrize("buf,sub,start,end,res", [
|
| 769 |
+
("abcdefghiabc", "", 0, None, 12),
|
| 770 |
+
("abcdefghiabc", "def", 0, None, 3),
|
| 771 |
+
("abcdefghiabc", "abc", 0, None, 9),
|
| 772 |
+
("abcdefghiabc", "abc", 0, -1, 0),
|
| 773 |
+
])
|
| 774 |
+
def test_rindex(self, buf, sub, start, end, res, dt):
|
| 775 |
+
buf = np.array(buf, dtype=dt)
|
| 776 |
+
sub = np.array(sub, dtype=dt)
|
| 777 |
+
assert_array_equal(np.strings.rindex(buf, sub, start, end), res)
|
| 778 |
+
|
| 779 |
+
@pytest.mark.parametrize("buf,sub,start,end", [
|
| 780 |
+
("abcdefghiabc", "hib", 0, None),
|
| 781 |
+
("defghiabc", "def", 1, None),
|
| 782 |
+
("defghiabc", "abc", 0, -1),
|
| 783 |
+
("abcdefghi", "ghi", 0, 8),
|
| 784 |
+
("abcdefghi", "ghi", 0, -1),
|
| 785 |
+
("rrarrrrrrrrra", "a", 4, 6),
|
| 786 |
+
])
|
| 787 |
+
def test_rindex_raises(self, buf, sub, start, end, dt):
|
| 788 |
+
buf = np.array(buf, dtype=dt)
|
| 789 |
+
sub = np.array(sub, dtype=dt)
|
| 790 |
+
with pytest.raises(ValueError, match="substring not found"):
|
| 791 |
+
np.strings.rindex(buf, sub, start, end)
|
| 792 |
+
|
| 793 |
+
@pytest.mark.parametrize("buf,tabsize,res", [
|
| 794 |
+
("abc\rab\tdef\ng\thi", 8, "abc\rab def\ng hi"),
|
| 795 |
+
("abc\rab\tdef\ng\thi", 4, "abc\rab def\ng hi"),
|
| 796 |
+
("abc\r\nab\tdef\ng\thi", 8, "abc\r\nab def\ng hi"),
|
| 797 |
+
("abc\r\nab\tdef\ng\thi", 4, "abc\r\nab def\ng hi"),
|
| 798 |
+
("abc\r\nab\r\ndef\ng\r\nhi", 4, "abc\r\nab\r\ndef\ng\r\nhi"),
|
| 799 |
+
(" \ta\n\tb", 1, " a\n b"),
|
| 800 |
+
])
|
| 801 |
+
def test_expandtabs(self, buf, tabsize, res, dt):
|
| 802 |
+
buf = np.array(buf, dtype=dt)
|
| 803 |
+
res = np.array(res, dtype=dt)
|
| 804 |
+
assert_array_equal(np.strings.expandtabs(buf, tabsize), res)
|
| 805 |
+
|
| 806 |
+
def test_expandtabs_raises_overflow(self, dt):
|
| 807 |
+
with pytest.raises(OverflowError, match="new string is too long"):
|
| 808 |
+
np.strings.expandtabs(np.array("\ta\n\tb", dtype=dt), sys.maxsize)
|
| 809 |
+
np.strings.expandtabs(np.array("\ta\n\tb", dtype=dt), 2**61)
|
| 810 |
+
|
| 811 |
+
FILL_ERROR = "The fill character must be exactly one character long"
|
| 812 |
+
|
| 813 |
+
def test_center_raises_multiple_character_fill(self, dt):
|
| 814 |
+
buf = np.array("abc", dtype=dt)
|
| 815 |
+
fill = np.array("**", dtype=dt)
|
| 816 |
+
with pytest.raises(TypeError, match=self.FILL_ERROR):
|
| 817 |
+
np.strings.center(buf, 10, fill)
|
| 818 |
+
|
| 819 |
+
def test_ljust_raises_multiple_character_fill(self, dt):
|
| 820 |
+
buf = np.array("abc", dtype=dt)
|
| 821 |
+
fill = np.array("**", dtype=dt)
|
| 822 |
+
with pytest.raises(TypeError, match=self.FILL_ERROR):
|
| 823 |
+
np.strings.ljust(buf, 10, fill)
|
| 824 |
+
|
| 825 |
+
def test_rjust_raises_multiple_character_fill(self, dt):
|
| 826 |
+
buf = np.array("abc", dtype=dt)
|
| 827 |
+
fill = np.array("**", dtype=dt)
|
| 828 |
+
with pytest.raises(TypeError, match=self.FILL_ERROR):
|
| 829 |
+
np.strings.rjust(buf, 10, fill)
|
| 830 |
+
|
| 831 |
+
@pytest.mark.parametrize("buf,width,fillchar,res", [
|
| 832 |
+
('abc', 10, ' ', ' abc '),
|
| 833 |
+
('abc', 6, ' ', ' abc '),
|
| 834 |
+
('abc', 3, ' ', 'abc'),
|
| 835 |
+
('abc', 2, ' ', 'abc'),
|
| 836 |
+
('abc', 10, '*', '***abc****'),
|
| 837 |
+
])
|
| 838 |
+
def test_center(self, buf, width, fillchar, res, dt):
|
| 839 |
+
buf = np.array(buf, dtype=dt)
|
| 840 |
+
fillchar = np.array(fillchar, dtype=dt)
|
| 841 |
+
res = np.array(res, dtype=dt)
|
| 842 |
+
assert_array_equal(np.strings.center(buf, width, fillchar), res)
|
| 843 |
+
|
| 844 |
+
@pytest.mark.parametrize("buf,width,fillchar,res", [
|
| 845 |
+
('abc', 10, ' ', 'abc '),
|
| 846 |
+
('abc', 6, ' ', 'abc '),
|
| 847 |
+
('abc', 3, ' ', 'abc'),
|
| 848 |
+
('abc', 2, ' ', 'abc'),
|
| 849 |
+
('abc', 10, '*', 'abc*******'),
|
| 850 |
+
])
|
| 851 |
+
def test_ljust(self, buf, width, fillchar, res, dt):
|
| 852 |
+
buf = np.array(buf, dtype=dt)
|
| 853 |
+
fillchar = np.array(fillchar, dtype=dt)
|
| 854 |
+
res = np.array(res, dtype=dt)
|
| 855 |
+
assert_array_equal(np.strings.ljust(buf, width, fillchar), res)
|
| 856 |
+
|
| 857 |
+
@pytest.mark.parametrize("buf,width,fillchar,res", [
|
| 858 |
+
('abc', 10, ' ', ' abc'),
|
| 859 |
+
('abc', 6, ' ', ' abc'),
|
| 860 |
+
('abc', 3, ' ', 'abc'),
|
| 861 |
+
('abc', 2, ' ', 'abc'),
|
| 862 |
+
('abc', 10, '*', '*******abc'),
|
| 863 |
+
])
|
| 864 |
+
def test_rjust(self, buf, width, fillchar, res, dt):
|
| 865 |
+
buf = np.array(buf, dtype=dt)
|
| 866 |
+
fillchar = np.array(fillchar, dtype=dt)
|
| 867 |
+
res = np.array(res, dtype=dt)
|
| 868 |
+
assert_array_equal(np.strings.rjust(buf, width, fillchar), res)
|
| 869 |
+
|
| 870 |
+
@pytest.mark.parametrize("buf,width,res", [
|
| 871 |
+
('123', 2, '123'),
|
| 872 |
+
('123', 3, '123'),
|
| 873 |
+
('0123', 4, '0123'),
|
| 874 |
+
('+123', 3, '+123'),
|
| 875 |
+
('+123', 4, '+123'),
|
| 876 |
+
('+123', 5, '+0123'),
|
| 877 |
+
('+0123', 5, '+0123'),
|
| 878 |
+
('-123', 3, '-123'),
|
| 879 |
+
('-123', 4, '-123'),
|
| 880 |
+
('-0123', 5, '-0123'),
|
| 881 |
+
('000', 3, '000'),
|
| 882 |
+
('34', 1, '34'),
|
| 883 |
+
('0034', 4, '0034'),
|
| 884 |
+
])
|
| 885 |
+
def test_zfill(self, buf, width, res, dt):
|
| 886 |
+
buf = np.array(buf, dtype=dt)
|
| 887 |
+
res = np.array(res, dtype=dt)
|
| 888 |
+
assert_array_equal(np.strings.zfill(buf, width), res)
|
| 889 |
+
|
| 890 |
+
@pytest.mark.parametrize("buf,sep,res1,res2,res3", [
|
| 891 |
+
("this is the partition method", "ti", "this is the par",
|
| 892 |
+
"ti", "tion method"),
|
| 893 |
+
("http://www.python.org", "://", "http", "://", "www.python.org"),
|
| 894 |
+
("http://www.python.org", "?", "http://www.python.org", "", ""),
|
| 895 |
+
("http://www.python.org", "http://", "", "http://", "www.python.org"),
|
| 896 |
+
("http://www.python.org", "org", "http://www.python.", "org", ""),
|
| 897 |
+
("http://www.python.org", ["://", "?", "http://", "org"],
|
| 898 |
+
["http", "http://www.python.org", "", "http://www.python."],
|
| 899 |
+
["://", "", "http://", "org"],
|
| 900 |
+
["www.python.org", "", "www.python.org", ""]),
|
| 901 |
+
("mississippi", "ss", "mi", "ss", "issippi"),
|
| 902 |
+
("mississippi", "i", "m", "i", "ssissippi"),
|
| 903 |
+
("mississippi", "w", "mississippi", "", ""),
|
| 904 |
+
])
|
| 905 |
+
def test_partition(self, buf, sep, res1, res2, res3, dt):
|
| 906 |
+
buf = np.array(buf, dtype=dt)
|
| 907 |
+
sep = np.array(sep, dtype=dt)
|
| 908 |
+
res1 = np.array(res1, dtype=dt)
|
| 909 |
+
res2 = np.array(res2, dtype=dt)
|
| 910 |
+
res3 = np.array(res3, dtype=dt)
|
| 911 |
+
act1, act2, act3 = np.strings.partition(buf, sep)
|
| 912 |
+
assert_array_equal(act1, res1)
|
| 913 |
+
assert_array_equal(act2, res2)
|
| 914 |
+
assert_array_equal(act3, res3)
|
| 915 |
+
assert_array_equal(act1 + act2 + act3, buf)
|
| 916 |
+
|
| 917 |
+
@pytest.mark.parametrize("buf,sep,res1,res2,res3", [
|
| 918 |
+
("this is the partition method", "ti", "this is the parti",
|
| 919 |
+
"ti", "on method"),
|
| 920 |
+
("http://www.python.org", "://", "http", "://", "www.python.org"),
|
| 921 |
+
("http://www.python.org", "?", "", "", "http://www.python.org"),
|
| 922 |
+
("http://www.python.org", "http://", "", "http://", "www.python.org"),
|
| 923 |
+
("http://www.python.org", "org", "http://www.python.", "org", ""),
|
| 924 |
+
("http://www.python.org", ["://", "?", "http://", "org"],
|
| 925 |
+
["http", "", "", "http://www.python."],
|
| 926 |
+
["://", "", "http://", "org"],
|
| 927 |
+
["www.python.org", "http://www.python.org", "www.python.org", ""]),
|
| 928 |
+
("mississippi", "ss", "missi", "ss", "ippi"),
|
| 929 |
+
("mississippi", "i", "mississipp", "i", ""),
|
| 930 |
+
("mississippi", "w", "", "", "mississippi"),
|
| 931 |
+
])
|
| 932 |
+
def test_rpartition(self, buf, sep, res1, res2, res3, dt):
|
| 933 |
+
buf = np.array(buf, dtype=dt)
|
| 934 |
+
sep = np.array(sep, dtype=dt)
|
| 935 |
+
res1 = np.array(res1, dtype=dt)
|
| 936 |
+
res2 = np.array(res2, dtype=dt)
|
| 937 |
+
res3 = np.array(res3, dtype=dt)
|
| 938 |
+
act1, act2, act3 = np.strings.rpartition(buf, sep)
|
| 939 |
+
assert_array_equal(act1, res1)
|
| 940 |
+
assert_array_equal(act2, res2)
|
| 941 |
+
assert_array_equal(act3, res3)
|
| 942 |
+
assert_array_equal(act1 + act2 + act3, buf)
|
| 943 |
+
|
| 944 |
+
|
| 945 |
+
@pytest.mark.parametrize("dt", ["U", "T"])
|
| 946 |
+
class TestMethodsWithUnicode:
|
| 947 |
+
@pytest.mark.parametrize("in_,out", [
|
| 948 |
+
("", False),
|
| 949 |
+
("a", False),
|
| 950 |
+
("0", True),
|
| 951 |
+
("\u2460", False), # CIRCLED DIGIT 1
|
| 952 |
+
("\xbc", False), # VULGAR FRACTION ONE QUARTER
|
| 953 |
+
("\u0660", True), # ARABIC_INDIC DIGIT ZERO
|
| 954 |
+
("012345", True),
|
| 955 |
+
("012345a", False),
|
| 956 |
+
(["0", "a"], [True, False]),
|
| 957 |
+
])
|
| 958 |
+
def test_isdecimal_unicode(self, in_, out, dt):
|
| 959 |
+
buf = np.array(in_, dtype=dt)
|
| 960 |
+
assert_array_equal(np.strings.isdecimal(buf), out)
|
| 961 |
+
|
| 962 |
+
@pytest.mark.parametrize("in_,out", [
|
| 963 |
+
("", False),
|
| 964 |
+
("a", False),
|
| 965 |
+
("0", True),
|
| 966 |
+
("\u2460", True), # CIRCLED DIGIT 1
|
| 967 |
+
("\xbc", True), # VULGAR FRACTION ONE QUARTER
|
| 968 |
+
("\u0660", True), # ARABIC_INDIC DIGIT ZERO
|
| 969 |
+
("012345", True),
|
| 970 |
+
("012345a", False),
|
| 971 |
+
(["0", "a"], [True, False]),
|
| 972 |
+
])
|
| 973 |
+
def test_isnumeric_unicode(self, in_, out, dt):
|
| 974 |
+
buf = np.array(in_, dtype=dt)
|
| 975 |
+
assert_array_equal(np.strings.isnumeric(buf), out)
|
| 976 |
+
|
| 977 |
+
@pytest.mark.parametrize("buf,old,new,count,res", [
|
| 978 |
+
("...\u043c......<", "<", "<", -1, "...\u043c......<"),
|
| 979 |
+
("Ae¢☃€ 😊" * 2, "A", "B", -1, "Be¢☃€ 😊Be¢☃€ 😊"),
|
| 980 |
+
("Ae¢☃€ 😊" * 2, "😊", "B", -1, "Ae¢☃€ BAe¢☃€ B"),
|
| 981 |
+
])
|
| 982 |
+
def test_replace_unicode(self, buf, old, new, count, res, dt):
|
| 983 |
+
buf = np.array(buf, dtype=dt)
|
| 984 |
+
old = np.array(old, dtype=dt)
|
| 985 |
+
new = np.array(new, dtype=dt)
|
| 986 |
+
res = np.array(res, dtype=dt)
|
| 987 |
+
assert_array_equal(np.strings.replace(buf, old, new, count), res)
|
| 988 |
+
|
| 989 |
+
@pytest.mark.parametrize("in_", [
|
| 990 |
+
'\U00010401',
|
| 991 |
+
'\U00010427',
|
| 992 |
+
'\U00010429',
|
| 993 |
+
'\U0001044E',
|
| 994 |
+
'\U0001D7F6',
|
| 995 |
+
'\U00011066',
|
| 996 |
+
'\U000104A0',
|
| 997 |
+
pytest.param('\U0001F107', marks=pytest.mark.xfail(
|
| 998 |
+
sys.platform == 'win32' and IS_PYPY_LT_7_3_16,
|
| 999 |
+
reason="PYPY bug in Py_UNICODE_ISALNUM",
|
| 1000 |
+
strict=True)),
|
| 1001 |
+
])
|
| 1002 |
+
def test_isalnum_unicode(self, in_, dt):
|
| 1003 |
+
in_ = np.array(in_, dtype=dt)
|
| 1004 |
+
assert_array_equal(np.strings.isalnum(in_), True)
|
| 1005 |
+
|
| 1006 |
+
@pytest.mark.parametrize("in_,out", [
|
| 1007 |
+
('\u1FFc', False),
|
| 1008 |
+
('\u2167', False),
|
| 1009 |
+
('\U00010401', False),
|
| 1010 |
+
('\U00010427', False),
|
| 1011 |
+
('\U0001F40D', False),
|
| 1012 |
+
('\U0001F46F', False),
|
| 1013 |
+
('\u2177', True),
|
| 1014 |
+
pytest.param('\U00010429', True, marks=pytest.mark.xfail(
|
| 1015 |
+
sys.platform == 'win32' and IS_PYPY_LT_7_3_16,
|
| 1016 |
+
reason="PYPY bug in Py_UNICODE_ISLOWER",
|
| 1017 |
+
strict=True)),
|
| 1018 |
+
('\U0001044E', True),
|
| 1019 |
+
])
|
| 1020 |
+
def test_islower_unicode(self, in_, out, dt):
|
| 1021 |
+
in_ = np.array(in_, dtype=dt)
|
| 1022 |
+
assert_array_equal(np.strings.islower(in_), out)
|
| 1023 |
+
|
| 1024 |
+
@pytest.mark.parametrize("in_,out", [
|
| 1025 |
+
('\u1FFc', False),
|
| 1026 |
+
('\u2167', True),
|
| 1027 |
+
('\U00010401', True),
|
| 1028 |
+
('\U00010427', True),
|
| 1029 |
+
('\U0001F40D', False),
|
| 1030 |
+
('\U0001F46F', False),
|
| 1031 |
+
('\u2177', False),
|
| 1032 |
+
pytest.param('\U00010429', False, marks=pytest.mark.xfail(
|
| 1033 |
+
sys.platform == 'win32' and IS_PYPY_LT_7_3_16,
|
| 1034 |
+
reason="PYPY bug in Py_UNICODE_ISUPPER",
|
| 1035 |
+
strict=True)),
|
| 1036 |
+
('\U0001044E', False),
|
| 1037 |
+
])
|
| 1038 |
+
def test_isupper_unicode(self, in_, out, dt):
|
| 1039 |
+
in_ = np.array(in_, dtype=dt)
|
| 1040 |
+
assert_array_equal(np.strings.isupper(in_), out)
|
| 1041 |
+
|
| 1042 |
+
@pytest.mark.parametrize("in_,out", [
|
| 1043 |
+
('\u1FFc', True),
|
| 1044 |
+
('Greek \u1FFcitlecases ...', True),
|
| 1045 |
+
pytest.param('\U00010401\U00010429', True, marks=pytest.mark.xfail(
|
| 1046 |
+
sys.platform == 'win32' and IS_PYPY_LT_7_3_16,
|
| 1047 |
+
reason="PYPY bug in Py_UNICODE_ISISTITLE",
|
| 1048 |
+
strict=True)),
|
| 1049 |
+
('\U00010427\U0001044E', True),
|
| 1050 |
+
pytest.param('\U00010429', False, marks=pytest.mark.xfail(
|
| 1051 |
+
sys.platform == 'win32' and IS_PYPY_LT_7_3_16,
|
| 1052 |
+
reason="PYPY bug in Py_UNICODE_ISISTITLE",
|
| 1053 |
+
strict=True)),
|
| 1054 |
+
('\U0001044E', False),
|
| 1055 |
+
('\U0001F40D', False),
|
| 1056 |
+
('\U0001F46F', False),
|
| 1057 |
+
])
|
| 1058 |
+
def test_istitle_unicode(self, in_, out, dt):
|
| 1059 |
+
in_ = np.array(in_, dtype=dt)
|
| 1060 |
+
assert_array_equal(np.strings.istitle(in_), out)
|
| 1061 |
+
|
| 1062 |
+
@pytest.mark.parametrize("buf,sub,start,end,res", [
|
| 1063 |
+
("Ae¢☃€ 😊" * 2, "😊", 0, None, 6),
|
| 1064 |
+
("Ae¢☃€ 😊" * 2, "😊", 7, None, 13),
|
| 1065 |
+
])
|
| 1066 |
+
def test_index_unicode(self, buf, sub, start, end, res, dt):
|
| 1067 |
+
buf = np.array(buf, dtype=dt)
|
| 1068 |
+
sub = np.array(sub, dtype=dt)
|
| 1069 |
+
assert_array_equal(np.strings.index(buf, sub, start, end), res)
|
| 1070 |
+
|
| 1071 |
+
def test_index_raises_unicode(self, dt):
|
| 1072 |
+
with pytest.raises(ValueError, match="substring not found"):
|
| 1073 |
+
np.strings.index("Ae¢☃€ 😊", "😀")
|
| 1074 |
+
|
| 1075 |
+
@pytest.mark.parametrize("buf,res", [
|
| 1076 |
+
("Ae¢☃€ \t 😊", "Ae¢☃€ 😊"),
|
| 1077 |
+
("\t\U0001044E", " \U0001044E"),
|
| 1078 |
+
])
|
| 1079 |
+
def test_expandtabs(self, buf, res, dt):
|
| 1080 |
+
buf = np.array(buf, dtype=dt)
|
| 1081 |
+
res = np.array(res, dtype=dt)
|
| 1082 |
+
assert_array_equal(np.strings.expandtabs(buf), res)
|
| 1083 |
+
|
| 1084 |
+
@pytest.mark.parametrize("buf,width,fillchar,res", [
|
| 1085 |
+
('x', 2, '\U0001044E', 'x\U0001044E'),
|
| 1086 |
+
('x', 3, '\U0001044E', '\U0001044Ex\U0001044E'),
|
| 1087 |
+
('x', 4, '\U0001044E', '\U0001044Ex\U0001044E\U0001044E'),
|
| 1088 |
+
])
|
| 1089 |
+
def test_center(self, buf, width, fillchar, res, dt):
|
| 1090 |
+
buf = np.array(buf, dtype=dt)
|
| 1091 |
+
fillchar = np.array(fillchar, dtype=dt)
|
| 1092 |
+
res = np.array(res, dtype=dt)
|
| 1093 |
+
assert_array_equal(np.strings.center(buf, width, fillchar), res)
|
| 1094 |
+
|
| 1095 |
+
@pytest.mark.parametrize("buf,width,fillchar,res", [
|
| 1096 |
+
('x', 2, '\U0001044E', 'x\U0001044E'),
|
| 1097 |
+
('x', 3, '\U0001044E', 'x\U0001044E\U0001044E'),
|
| 1098 |
+
('x', 4, '\U0001044E', 'x\U0001044E\U0001044E\U0001044E'),
|
| 1099 |
+
])
|
| 1100 |
+
def test_ljust(self, buf, width, fillchar, res, dt):
|
| 1101 |
+
buf = np.array(buf, dtype=dt)
|
| 1102 |
+
fillchar = np.array(fillchar, dtype=dt)
|
| 1103 |
+
res = np.array(res, dtype=dt)
|
| 1104 |
+
assert_array_equal(np.strings.ljust(buf, width, fillchar), res)
|
| 1105 |
+
|
| 1106 |
+
@pytest.mark.parametrize("buf,width,fillchar,res", [
|
| 1107 |
+
('x', 2, '\U0001044E', '\U0001044Ex'),
|
| 1108 |
+
('x', 3, '\U0001044E', '\U0001044E\U0001044Ex'),
|
| 1109 |
+
('x', 4, '\U0001044E', '\U0001044E\U0001044E\U0001044Ex'),
|
| 1110 |
+
])
|
| 1111 |
+
def test_rjust(self, buf, width, fillchar, res, dt):
|
| 1112 |
+
buf = np.array(buf, dtype=dt)
|
| 1113 |
+
fillchar = np.array(fillchar, dtype=dt)
|
| 1114 |
+
res = np.array(res, dtype=dt)
|
| 1115 |
+
assert_array_equal(np.strings.rjust(buf, width, fillchar), res)
|
| 1116 |
+
|
| 1117 |
+
@pytest.mark.parametrize("buf,sep,res1,res2,res3", [
|
| 1118 |
+
("āāāāĀĀĀĀ", "Ă", "āāāāĀĀĀĀ", "", ""),
|
| 1119 |
+
("āāāāĂĀĀĀĀ", "Ă", "āāāā", "Ă", "ĀĀĀĀ"),
|
| 1120 |
+
("āāāāĂĂĀĀĀĀ", "ĂĂ", "āāāā", "ĂĂ", "ĀĀĀĀ"),
|
| 1121 |
+
("𐌁𐌁𐌁𐌁𐌀𐌀𐌀𐌀", "𐌂", "𐌁𐌁𐌁𐌁𐌀𐌀𐌀𐌀", "", ""),
|
| 1122 |
+
("𐌁𐌁𐌁𐌁𐌂𐌀𐌀𐌀𐌀", "𐌂", "𐌁𐌁𐌁𐌁", "𐌂", "𐌀𐌀𐌀𐌀"),
|
| 1123 |
+
("𐌁𐌁𐌁𐌁𐌂𐌂𐌀𐌀𐌀𐌀", "𐌂𐌂", "𐌁𐌁𐌁𐌁", "𐌂𐌂", "𐌀𐌀𐌀𐌀"),
|
| 1124 |
+
("𐌁𐌁𐌁𐌁𐌂𐌂𐌂𐌂𐌀𐌀𐌀𐌀", "𐌂𐌂𐌂𐌂", "𐌁𐌁𐌁𐌁", "𐌂𐌂𐌂𐌂", "𐌀𐌀𐌀𐌀"),
|
| 1125 |
+
])
|
| 1126 |
+
def test_partition(self, buf, sep, res1, res2, res3, dt):
|
| 1127 |
+
buf = np.array(buf, dtype=dt)
|
| 1128 |
+
sep = np.array(sep, dtype=dt)
|
| 1129 |
+
res1 = np.array(res1, dtype=dt)
|
| 1130 |
+
res2 = np.array(res2, dtype=dt)
|
| 1131 |
+
res3 = np.array(res3, dtype=dt)
|
| 1132 |
+
act1, act2, act3 = np.strings.partition(buf, sep)
|
| 1133 |
+
assert_array_equal(act1, res1)
|
| 1134 |
+
assert_array_equal(act2, res2)
|
| 1135 |
+
assert_array_equal(act3, res3)
|
| 1136 |
+
assert_array_equal(act1 + act2 + act3, buf)
|
| 1137 |
+
|
| 1138 |
+
@pytest.mark.parametrize("buf,sep,res1,res2,res3", [
|
| 1139 |
+
("āāāāĀĀĀĀ", "Ă", "", "", "āāāāĀĀĀĀ"),
|
| 1140 |
+
("āāāāĂĀĀĀĀ", "Ă", "āāāā", "Ă", "ĀĀĀĀ"),
|
| 1141 |
+
("āāāāĂĂĀĀĀĀ", "ĂĂ", "āāāā", "ĂĂ", "ĀĀĀĀ"),
|
| 1142 |
+
("𐌁𐌁𐌁𐌁𐌀𐌀𐌀𐌀", "𐌂", "", "", "𐌁𐌁𐌁𐌁𐌀𐌀𐌀𐌀"),
|
| 1143 |
+
("𐌁𐌁𐌁𐌁𐌂𐌀𐌀𐌀𐌀", "𐌂", "𐌁𐌁𐌁𐌁", "𐌂", "𐌀𐌀𐌀𐌀"),
|
| 1144 |
+
("𐌁𐌁𐌁𐌁𐌂𐌂𐌀𐌀𐌀𐌀", "𐌂𐌂", "𐌁𐌁𐌁𐌁", "𐌂𐌂", "𐌀𐌀𐌀𐌀"),
|
| 1145 |
+
])
|
| 1146 |
+
def test_rpartition(self, buf, sep, res1, res2, res3, dt):
|
| 1147 |
+
buf = np.array(buf, dtype=dt)
|
| 1148 |
+
sep = np.array(sep, dtype=dt)
|
| 1149 |
+
res1 = np.array(res1, dtype=dt)
|
| 1150 |
+
res2 = np.array(res2, dtype=dt)
|
| 1151 |
+
res3 = np.array(res3, dtype=dt)
|
| 1152 |
+
act1, act2, act3 = np.strings.rpartition(buf, sep)
|
| 1153 |
+
assert_array_equal(act1, res1)
|
| 1154 |
+
assert_array_equal(act2, res2)
|
| 1155 |
+
assert_array_equal(act3, res3)
|
| 1156 |
+
assert_array_equal(act1 + act2 + act3, buf)
|
| 1157 |
+
|
| 1158 |
+
@pytest.mark.parametrize("method", ["strip", "lstrip", "rstrip"])
|
| 1159 |
+
@pytest.mark.parametrize(
|
| 1160 |
+
"source,strip",
|
| 1161 |
+
[
|
| 1162 |
+
("λμ", "μ"),
|
| 1163 |
+
("λμ", "λ"),
|
| 1164 |
+
("λ"*5 + "μ"*2, "μ"),
|
| 1165 |
+
("λ" * 5 + "μ" * 2, "λ"),
|
| 1166 |
+
("λ" * 5 + "A" + "μ" * 2, "μλ"),
|
| 1167 |
+
("λμ" * 5, "μ"),
|
| 1168 |
+
("λμ" * 5, "λ"),
|
| 1169 |
+
])
|
| 1170 |
+
def test_strip_functions_unicode(self, source, strip, method, dt):
|
| 1171 |
+
src_array = np.array([source], dtype=dt)
|
| 1172 |
+
|
| 1173 |
+
npy_func = getattr(np.strings, method)
|
| 1174 |
+
py_func = getattr(str, method)
|
| 1175 |
+
|
| 1176 |
+
expected = np.array([py_func(source, strip)], dtype=dt)
|
| 1177 |
+
actual = npy_func(src_array, strip)
|
| 1178 |
+
|
| 1179 |
+
assert_array_equal(actual, expected)
|
| 1180 |
+
|
| 1181 |
+
|
| 1182 |
+
class TestMixedTypeMethods:
|
| 1183 |
+
def test_center(self):
|
| 1184 |
+
buf = np.array("😊", dtype="U")
|
| 1185 |
+
fill = np.array("*", dtype="S")
|
| 1186 |
+
res = np.array("*😊*", dtype="U")
|
| 1187 |
+
assert_array_equal(np.strings.center(buf, 3, fill), res)
|
| 1188 |
+
|
| 1189 |
+
buf = np.array("s", dtype="S")
|
| 1190 |
+
fill = np.array("*", dtype="U")
|
| 1191 |
+
res = np.array("*s*", dtype="S")
|
| 1192 |
+
assert_array_equal(np.strings.center(buf, 3, fill), res)
|
| 1193 |
+
|
| 1194 |
+
with pytest.raises(ValueError, match="'ascii' codec can't encode"):
|
| 1195 |
+
buf = np.array("s", dtype="S")
|
| 1196 |
+
fill = np.array("😊", dtype="U")
|
| 1197 |
+
np.strings.center(buf, 3, fill)
|
| 1198 |
+
|
| 1199 |
+
def test_ljust(self):
|
| 1200 |
+
buf = np.array("😊", dtype="U")
|
| 1201 |
+
fill = np.array("*", dtype="S")
|
| 1202 |
+
res = np.array("😊**", dtype="U")
|
| 1203 |
+
assert_array_equal(np.strings.ljust(buf, 3, fill), res)
|
| 1204 |
+
|
| 1205 |
+
buf = np.array("s", dtype="S")
|
| 1206 |
+
fill = np.array("*", dtype="U")
|
| 1207 |
+
res = np.array("s**", dtype="S")
|
| 1208 |
+
assert_array_equal(np.strings.ljust(buf, 3, fill), res)
|
| 1209 |
+
|
| 1210 |
+
with pytest.raises(ValueError, match="'ascii' codec can't encode"):
|
| 1211 |
+
buf = np.array("s", dtype="S")
|
| 1212 |
+
fill = np.array("😊", dtype="U")
|
| 1213 |
+
np.strings.ljust(buf, 3, fill)
|
| 1214 |
+
|
| 1215 |
+
def test_rjust(self):
|
| 1216 |
+
buf = np.array("😊", dtype="U")
|
| 1217 |
+
fill = np.array("*", dtype="S")
|
| 1218 |
+
res = np.array("**😊", dtype="U")
|
| 1219 |
+
assert_array_equal(np.strings.rjust(buf, 3, fill), res)
|
| 1220 |
+
|
| 1221 |
+
buf = np.array("s", dtype="S")
|
| 1222 |
+
fill = np.array("*", dtype="U")
|
| 1223 |
+
res = np.array("**s", dtype="S")
|
| 1224 |
+
assert_array_equal(np.strings.rjust(buf, 3, fill), res)
|
| 1225 |
+
|
| 1226 |
+
with pytest.raises(ValueError, match="'ascii' codec can't encode"):
|
| 1227 |
+
buf = np.array("s", dtype="S")
|
| 1228 |
+
fill = np.array("😊", dtype="U")
|
| 1229 |
+
np.strings.rjust(buf, 3, fill)
|
| 1230 |
+
|
| 1231 |
+
|
| 1232 |
+
class TestUnicodeOnlyMethodsRaiseWithBytes:
|
| 1233 |
+
def test_isdecimal_raises(self):
|
| 1234 |
+
in_ = np.array(b"1")
|
| 1235 |
+
with assert_raises(TypeError):
|
| 1236 |
+
np.strings.isdecimal(in_)
|
| 1237 |
+
|
| 1238 |
+
def test_isnumeric_bytes(self):
|
| 1239 |
+
in_ = np.array(b"1")
|
| 1240 |
+
with assert_raises(TypeError):
|
| 1241 |
+
np.strings.isnumeric(in_)
|
| 1242 |
+
|
| 1243 |
+
|
| 1244 |
+
def check_itemsize(n_elem, dt):
|
| 1245 |
+
if dt == "T":
|
| 1246 |
+
return np.dtype(dt).itemsize
|
| 1247 |
+
if dt == "S":
|
| 1248 |
+
return n_elem
|
| 1249 |
+
if dt == "U":
|
| 1250 |
+
return n_elem * 4
|
| 1251 |
+
|
| 1252 |
+
@pytest.mark.parametrize("dt", ["S", "U", "T"])
|
| 1253 |
+
class TestReplaceOnArrays:
|
| 1254 |
+
|
| 1255 |
+
def test_replace_count_and_size(self, dt):
|
| 1256 |
+
a = np.array(["0123456789" * i for i in range(4)], dtype=dt)
|
| 1257 |
+
r1 = np.strings.replace(a, "5", "ABCDE")
|
| 1258 |
+
assert r1.dtype.itemsize == check_itemsize(3*10 + 3*4, dt)
|
| 1259 |
+
r1_res = np.array(["01234ABCDE6789" * i for i in range(4)], dtype=dt)
|
| 1260 |
+
assert_array_equal(r1, r1_res)
|
| 1261 |
+
r2 = np.strings.replace(a, "5", "ABCDE", 1)
|
| 1262 |
+
assert r2.dtype.itemsize == check_itemsize(3*10 + 4, dt)
|
| 1263 |
+
r3 = np.strings.replace(a, "5", "ABCDE", 0)
|
| 1264 |
+
assert r3.dtype.itemsize == a.dtype.itemsize
|
| 1265 |
+
assert_array_equal(r3, a)
|
| 1266 |
+
# Negative values mean to replace all.
|
| 1267 |
+
r4 = np.strings.replace(a, "5", "ABCDE", -1)
|
| 1268 |
+
assert r4.dtype.itemsize == check_itemsize(3*10 + 3*4, dt)
|
| 1269 |
+
assert_array_equal(r4, r1)
|
| 1270 |
+
# We can do count on an element-by-element basis.
|
| 1271 |
+
r5 = np.strings.replace(a, "5", "ABCDE", [-1, -1, -1, 1])
|
| 1272 |
+
assert r5.dtype.itemsize == check_itemsize(3*10 + 4, dt)
|
| 1273 |
+
assert_array_equal(r5, np.array(
|
| 1274 |
+
["01234ABCDE6789" * i for i in range(3)]
|
| 1275 |
+
+ ["01234ABCDE6789" + "0123456789" * 2], dtype=dt))
|
| 1276 |
+
|
| 1277 |
+
def test_replace_broadcasting(self, dt):
|
| 1278 |
+
a = np.array("0,0,0", dtype=dt)
|
| 1279 |
+
r1 = np.strings.replace(a, "0", "1", np.arange(3))
|
| 1280 |
+
assert r1.dtype == a.dtype
|
| 1281 |
+
assert_array_equal(r1, np.array(["0,0,0", "1,0,0", "1,1,0"], dtype=dt))
|
| 1282 |
+
r2 = np.strings.replace(a, "0", [["1"], ["2"]], np.arange(1, 4))
|
| 1283 |
+
assert_array_equal(r2, np.array([["1,0,0", "1,1,0", "1,1,1"],
|
| 1284 |
+
["2,0,0", "2,2,0", "2,2,2"]],
|
| 1285 |
+
dtype=dt))
|
| 1286 |
+
r3 = np.strings.replace(a, ["0", "0,0", "0,0,0"], "X")
|
| 1287 |
+
assert_array_equal(r3, np.array(["X,X,X", "X,0", "X"], dtype=dt))
|
mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_ufunc.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_umath.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
mantis_evalkit/lib/python3.10/site-packages/numpy/_core/tests/test_umath_accuracy.py
ADDED
|
@@ -0,0 +1,121 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import os
|
| 3 |
+
from os import path
|
| 4 |
+
import sys
|
| 5 |
+
import pytest
|
| 6 |
+
from ctypes import c_longlong, c_double, c_float, c_int, cast, pointer, POINTER
|
| 7 |
+
from numpy.testing import assert_array_max_ulp
|
| 8 |
+
from numpy.testing._private.utils import _glibc_older_than
|
| 9 |
+
from numpy._core._multiarray_umath import __cpu_features__
|
| 10 |
+
|
| 11 |
+
UNARY_UFUNCS = [obj for obj in np._core.umath.__dict__.values() if
|
| 12 |
+
isinstance(obj, np.ufunc)]
|
| 13 |
+
UNARY_OBJECT_UFUNCS = [uf for uf in UNARY_UFUNCS if "O->O" in uf.types]
|
| 14 |
+
|
| 15 |
+
# Remove functions that do not support `floats`
|
| 16 |
+
UNARY_OBJECT_UFUNCS.remove(np.invert)
|
| 17 |
+
UNARY_OBJECT_UFUNCS.remove(np.bitwise_count)
|
| 18 |
+
|
| 19 |
+
IS_AVX = __cpu_features__.get('AVX512F', False) or \
|
| 20 |
+
(__cpu_features__.get('FMA3', False) and __cpu_features__.get('AVX2', False))
|
| 21 |
+
|
| 22 |
+
IS_AVX512FP16 = __cpu_features__.get('AVX512FP16', False)
|
| 23 |
+
|
| 24 |
+
# only run on linux with AVX, also avoid old glibc (numpy/numpy#20448).
|
| 25 |
+
runtest = (sys.platform.startswith('linux')
|
| 26 |
+
and IS_AVX and not _glibc_older_than("2.17"))
|
| 27 |
+
platform_skip = pytest.mark.skipif(not runtest,
|
| 28 |
+
reason="avoid testing inconsistent platform "
|
| 29 |
+
"library implementations")
|
| 30 |
+
|
| 31 |
+
# convert string to hex function taken from:
|
| 32 |
+
# https://stackoverflow.com/questions/1592158/convert-hex-to-float #
|
| 33 |
+
def convert(s, datatype="np.float32"):
|
| 34 |
+
i = int(s, 16) # convert from hex to a Python int
|
| 35 |
+
if (datatype == "np.float64"):
|
| 36 |
+
cp = pointer(c_longlong(i)) # make this into a c long long integer
|
| 37 |
+
fp = cast(cp, POINTER(c_double)) # cast the int pointer to a double pointer
|
| 38 |
+
else:
|
| 39 |
+
cp = pointer(c_int(i)) # make this into a c integer
|
| 40 |
+
fp = cast(cp, POINTER(c_float)) # cast the int pointer to a float pointer
|
| 41 |
+
|
| 42 |
+
return fp.contents.value # dereference the pointer, get the float
|
| 43 |
+
|
| 44 |
+
str_to_float = np.vectorize(convert)
|
| 45 |
+
|
| 46 |
+
class TestAccuracy:
|
| 47 |
+
@platform_skip
|
| 48 |
+
def test_validate_transcendentals(self):
|
| 49 |
+
with np.errstate(all='ignore'):
|
| 50 |
+
data_dir = path.join(path.dirname(__file__), 'data')
|
| 51 |
+
files = os.listdir(data_dir)
|
| 52 |
+
files = list(filter(lambda f: f.endswith('.csv'), files))
|
| 53 |
+
for filename in files:
|
| 54 |
+
filepath = path.join(data_dir, filename)
|
| 55 |
+
with open(filepath) as fid:
|
| 56 |
+
file_without_comments = (
|
| 57 |
+
r for r in fid if r[0] not in ('$', '#')
|
| 58 |
+
)
|
| 59 |
+
data = np.genfromtxt(file_without_comments,
|
| 60 |
+
dtype=('|S39','|S39','|S39',int),
|
| 61 |
+
names=('type','input','output','ulperr'),
|
| 62 |
+
delimiter=',',
|
| 63 |
+
skip_header=1)
|
| 64 |
+
npname = path.splitext(filename)[0].split('-')[3]
|
| 65 |
+
npfunc = getattr(np, npname)
|
| 66 |
+
for datatype in np.unique(data['type']):
|
| 67 |
+
data_subset = data[data['type'] == datatype]
|
| 68 |
+
inval = np.array(str_to_float(data_subset['input'].astype(str), data_subset['type'].astype(str)), dtype=eval(datatype))
|
| 69 |
+
outval = np.array(str_to_float(data_subset['output'].astype(str), data_subset['type'].astype(str)), dtype=eval(datatype))
|
| 70 |
+
perm = np.random.permutation(len(inval))
|
| 71 |
+
inval = inval[perm]
|
| 72 |
+
outval = outval[perm]
|
| 73 |
+
maxulperr = data_subset['ulperr'].max()
|
| 74 |
+
assert_array_max_ulp(npfunc(inval), outval, maxulperr)
|
| 75 |
+
|
| 76 |
+
@pytest.mark.skipif(IS_AVX512FP16,
|
| 77 |
+
reason = "SVML FP16 have slightly higher ULP errors")
|
| 78 |
+
@pytest.mark.parametrize("ufunc", UNARY_OBJECT_UFUNCS)
|
| 79 |
+
def test_validate_fp16_transcendentals(self, ufunc):
|
| 80 |
+
with np.errstate(all='ignore'):
|
| 81 |
+
arr = np.arange(65536, dtype=np.int16)
|
| 82 |
+
datafp16 = np.frombuffer(arr.tobytes(), dtype=np.float16)
|
| 83 |
+
datafp32 = datafp16.astype(np.float32)
|
| 84 |
+
assert_array_max_ulp(ufunc(datafp16), ufunc(datafp32),
|
| 85 |
+
maxulp=1, dtype=np.float16)
|
| 86 |
+
|
| 87 |
+
@pytest.mark.skipif(not IS_AVX512FP16,
|
| 88 |
+
reason="lower ULP only apply for SVML FP16")
|
| 89 |
+
def test_validate_svml_fp16(self):
|
| 90 |
+
max_ulp_err = {
|
| 91 |
+
"arccos": 2.54,
|
| 92 |
+
"arccosh": 2.09,
|
| 93 |
+
"arcsin": 3.06,
|
| 94 |
+
"arcsinh": 1.51,
|
| 95 |
+
"arctan": 2.61,
|
| 96 |
+
"arctanh": 1.88,
|
| 97 |
+
"cbrt": 1.57,
|
| 98 |
+
"cos": 1.43,
|
| 99 |
+
"cosh": 1.33,
|
| 100 |
+
"exp2": 1.33,
|
| 101 |
+
"exp": 1.27,
|
| 102 |
+
"expm1": 0.53,
|
| 103 |
+
"log": 1.80,
|
| 104 |
+
"log10": 1.27,
|
| 105 |
+
"log1p": 1.88,
|
| 106 |
+
"log2": 1.80,
|
| 107 |
+
"sin": 1.88,
|
| 108 |
+
"sinh": 2.05,
|
| 109 |
+
"tan": 2.26,
|
| 110 |
+
"tanh": 3.00,
|
| 111 |
+
}
|
| 112 |
+
|
| 113 |
+
with np.errstate(all='ignore'):
|
| 114 |
+
arr = np.arange(65536, dtype=np.int16)
|
| 115 |
+
datafp16 = np.frombuffer(arr.tobytes(), dtype=np.float16)
|
| 116 |
+
datafp32 = datafp16.astype(np.float32)
|
| 117 |
+
for func in max_ulp_err:
|
| 118 |
+
ufunc = getattr(np, func)
|
| 119 |
+
ulp = np.ceil(max_ulp_err[func])
|
| 120 |
+
assert_array_max_ulp(ufunc(datafp16), ufunc(datafp32),
|
| 121 |
+
maxulp=ulp, dtype=np.float16)
|