ZTWHHH committed on
Commit
80c0f1f
·
verified ·
1 Parent(s): cfc2c07

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/_locales.cpython-310.pyc +0 -0
  2. wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test__exceptions.cpython-310.pyc +0 -0
  3. wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_abc.cpython-310.pyc +0 -0
  4. wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_array_interface.cpython-310.pyc +0 -0
  5. wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_conversion_utils.cpython-310.pyc +0 -0
  6. wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_cpu_dispatcher.cpython-310.pyc +0 -0
  7. wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_cython.cpython-310.pyc +0 -0
  8. wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_defchararray.cpython-310.pyc +0 -0
  9. wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_deprecations.cpython-310.pyc +0 -0
  10. wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_dlpack.cpython-310.pyc +0 -0
  11. wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_einsum.cpython-310.pyc +0 -0
  12. wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_extint128.cpython-310.pyc +0 -0
  13. wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_hashtable.cpython-310.pyc +0 -0
  14. wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_indexerrors.cpython-310.pyc +0 -0
  15. wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_limited_api.cpython-310.pyc +0 -0
  16. wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_longdouble.cpython-310.pyc +0 -0
  17. wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_machar.cpython-310.pyc +0 -0
  18. wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_mem_policy.cpython-310.pyc +0 -0
  19. wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_numerictypes.cpython-310.pyc +0 -0
  20. wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_overrides.cpython-310.pyc +0 -0
  21. wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_scalar_ctors.cpython-310.pyc +0 -0
  22. wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_scalar_methods.cpython-310.pyc +0 -0
  23. wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_scalarinherit.cpython-310.pyc +0 -0
  24. wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_scalarprint.cpython-310.pyc +0 -0
  25. wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_simd.cpython-310.pyc +0 -0
  26. wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_simd_module.cpython-310.pyc +0 -0
  27. wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_ufunc.cpython-310.pyc +0 -0
  28. wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_umath_accuracy.cpython-310.pyc +0 -0
  29. wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_unicode.cpython-310.pyc +0 -0
  30. wemm/lib/python3.10/site-packages/numpy/core/tests/_locales.py +74 -0
  31. wemm/lib/python3.10/site-packages/numpy/core/tests/examples/cython/__pycache__/setup.cpython-310.pyc +0 -0
  32. wemm/lib/python3.10/site-packages/numpy/core/tests/examples/cython/checks.pyx +32 -0
  33. wemm/lib/python3.10/site-packages/numpy/core/tests/examples/cython/setup.py +25 -0
  34. wemm/lib/python3.10/site-packages/numpy/core/tests/examples/limited_api/__pycache__/setup.cpython-310.pyc +0 -0
  35. wemm/lib/python3.10/site-packages/numpy/core/tests/examples/limited_api/limited_api.c +17 -0
  36. wemm/lib/python3.10/site-packages/numpy/core/tests/examples/limited_api/setup.py +22 -0
  37. wemm/lib/python3.10/site-packages/numpy/core/tests/test__exceptions.py +88 -0
  38. wemm/lib/python3.10/site-packages/numpy/core/tests/test_abc.py +54 -0
  39. wemm/lib/python3.10/site-packages/numpy/core/tests/test_api.py +606 -0
  40. wemm/lib/python3.10/site-packages/numpy/core/tests/test_argparse.py +62 -0
  41. wemm/lib/python3.10/site-packages/numpy/core/tests/test_array_coercion.py +767 -0
  42. wemm/lib/python3.10/site-packages/numpy/core/tests/test_array_interface.py +216 -0
  43. wemm/lib/python3.10/site-packages/numpy/core/tests/test_arraymethod.py +93 -0
  44. wemm/lib/python3.10/site-packages/numpy/core/tests/test_arrayprint.py +967 -0
  45. wemm/lib/python3.10/site-packages/numpy/core/tests/test_casting_unittests.py +811 -0
  46. wemm/lib/python3.10/site-packages/numpy/core/tests/test_cpu_dispatcher.py +42 -0
  47. wemm/lib/python3.10/site-packages/numpy/core/tests/test_cpu_features.py +185 -0
  48. wemm/lib/python3.10/site-packages/numpy/core/tests/test_custom_dtypes.py +201 -0
  49. wemm/lib/python3.10/site-packages/numpy/core/tests/test_cython.py +134 -0
  50. wemm/lib/python3.10/site-packages/numpy/core/tests/test_defchararray.py +673 -0
wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/_locales.cpython-310.pyc ADDED
Binary file (2.49 kB). View file
 
wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test__exceptions.cpython-310.pyc ADDED
Binary file (3.41 kB). View file
 
wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_abc.cpython-310.pyc ADDED
Binary file (2.05 kB). View file
 
wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_array_interface.cpython-310.pyc ADDED
Binary file (5.93 kB). View file
 
wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_conversion_utils.cpython-310.pyc ADDED
Binary file (8.3 kB). View file
 
wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_cpu_dispatcher.cpython-310.pyc ADDED
Binary file (1.03 kB). View file
 
wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_cython.cpython-310.pyc ADDED
Binary file (3 kB). View file
 
wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_defchararray.cpython-310.pyc ADDED
Binary file (26.2 kB). View file
 
wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_deprecations.cpython-310.pyc ADDED
Binary file (58.1 kB). View file
 
wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_dlpack.cpython-310.pyc ADDED
Binary file (5.03 kB). View file
 
wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_einsum.cpython-310.pyc ADDED
Binary file (31.4 kB). View file
 
wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_extint128.cpython-310.pyc ADDED
Binary file (6.58 kB). View file
 
wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_hashtable.cpython-310.pyc ADDED
Binary file (1.09 kB). View file
 
wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_indexerrors.cpython-310.pyc ADDED
Binary file (7.37 kB). View file
 
wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_limited_api.cpython-310.pyc ADDED
Binary file (1.03 kB). View file
 
wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_longdouble.cpython-310.pyc ADDED
Binary file (12.3 kB). View file
 
wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_machar.cpython-310.pyc ADDED
Binary file (1.39 kB). View file
 
wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_mem_policy.cpython-310.pyc ADDED
Binary file (12.9 kB). View file
 
wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_numerictypes.cpython-310.pyc ADDED
Binary file (21 kB). View file
 
wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_overrides.cpython-310.pyc ADDED
Binary file (24.6 kB). View file
 
wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_scalar_ctors.cpython-310.pyc ADDED
Binary file (4.41 kB). View file
 
wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_scalar_methods.cpython-310.pyc ADDED
Binary file (8.18 kB). View file
 
wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_scalarinherit.cpython-310.pyc ADDED
Binary file (3.69 kB). View file
 
wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_scalarprint.cpython-310.pyc ADDED
Binary file (11.5 kB). View file
 
wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_simd.cpython-310.pyc ADDED
Binary file (34.2 kB). View file
 
wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_simd_module.cpython-310.pyc ADDED
Binary file (3.9 kB). View file
 
wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_ufunc.cpython-310.pyc ADDED
Binary file (83.9 kB). View file
 
wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_umath_accuracy.cpython-310.pyc ADDED
Binary file (2.89 kB). View file
 
wemm/lib/python3.10/site-packages/numpy/core/tests/__pycache__/test_unicode.cpython-310.pyc ADDED
Binary file (10.6 kB). View file
 
wemm/lib/python3.10/site-packages/numpy/core/tests/_locales.py ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Provide class for testing in French locale
2
+
3
+ """
4
+ import sys
5
+ import locale
6
+
7
+ import pytest
8
+
9
# NOTE(review): ``__ALL__`` is not a dunder Python recognizes — star-import
# filtering only honors lowercase ``__all__``, so as written this line has
# no effect. Consider renaming to ``__all__``; confirm no caller reads the
# uppercase name before changing it.
__ALL__ = ['CommaDecimalPointLocale']
10
+
11
+
12
def find_comma_decimal_point_locale():
    """See if platform has a decimal point as comma locale.

    Find a locale that uses a comma instead of a period as the
    decimal point.

    Returns
    -------
    old_locale : str
        Locale in effect when the function was called.
    new_locale : {str, None}
        First comma-decimal-point locale found, None if none was found.

    """
    # Windows uses its own locale naming scheme; elsewhere try French and
    # Finnish locales, both of which use a comma as the decimal point.
    if sys.platform == 'win32':
        locales = ['FRENCH']
    else:
        locales = ['fr_FR', 'fr_FR.UTF-8', 'fi_FI', 'fi_FI.UTF-8']

    # Remember the caller's LC_NUMERIC so it can always be restored.
    old_locale = locale.getlocale(locale.LC_NUMERIC)
    new_locale = None
    try:
        for loc in locales:
            try:
                locale.setlocale(locale.LC_NUMERIC, loc)
                new_locale = loc
                break
            except locale.Error:
                # Candidate locale is not installed; try the next one.
                pass
    finally:
        # Restore the original locale even if setlocale raised.
        locale.setlocale(locale.LC_NUMERIC, locale=old_locale)
    return old_locale, new_locale
44
+
45
+
46
class CommaDecimalPointLocale:
    """Sets LC_NUMERIC to a locale with comma as decimal point.

    Subclasses get setup/teardown hooks that switch locale.LC_NUMERIC to a
    locale whose decimal point is a comma (',') rather than a period ('.'),
    restoring the original locale afterwards. The class also works as a
    context manager with the same effect. When no suitable locale exists
    on the host, the test is skipped.

    .. versionadded:: 1.15.0

    """
    # Probed once at class-creation time; tst_locale is None when the
    # platform offers no comma-decimal-point locale.
    cur_locale, tst_locale = find_comma_decimal_point_locale()

    def _switch_to_comma_locale(self):
        # Shared by setup() and __enter__(): activate the comma locale,
        # or skip the test when none is available.
        if self.tst_locale is None:
            pytest.skip("No French locale available")
        locale.setlocale(locale.LC_NUMERIC, locale=self.tst_locale)

    def _restore_locale(self):
        # Shared by teardown() and __exit__(): put LC_NUMERIC back.
        locale.setlocale(locale.LC_NUMERIC, locale=self.cur_locale)

    def setup(self):
        self._switch_to_comma_locale()

    def teardown(self):
        self._restore_locale()

    def __enter__(self):
        self._switch_to_comma_locale()

    def __exit__(self, type, value, traceback):
        self._restore_locale()
wemm/lib/python3.10/site-packages/numpy/core/tests/examples/cython/__pycache__/setup.cpython-310.pyc ADDED
Binary file (704 Bytes). View file
 
wemm/lib/python3.10/site-packages/numpy/core/tests/examples/cython/checks.pyx ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#cython: language_level=3

"""
Functions in this module give python-space wrappers for cython functions
exposed in numpy/__init__.pxd, so they can be tested in test_cython.py
"""
cimport numpy as cnp
cnp.import_array()


def is_td64(obj):
    # Wraps cnp.is_timedelta64_object.
    return cnp.is_timedelta64_object(obj)


def is_dt64(obj):
    # Wraps cnp.is_datetime64_object.
    return cnp.is_datetime64_object(obj)


def get_dt64_value(obj):
    # Wraps cnp.get_datetime64_value.
    return cnp.get_datetime64_value(obj)


def get_td64_value(obj):
    # Wraps cnp.get_timedelta64_value.
    return cnp.get_timedelta64_value(obj)


def get_dt64_unit(obj):
    # Wraps cnp.get_datetime64_unit.
    return cnp.get_datetime64_unit(obj)


def is_integer(obj):
    # True for both NumPy integer scalars and Python ints.
    return isinstance(obj, (cnp.integer, int))
wemm/lib/python3.10/site-packages/numpy/core/tests/examples/cython/setup.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Provide python-space access to the functions exposed in numpy/__init__.pxd
3
+ for testing.
4
+ """
5
+
6
+ import numpy as np
7
+ from distutils.core import setup
8
+ from Cython.Build import cythonize
9
+ from setuptools.extension import Extension
10
+ import os
11
+
12
+ macros = [("NPY_NO_DEPRECATED_API", 0)]
13
+
14
+ checks = Extension(
15
+ "checks",
16
+ sources=[os.path.join('.', "checks.pyx")],
17
+ include_dirs=[np.get_include()],
18
+ define_macros=macros,
19
+ )
20
+
21
+ extensions = [checks]
22
+
23
+ setup(
24
+ ext_modules=cythonize(extensions)
25
+ )
wemm/lib/python3.10/site-packages/numpy/core/tests/examples/limited_api/__pycache__/setup.cpython-310.pyc ADDED
Binary file (630 Bytes). View file
 
wemm/lib/python3.10/site-packages/numpy/core/tests/examples/limited_api/limited_api.c ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
/* Build against the stable ABI (limited API) of CPython >= 3.6. */
#define Py_LIMITED_API 0x03060000

#include <Python.h>
#include <numpy/arrayobject.h>
#include <numpy/ufuncobject.h>

/* Minimal module definition: no methods or state, just a name.  The
 * point of this extension is only to prove that the NumPy headers can
 * be compiled under Py_LIMITED_API. */
static PyModuleDef moduledef = {
    .m_base = PyModuleDef_HEAD_INIT,
    .m_name = "limited_api"
};

PyMODINIT_FUNC PyInit_limited_api(void)
{
    /* import_array()/import_umath() initialize the NumPy C-API tables;
     * both macros return NULL from this function on failure. */
    import_array();
    import_umath();
    return PyModule_Create(&moduledef);
}
wemm/lib/python3.10/site-packages/numpy/core/tests/examples/limited_api/setup.py ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Build an example package using the limited Python C API.
3
+ """
4
+
5
+ import numpy as np
6
+ from setuptools import setup, Extension
7
+ import os
8
+
9
+ macros = [("NPY_NO_DEPRECATED_API", 0), ("Py_LIMITED_API", "0x03060000")]
10
+
11
+ limited_api = Extension(
12
+ "limited_api",
13
+ sources=[os.path.join('.', "limited_api.c")],
14
+ include_dirs=[np.get_include()],
15
+ define_macros=macros,
16
+ )
17
+
18
+ extensions = [limited_api]
19
+
20
+ setup(
21
+ ext_modules=extensions
22
+ )
wemm/lib/python3.10/site-packages/numpy/core/tests/test__exceptions.py ADDED
@@ -0,0 +1,88 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Tests of the ._exceptions module. Primarily for exercising the __str__ methods.
3
+ """
4
+
5
+ import pickle
6
+
7
+ import pytest
8
+ import numpy as np
9
+
10
+ _ArrayMemoryError = np.core._exceptions._ArrayMemoryError
11
+ _UFuncNoLoopError = np.core._exceptions._UFuncNoLoopError
12
+
13
class TestArrayMemoryError:
    """Exercise _ArrayMemoryError: pickling, str(), and its helpers."""

    def test_pickling(self):
        """ Test that _ArrayMemoryError can be pickled """
        error = _ArrayMemoryError((1023,), np.dtype(np.uint8))
        res = pickle.loads(pickle.dumps(error))
        assert res._total_size == error._total_size

    def test_str(self):
        e = _ArrayMemoryError((1023,), np.dtype(np.uint8))
        str(e)  # not crashing is enough

    # testing these properties is easier than testing the full string repr
    def test__size_to_string(self):
        """ Test e._size_to_string """
        f = _ArrayMemoryError._size_to_string
        Ki = 1024
        # Plain byte counts below 1 KiB are reported without a prefix.
        assert f(0) == '0 bytes'
        assert f(1) == '1 bytes'
        assert f(1023) == '1023 bytes'
        # Three significant figures, binary (IEC) prefixes.
        assert f(Ki) == '1.00 KiB'
        assert f(Ki+1) == '1.00 KiB'
        assert f(10*Ki) == '10.0 KiB'
        assert f(int(999.4*Ki)) == '999. KiB'
        assert f(int(1023.4*Ki)) == '1023. KiB'
        assert f(int(1023.5*Ki)) == '1.00 MiB'
        assert f(Ki*Ki) == '1.00 MiB'

        # 1023.9999 Mib should round to 1 GiB
        assert f(int(Ki*Ki*Ki*0.9999)) == '1.00 GiB'
        assert f(Ki*Ki*Ki*Ki*Ki*Ki) == '1.00 EiB'
        # larger than sys.maxsize, adding larger prefixes isn't going to help
        # anyway.
        assert f(Ki*Ki*Ki*Ki*Ki*Ki*123456) == '123456. EiB'

    def test__total_size(self):
        """ Test e._total_size """
        e = _ArrayMemoryError((1,), np.dtype(np.uint8))
        assert e._total_size == 1

        # shape (2, 4) of a (uint64, 16) subarray dtype: 8 * 128 = 1024.
        e = _ArrayMemoryError((2, 4), np.dtype((np.uint64, 16)))
        assert e._total_size == 1024
54
+
55
+
56
class TestUFuncNoLoopError:
    """Pickling behaviour of the private _UFuncNoLoopError type."""

    def test_pickling(self):
        """The exception class itself must survive pickle.dumps."""
        payload = pickle.dumps(_UFuncNoLoopError)
        assert isinstance(payload, bytes)
60
+
61
+
62
# Each test runs once per construction form: (axis, ndim, msg_prefix),
# (axis, ndim, custom prefix), and the message-only single-argument form.
@pytest.mark.parametrize("args", [
    (2, 1, None),
    (2, 1, "test_prefix"),
    ("test message",),
])
class TestAxisError:
    def test_attr(self, args):
        """Validate attribute types."""
        exc = np.AxisError(*args)
        if len(args) == 1:
            # Message-only form: axis/ndim are not recorded.
            assert exc.axis is None
            assert exc.ndim is None
        else:
            axis, ndim, *_ = args
            assert exc.axis == axis
            assert exc.ndim == ndim

    def test_pickling(self, args):
        """Test that `AxisError` can be pickled."""
        exc = np.AxisError(*args)
        exc2 = pickle.loads(pickle.dumps(exc))

        # Round-trip must preserve type and the documented attributes.
        assert type(exc) is type(exc2)
        for name in ("axis", "ndim", "args"):
            attr1 = getattr(exc, name)
            attr2 = getattr(exc2, name)
            assert attr1 == attr2, name
wemm/lib/python3.10/site-packages/numpy/core/tests/test_abc.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from numpy.testing import assert_
2
+
3
+ import numbers
4
+
5
+ import numpy as np
6
+ from numpy.core.numerictypes import sctypes
7
+
8
class TestABC:
    """Check that NumPy scalar types register with the ``numbers`` ABCs."""

    def test_abstract(self):
        # The abstract NumPy scalar hierarchy mirrors the numbers tower.
        assert_(issubclass(np.number, numbers.Number))

        assert_(issubclass(np.inexact, numbers.Complex))
        assert_(issubclass(np.complexfloating, numbers.Complex))
        assert_(issubclass(np.floating, numbers.Real))

        assert_(issubclass(np.integer, numbers.Integral))
        assert_(issubclass(np.signedinteger, numbers.Integral))
        assert_(issubclass(np.unsignedinteger, numbers.Integral))

    def test_floats(self):
        # Float scalars are Real but never Rational.
        for scalar_type in sctypes['float']:
            name = scalar_type.__name__
            assert_(isinstance(scalar_type(), numbers.Real),
                    "{0} is not instance of Real".format(name))
            assert_(issubclass(scalar_type, numbers.Real),
                    "{0} is not subclass of Real".format(name))
            assert_(not isinstance(scalar_type(), numbers.Rational),
                    "{0} is instance of Rational".format(name))
            assert_(not issubclass(scalar_type, numbers.Rational),
                    "{0} is subclass of Rational".format(name))

    def test_complex(self):
        # Complex scalars are Complex but not Real.
        for scalar_type in sctypes['complex']:
            name = scalar_type.__name__
            assert_(isinstance(scalar_type(), numbers.Complex),
                    "{0} is not instance of Complex".format(name))
            assert_(issubclass(scalar_type, numbers.Complex),
                    "{0} is not subclass of Complex".format(name))
            assert_(not isinstance(scalar_type(), numbers.Real),
                    "{0} is instance of Real".format(name))
            assert_(not issubclass(scalar_type, numbers.Real),
                    "{0} is subclass of Real".format(name))

    def test_int(self):
        for scalar_type in sctypes['int']:
            name = scalar_type.__name__
            assert_(isinstance(scalar_type(), numbers.Integral),
                    "{0} is not instance of Integral".format(name))
            assert_(issubclass(scalar_type, numbers.Integral),
                    "{0} is not subclass of Integral".format(name))

    def test_uint(self):
        for scalar_type in sctypes['uint']:
            name = scalar_type.__name__
            assert_(isinstance(scalar_type(), numbers.Integral),
                    "{0} is not instance of Integral".format(name))
            assert_(issubclass(scalar_type, numbers.Integral),
                    "{0} is not subclass of Integral".format(name))
wemm/lib/python3.10/site-packages/numpy/core/tests/test_api.py ADDED
@@ -0,0 +1,606 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys
2
+
3
+ import numpy as np
4
+ from numpy.core._rational_tests import rational
5
+ import pytest
6
+ from numpy.testing import (
7
+ assert_, assert_equal, assert_array_equal, assert_raises, assert_warns,
8
+ HAS_REFCOUNT
9
+ )
10
+
11
+
12
def test_array_array():
    """Smoke-test np.array() construction from the many supported input
    kinds: ndarrays, None, scalars, bytes/str, memoryview, the array
    interfaces, __array__, deeply nested sequences, lists and tuples."""
    tobj = type(object)
    ones11 = np.ones((1, 1), np.float64)
    tndarray = type(ones11)
    # Test is_ndarray
    assert_equal(np.array(ones11, dtype=np.float64), ones11)
    if HAS_REFCOUNT:
        # Construction must not leak a reference to the ndarray type.
        old_refcount = sys.getrefcount(tndarray)
        np.array(ones11)
        assert_equal(old_refcount, sys.getrefcount(tndarray))

    # test None
    assert_equal(np.array(None, dtype=np.float64),
                 np.array(np.nan, dtype=np.float64))
    if HAS_REFCOUNT:
        old_refcount = sys.getrefcount(tobj)
        np.array(None, dtype=np.float64)
        assert_equal(old_refcount, sys.getrefcount(tobj))

    # test scalar
    assert_equal(np.array(1.0, dtype=np.float64),
                 np.ones((), dtype=np.float64))
    if HAS_REFCOUNT:
        old_refcount = sys.getrefcount(np.float64)
        np.array(np.array(1.0, dtype=np.float64), dtype=np.float64)
        assert_equal(old_refcount, sys.getrefcount(np.float64))

    # test string (bytes): length discovery and truncation on fixed widths
    S2 = np.dtype((bytes, 2))
    S3 = np.dtype((bytes, 3))
    S5 = np.dtype((bytes, 5))
    assert_equal(np.array(b"1.0", dtype=np.float64),
                 np.ones((), dtype=np.float64))
    assert_equal(np.array(b"1.0").dtype, S3)
    assert_equal(np.array(b"1.0", dtype=bytes).dtype, S3)
    assert_equal(np.array(b"1.0", dtype=S2), np.array(b"1."))
    assert_equal(np.array(b"1", dtype=S5), np.ones((), dtype=S5))

    # test string (unicode): same checks with str dtypes
    U2 = np.dtype((str, 2))
    U3 = np.dtype((str, 3))
    U5 = np.dtype((str, 5))
    assert_equal(np.array("1.0", dtype=np.float64),
                 np.ones((), dtype=np.float64))
    assert_equal(np.array("1.0").dtype, U3)
    assert_equal(np.array("1.0", dtype=str).dtype, U3)
    assert_equal(np.array("1.0", dtype=U2), np.array(str("1.")))
    assert_equal(np.array("1", dtype=U5), np.ones((), dtype=U5))

    # __builtins__ may be a module or a dict depending on how we run.
    builtins = getattr(__builtins__, '__dict__', __builtins__)
    assert_(hasattr(builtins, 'get'))

    # test memoryview
    dat = np.array(memoryview(b'1.0'), dtype=np.float64)
    assert_equal(dat, [49.0, 46.0, 48.0])
    assert_(dat.dtype.type is np.float64)

    dat = np.array(memoryview(b'1.0'))
    assert_equal(dat, [49, 46, 48])
    assert_(dat.dtype.type is np.uint8)

    # test array interface
    a = np.array(100.0, dtype=np.float64)
    o = type("o", (object,),
             dict(__array_interface__=a.__array_interface__))
    assert_equal(np.array(o, dtype=np.float64), a)

    # test array_struct interface
    a = np.array([(1, 4.0, 'Hello'), (2, 6.0, 'World')],
                 dtype=[('f0', int), ('f1', float), ('f2', str)])
    o = type("o", (object,),
             dict(__array_struct__=a.__array_struct__))
    ## wasn't what I expected... is np.array(o) supposed to equal a ?
    ## instead we get a array([...], dtype=">V18")
    assert_equal(bytes(np.array(o).data), bytes(a.data))

    # test array
    o = type("o", (object,),
             dict(__array__=lambda *x: np.array(100.0, dtype=np.float64)))()
    assert_equal(np.array(o, dtype=np.float64), np.array(100.0, np.float64))

    # test recursion: nest a scalar exactly MAXDIMS levels deep
    nested = 1.5
    for i in range(np.MAXDIMS):
        nested = [nested]

    # no error
    np.array(nested)

    # Exceeds recursion limit
    assert_raises(ValueError, np.array, [nested], dtype=np.float64)

    # Try with lists...
    assert_equal(np.array([None] * 10, dtype=np.float64),
                 np.full((10,), np.nan, dtype=np.float64))
    assert_equal(np.array([[None]] * 10, dtype=np.float64),
                 np.full((10, 1), np.nan, dtype=np.float64))
    assert_equal(np.array([[None] * 10], dtype=np.float64),
                 np.full((1, 10), np.nan, dtype=np.float64))
    assert_equal(np.array([[None] * 10] * 10, dtype=np.float64),
                 np.full((10, 10), np.nan, dtype=np.float64))

    assert_equal(np.array([1.0] * 10, dtype=np.float64),
                 np.ones((10,), dtype=np.float64))
    assert_equal(np.array([[1.0]] * 10, dtype=np.float64),
                 np.ones((10, 1), dtype=np.float64))
    assert_equal(np.array([[1.0] * 10], dtype=np.float64),
                 np.ones((1, 10), dtype=np.float64))
    assert_equal(np.array([[1.0] * 10] * 10, dtype=np.float64),
                 np.ones((10, 10), dtype=np.float64))

    # Try with tuples
    assert_equal(np.array((None,) * 10, dtype=np.float64),
                 np.full((10,), np.nan, dtype=np.float64))
    assert_equal(np.array([(None,)] * 10, dtype=np.float64),
                 np.full((10, 1), np.nan, dtype=np.float64))
    assert_equal(np.array([(None,) * 10], dtype=np.float64),
                 np.full((1, 10), np.nan, dtype=np.float64))
    assert_equal(np.array([(None,) * 10] * 10, dtype=np.float64),
                 np.full((10, 10), np.nan, dtype=np.float64))

    assert_equal(np.array((1.0,) * 10, dtype=np.float64),
                 np.ones((10,), dtype=np.float64))
    assert_equal(np.array([(1.0,)] * 10, dtype=np.float64),
                 np.ones((10, 1), dtype=np.float64))
    assert_equal(np.array([(1.0,) * 10], dtype=np.float64),
                 np.ones((1, 10), dtype=np.float64))
    assert_equal(np.array([(1.0,) * 10] * 10, dtype=np.float64),
                 np.ones((10, 10), dtype=np.float64))
141
+
142
@pytest.mark.parametrize("array", [True, False])
def test_array_impossible_casts(array):
    # All builtin types can be forcibly cast, at least theoretically,
    # but user dtypes cannot necessarily.
    rt = rational(1, 2)
    if array:
        # Exercise the same cast through a 0-d array wrapping the scalar.
        rt = np.array(rt)
    with assert_raises(TypeError):
        # rational -> datetime64 has no defined cast.
        np.array(rt, dtype="M8")
151
+
152
+
153
def test_fastCopyAndTranspose():
    """np.fastCopyAndTranspose must return an owning copy equal to the
    transpose, for 0-D, 1-D and 2-D inputs."""
    # 0D array
    a = np.array(2)
    b = np.fastCopyAndTranspose(a)
    assert_equal(b, a.T)
    assert_(b.flags.owndata)

    # 1D array
    a = np.array([3, 2, 7, 0])
    b = np.fastCopyAndTranspose(a)
    assert_equal(b, a.T)
    assert_(b.flags.owndata)

    # 2D array
    a = np.arange(6).reshape(2, 3)
    b = np.fastCopyAndTranspose(a)
    assert_equal(b, a.T)
    assert_(b.flags.owndata)
171
+
172
def test_array_astype():
    """Exercise ndarray.astype: default copy semantics, the copy/order/
    casting/subok parameters, subclass handling, and object-to-string
    width discovery."""
    a = np.arange(6, dtype='f4').reshape(2, 3)
    # Default behavior: allows unsafe casts, keeps memory layout,
    # always copies.
    b = a.astype('i4')
    assert_equal(a, b)
    assert_equal(b.dtype, np.dtype('i4'))
    assert_equal(a.strides, b.strides)
    b = a.T.astype('i4')
    assert_equal(a.T, b)
    assert_equal(b.dtype, np.dtype('i4'))
    assert_equal(a.T.strides, b.strides)
    b = a.astype('f4')
    assert_equal(a, b)
    assert_(not (a is b))

    # copy=False parameter can sometimes skip a copy
    b = a.astype('f4', copy=False)
    assert_(a is b)

    # order parameter allows overriding of the memory layout,
    # forcing a copy if the layout is wrong
    b = a.astype('f4', order='F', copy=False)
    assert_equal(a, b)
    assert_(not (a is b))
    assert_(b.flags.f_contiguous)

    b = a.astype('f4', order='C', copy=False)
    assert_equal(a, b)
    assert_(a is b)
    assert_(b.flags.c_contiguous)

    # casting parameter allows catching bad casts
    b = a.astype('c8', casting='safe')
    assert_equal(a, b)
    assert_equal(b.dtype, np.dtype('c8'))

    # float -> int is not a 'safe' cast.
    assert_raises(TypeError, a.astype, 'i4', casting='safe')

    # subok=False passes through a non-subclassed array
    b = a.astype('f4', subok=0, copy=False)
    assert_(a is b)

    class MyNDArray(np.ndarray):
        pass

    a = np.array([[0, 1, 2], [3, 4, 5]], dtype='f4').view(MyNDArray)

    # subok=True passes through a subclass
    b = a.astype('f4', subok=True, copy=False)
    assert_(a is b)

    # subok=True is default, and creates a subtype on a cast
    b = a.astype('i4', copy=False)
    assert_equal(a, b)
    assert_equal(type(b), MyNDArray)

    # subok=False never returns a subclass
    b = a.astype('f4', subok=False, copy=False)
    assert_equal(a, b)
    assert_(not (a is b))
    assert_(type(b) is not MyNDArray)

    # Make sure converting from string object to fixed length string
    # does not truncate.
    a = np.array([b'a'*100], dtype='O')
    b = a.astype('S')
    assert_equal(a, b)
    assert_equal(b.dtype, np.dtype('S100'))
    a = np.array([u'a'*100], dtype='O')
    b = a.astype('U')
    assert_equal(a, b)
    assert_equal(b.dtype, np.dtype('U100'))

    # Same test as above but for strings shorter than 64 characters
    a = np.array([b'a'*10], dtype='O')
    b = a.astype('S')
    assert_equal(a, b)
    assert_equal(b.dtype, np.dtype('S10'))
    a = np.array([u'a'*10], dtype='O')
    b = a.astype('U')
    assert_equal(a, b)
    assert_equal(b.dtype, np.dtype('U10'))

    # Width discovery from a large Python int, scalar and 1-element forms.
    a = np.array(123456789012345678901234567890, dtype='O').astype('S')
    assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30'))
    a = np.array(123456789012345678901234567890, dtype='O').astype('U')
    assert_array_equal(a, np.array(u'1234567890' * 3, dtype='U30'))

    a = np.array([123456789012345678901234567890], dtype='O').astype('S')
    assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30'))
    a = np.array([123456789012345678901234567890], dtype='O').astype('U')
    assert_array_equal(a, np.array(u'1234567890' * 3, dtype='U30'))

    a = np.array(123456789012345678901234567890, dtype='S')
    assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30'))
    a = np.array(123456789012345678901234567890, dtype='U')
    assert_array_equal(a, np.array(u'1234567890' * 3, dtype='U30'))

    # A 2-character unicode string is 2 UCS4 code points = 2 uint32 words.
    a = np.array(u'a\u0140', dtype='U')
    b = np.ndarray(buffer=a, dtype='uint32', shape=2)
    assert_(b.size == 2)

    # Numeric -> fixed-width string is not a 'safe' cast.
    a = np.array([1000], dtype='i4')
    assert_raises(TypeError, a.astype, 'S1', casting='safe')

    a = np.array(1000, dtype='i4')
    assert_raises(TypeError, a.astype, 'U1', casting='safe')
280
+
281
@pytest.mark.parametrize("dt", ["S", "U"])
def test_array_astype_to_string_discovery_empty(dt):
    # See also gh-19085
    arr = np.array([""], dtype=object)
    # Note, the itemsize is the `0 -> 1` logic, which should change.
    # The important part of the test is rather that it does not error.
    assert arr.astype(dt).dtype.itemsize == np.dtype(f"{dt}1").itemsize

    # check the same thing for `np.can_cast` (since it accepts arrays)
    assert np.can_cast(arr, dt, casting="unsafe")
    assert not np.can_cast(arr, dt, casting="same_kind")
    # as well as for the object as a descriptor:
    assert np.can_cast("O", dt, casting="unsafe")
294
+
295
+ @pytest.mark.parametrize("dt", ["d", "f", "S13", "U32"])
296
+ def test_array_astype_to_void(dt):
297
+ dt = np.dtype(dt)
298
+ arr = np.array([], dtype=dt)
299
+ assert arr.astype("V").dtype.itemsize == dt.itemsize
300
+
301
+ def test_object_array_astype_to_void():
302
+ # This is different to `test_array_astype_to_void` as object arrays
303
+ # are inspected. The default void is "V8" (8 is the length of double)
304
+ arr = np.array([], dtype="O").astype("V")
305
+ assert arr.dtype == "V8"
306
+
307
+ @pytest.mark.parametrize("t",
308
+ np.sctypes['uint'] + np.sctypes['int'] + np.sctypes['float']
309
+ )
310
+ def test_array_astype_warning(t):
311
+ # test ComplexWarning when casting from complex to float or int
312
+ a = np.array(10, dtype=np.complex_)
313
+ assert_warns(np.ComplexWarning, a.astype, t)
314
+
315
+ @pytest.mark.parametrize(["dtype", "out_dtype"],
316
+ [(np.bytes_, np.bool_),
317
+ (np.unicode_, np.bool_),
318
+ (np.dtype("S10,S9"), np.dtype("?,?"))])
319
+ def test_string_to_boolean_cast(dtype, out_dtype):
320
+ """
321
+ Currently, for `astype` strings are cast to booleans effectively by
322
+ calling `bool(int(string)`. This is not consistent (see gh-9875) and
323
+ will eventually be deprecated.
324
+ """
325
+ arr = np.array(["10", "10\0\0\0", "0\0\0", "0"], dtype=dtype)
326
+ expected = np.array([True, True, False, False], dtype=out_dtype)
327
+ assert_array_equal(arr.astype(out_dtype), expected)
328
+
329
+ @pytest.mark.parametrize(["dtype", "out_dtype"],
330
+ [(np.bytes_, np.bool_),
331
+ (np.unicode_, np.bool_),
332
+ (np.dtype("S10,S9"), np.dtype("?,?"))])
333
+ def test_string_to_boolean_cast_errors(dtype, out_dtype):
334
+ """
335
+ These currently error out, since cast to integers fails, but should not
336
+ error out in the future.
337
+ """
338
+ for invalid in ["False", "True", "", "\0", "non-empty"]:
339
+ arr = np.array([invalid], dtype=dtype)
340
+ with assert_raises(ValueError):
341
+ arr.astype(out_dtype)
342
+
343
+ @pytest.mark.parametrize("str_type", [str, bytes, np.str_, np.unicode_])
344
+ @pytest.mark.parametrize("scalar_type",
345
+ [np.complex64, np.complex128, np.clongdouble])
346
+ def test_string_to_complex_cast(str_type, scalar_type):
347
+ value = scalar_type(b"1+3j")
348
+ assert scalar_type(value) == 1+3j
349
+ assert np.array([value], dtype=object).astype(scalar_type)[()] == 1+3j
350
+ assert np.array(value).astype(scalar_type)[()] == 1+3j
351
+ arr = np.zeros(1, dtype=scalar_type)
352
+ arr[0] = value
353
+ assert arr[0] == 1+3j
354
+
355
+ @pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
356
+ def test_none_to_nan_cast(dtype):
357
+ # Note that at the time of writing this test, the scalar constructors
358
+ # reject None
359
+ arr = np.zeros(1, dtype=dtype)
360
+ arr[0] = None
361
+ assert np.isnan(arr)[0]
362
+ assert np.isnan(np.array(None, dtype=dtype))[()]
363
+ assert np.isnan(np.array([None], dtype=dtype))[0]
364
+ assert np.isnan(np.array(None).astype(dtype))[()]
365
+
366
+ def test_copyto_fromscalar():
367
+ a = np.arange(6, dtype='f4').reshape(2, 3)
368
+
369
+ # Simple copy
370
+ np.copyto(a, 1.5)
371
+ assert_equal(a, 1.5)
372
+ np.copyto(a.T, 2.5)
373
+ assert_equal(a, 2.5)
374
+
375
+ # Where-masked copy
376
+ mask = np.array([[0, 1, 0], [0, 0, 1]], dtype='?')
377
+ np.copyto(a, 3.5, where=mask)
378
+ assert_equal(a, [[2.5, 3.5, 2.5], [2.5, 2.5, 3.5]])
379
+ mask = np.array([[0, 1], [1, 1], [1, 0]], dtype='?')
380
+ np.copyto(a.T, 4.5, where=mask)
381
+ assert_equal(a, [[2.5, 4.5, 4.5], [4.5, 4.5, 3.5]])
382
+
383
+ def test_copyto():
384
+ a = np.arange(6, dtype='i4').reshape(2, 3)
385
+
386
+ # Simple copy
387
+ np.copyto(a, [[3, 1, 5], [6, 2, 1]])
388
+ assert_equal(a, [[3, 1, 5], [6, 2, 1]])
389
+
390
+ # Overlapping copy should work
391
+ np.copyto(a[:, :2], a[::-1, 1::-1])
392
+ assert_equal(a, [[2, 6, 5], [1, 3, 1]])
393
+
394
+ # Defaults to 'same_kind' casting
395
+ assert_raises(TypeError, np.copyto, a, 1.5)
396
+
397
+ # Force a copy with 'unsafe' casting, truncating 1.5 to 1
398
+ np.copyto(a, 1.5, casting='unsafe')
399
+ assert_equal(a, 1)
400
+
401
+ # Copying with a mask
402
+ np.copyto(a, 3, where=[True, False, True])
403
+ assert_equal(a, [[3, 1, 3], [3, 1, 3]])
404
+
405
+ # Casting rule still applies with a mask
406
+ assert_raises(TypeError, np.copyto, a, 3.5, where=[True, False, True])
407
+
408
+ # Lists of integer 0's and 1's is ok too
409
+ np.copyto(a, 4.0, casting='unsafe', where=[[0, 1, 1], [1, 0, 0]])
410
+ assert_equal(a, [[3, 4, 4], [4, 1, 3]])
411
+
412
+ # Overlapping copy with mask should work
413
+ np.copyto(a[:, :2], a[::-1, 1::-1], where=[[0, 1], [1, 1]])
414
+ assert_equal(a, [[3, 4, 4], [4, 3, 3]])
415
+
416
+ # 'dst' must be an array
417
+ assert_raises(TypeError, np.copyto, [1, 2, 3], [2, 3, 4])
418
+
419
+ def test_copyto_permut():
420
+ # test explicit overflow case
421
+ pad = 500
422
+ l = [True] * pad + [True, True, True, True]
423
+ r = np.zeros(len(l)-pad)
424
+ d = np.ones(len(l)-pad)
425
+ mask = np.array(l)[pad:]
426
+ np.copyto(r, d, where=mask[::-1])
427
+
428
+ # test all permutation of possible masks, 9 should be sufficient for
429
+ # current 4 byte unrolled code
430
+ power = 9
431
+ d = np.ones(power)
432
+ for i in range(2**power):
433
+ r = np.zeros(power)
434
+ l = [(i & x) != 0 for x in range(power)]
435
+ mask = np.array(l)
436
+ np.copyto(r, d, where=mask)
437
+ assert_array_equal(r == 1, l)
438
+ assert_equal(r.sum(), sum(l))
439
+
440
+ r = np.zeros(power)
441
+ np.copyto(r, d, where=mask[::-1])
442
+ assert_array_equal(r == 1, l[::-1])
443
+ assert_equal(r.sum(), sum(l))
444
+
445
+ r = np.zeros(power)
446
+ np.copyto(r[::2], d[::2], where=mask[::2])
447
+ assert_array_equal(r[::2] == 1, l[::2])
448
+ assert_equal(r[::2].sum(), sum(l[::2]))
449
+
450
+ r = np.zeros(power)
451
+ np.copyto(r[::2], d[::2], where=mask[::-2])
452
+ assert_array_equal(r[::2] == 1, l[::-2])
453
+ assert_equal(r[::2].sum(), sum(l[::-2]))
454
+
455
+ for c in [0xFF, 0x7F, 0x02, 0x10]:
456
+ r = np.zeros(power)
457
+ mask = np.array(l)
458
+ imask = np.array(l).view(np.uint8)
459
+ imask[mask != 0] = c
460
+ np.copyto(r, d, where=mask)
461
+ assert_array_equal(r == 1, l)
462
+ assert_equal(r.sum(), sum(l))
463
+
464
+ r = np.zeros(power)
465
+ np.copyto(r, d, where=True)
466
+ assert_equal(r.sum(), r.size)
467
+ r = np.ones(power)
468
+ d = np.zeros(power)
469
+ np.copyto(r, d, where=False)
470
+ assert_equal(r.sum(), r.size)
471
+
472
+ def test_copy_order():
473
+ a = np.arange(24).reshape(2, 1, 3, 4)
474
+ b = a.copy(order='F')
475
+ c = np.arange(24).reshape(2, 1, 4, 3).swapaxes(2, 3)
476
+
477
+ def check_copy_result(x, y, ccontig, fcontig, strides=False):
478
+ assert_(not (x is y))
479
+ assert_equal(x, y)
480
+ assert_equal(res.flags.c_contiguous, ccontig)
481
+ assert_equal(res.flags.f_contiguous, fcontig)
482
+
483
+ # Validate the initial state of a, b, and c
484
+ assert_(a.flags.c_contiguous)
485
+ assert_(not a.flags.f_contiguous)
486
+ assert_(not b.flags.c_contiguous)
487
+ assert_(b.flags.f_contiguous)
488
+ assert_(not c.flags.c_contiguous)
489
+ assert_(not c.flags.f_contiguous)
490
+
491
+ # Copy with order='C'
492
+ res = a.copy(order='C')
493
+ check_copy_result(res, a, ccontig=True, fcontig=False, strides=True)
494
+ res = b.copy(order='C')
495
+ check_copy_result(res, b, ccontig=True, fcontig=False, strides=False)
496
+ res = c.copy(order='C')
497
+ check_copy_result(res, c, ccontig=True, fcontig=False, strides=False)
498
+ res = np.copy(a, order='C')
499
+ check_copy_result(res, a, ccontig=True, fcontig=False, strides=True)
500
+ res = np.copy(b, order='C')
501
+ check_copy_result(res, b, ccontig=True, fcontig=False, strides=False)
502
+ res = np.copy(c, order='C')
503
+ check_copy_result(res, c, ccontig=True, fcontig=False, strides=False)
504
+
505
+ # Copy with order='F'
506
+ res = a.copy(order='F')
507
+ check_copy_result(res, a, ccontig=False, fcontig=True, strides=False)
508
+ res = b.copy(order='F')
509
+ check_copy_result(res, b, ccontig=False, fcontig=True, strides=True)
510
+ res = c.copy(order='F')
511
+ check_copy_result(res, c, ccontig=False, fcontig=True, strides=False)
512
+ res = np.copy(a, order='F')
513
+ check_copy_result(res, a, ccontig=False, fcontig=True, strides=False)
514
+ res = np.copy(b, order='F')
515
+ check_copy_result(res, b, ccontig=False, fcontig=True, strides=True)
516
+ res = np.copy(c, order='F')
517
+ check_copy_result(res, c, ccontig=False, fcontig=True, strides=False)
518
+
519
+ # Copy with order='K'
520
+ res = a.copy(order='K')
521
+ check_copy_result(res, a, ccontig=True, fcontig=False, strides=True)
522
+ res = b.copy(order='K')
523
+ check_copy_result(res, b, ccontig=False, fcontig=True, strides=True)
524
+ res = c.copy(order='K')
525
+ check_copy_result(res, c, ccontig=False, fcontig=False, strides=True)
526
+ res = np.copy(a, order='K')
527
+ check_copy_result(res, a, ccontig=True, fcontig=False, strides=True)
528
+ res = np.copy(b, order='K')
529
+ check_copy_result(res, b, ccontig=False, fcontig=True, strides=True)
530
+ res = np.copy(c, order='K')
531
+ check_copy_result(res, c, ccontig=False, fcontig=False, strides=True)
532
+
533
+ def test_contiguous_flags():
534
+ a = np.ones((4, 4, 1))[::2,:,:]
535
+ a.strides = a.strides[:2] + (-123,)
536
+ b = np.ones((2, 2, 1, 2, 2)).swapaxes(3, 4)
537
+
538
+ def check_contig(a, ccontig, fcontig):
539
+ assert_(a.flags.c_contiguous == ccontig)
540
+ assert_(a.flags.f_contiguous == fcontig)
541
+
542
+ # Check if new arrays are correct:
543
+ check_contig(a, False, False)
544
+ check_contig(b, False, False)
545
+ check_contig(np.empty((2, 2, 0, 2, 2)), True, True)
546
+ check_contig(np.array([[[1], [2]]], order='F'), True, True)
547
+ check_contig(np.empty((2, 2)), True, False)
548
+ check_contig(np.empty((2, 2), order='F'), False, True)
549
+
550
+ # Check that np.array creates correct contiguous flags:
551
+ check_contig(np.array(a, copy=False), False, False)
552
+ check_contig(np.array(a, copy=False, order='C'), True, False)
553
+ check_contig(np.array(a, ndmin=4, copy=False, order='F'), False, True)
554
+
555
+ # Check slicing update of flags and :
556
+ check_contig(a[0], True, True)
557
+ check_contig(a[None, ::4, ..., None], True, True)
558
+ check_contig(b[0, 0, ...], False, True)
559
+ check_contig(b[:, :, 0:0, :, :], True, True)
560
+
561
+ # Test ravel and squeeze.
562
+ check_contig(a.ravel(), True, True)
563
+ check_contig(np.ones((1, 3, 1)).squeeze(), True, True)
564
+
565
+ def test_broadcast_arrays():
566
+ # Test user defined dtypes
567
+ a = np.array([(1, 2, 3)], dtype='u4,u4,u4')
568
+ b = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4')
569
+ result = np.broadcast_arrays(a, b)
570
+ assert_equal(result[0], np.array([(1, 2, 3), (1, 2, 3), (1, 2, 3)], dtype='u4,u4,u4'))
571
+ assert_equal(result[1], np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4'))
572
+
573
+ @pytest.mark.parametrize(["shape", "fill_value", "expected_output"],
574
+ [((2, 2), [5.0, 6.0], np.array([[5.0, 6.0], [5.0, 6.0]])),
575
+ ((3, 2), [1.0, 2.0], np.array([[1.0, 2.0], [1.0, 2.0], [1.0, 2.0]]))])
576
+ def test_full_from_list(shape, fill_value, expected_output):
577
+ output = np.full(shape, fill_value)
578
+ assert_equal(output, expected_output)
579
+
580
+ def test_astype_copyflag():
581
+ # test the various copyflag options
582
+ arr = np.arange(10, dtype=np.intp)
583
+
584
+ res_true = arr.astype(np.intp, copy=True)
585
+ assert not np.may_share_memory(arr, res_true)
586
+ res_always = arr.astype(np.intp, copy=np._CopyMode.ALWAYS)
587
+ assert not np.may_share_memory(arr, res_always)
588
+
589
+ res_false = arr.astype(np.intp, copy=False)
590
+ # `res_false is arr` currently, but check `may_share_memory`.
591
+ assert np.may_share_memory(arr, res_false)
592
+ res_if_needed = arr.astype(np.intp, copy=np._CopyMode.IF_NEEDED)
593
+ # `res_if_needed is arr` currently, but check `may_share_memory`.
594
+ assert np.may_share_memory(arr, res_if_needed)
595
+
596
+ res_never = arr.astype(np.intp, copy=np._CopyMode.NEVER)
597
+ assert np.may_share_memory(arr, res_never)
598
+
599
+ # Simple tests for when a copy is necessary:
600
+ res_false = arr.astype(np.float64, copy=False)
601
+ assert_array_equal(res_false, arr)
602
+ res_if_needed = arr.astype(np.float64,
603
+ copy=np._CopyMode.IF_NEEDED)
604
+ assert_array_equal(res_if_needed, arr)
605
+ assert_raises(ValueError, arr.astype, np.float64,
606
+ copy=np._CopyMode.NEVER)
wemm/lib/python3.10/site-packages/numpy/core/tests/test_argparse.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Tests for the private NumPy argument parsing functionality.
3
+ They mainly exists to ensure good test coverage without having to try the
4
+ weirder cases on actual numpy functions but test them in one place.
5
+
6
+ The test function is defined in C to be equivalent to (errors may not always
7
+ match exactly, and could be adjusted):
8
+
9
+ def func(arg1, /, arg2, *, arg3):
10
+ i = integer(arg1) # reproducing the 'i' parsing in Python.
11
+ return None
12
+ """
13
+
14
+ import pytest
15
+
16
+ import numpy as np
17
+ from numpy.core._multiarray_tests import argparse_example_function as func
18
+
19
+
20
+ def test_invalid_integers():
21
+ with pytest.raises(TypeError,
22
+ match="integer argument expected, got float"):
23
+ func(1.)
24
+ with pytest.raises(OverflowError):
25
+ func(2**100)
26
+
27
+
28
+ def test_missing_arguments():
29
+ with pytest.raises(TypeError,
30
+ match="missing required positional argument 0"):
31
+ func()
32
+ with pytest.raises(TypeError,
33
+ match="missing required positional argument 0"):
34
+ func(arg2=1, arg3=4)
35
+ with pytest.raises(TypeError,
36
+ match=r"missing required argument \'arg2\' \(pos 1\)"):
37
+ func(1, arg3=5)
38
+
39
+
40
+ def test_too_many_positional():
41
+ # the second argument is positional but can be passed as keyword.
42
+ with pytest.raises(TypeError,
43
+ match="takes from 2 to 3 positional arguments but 4 were given"):
44
+ func(1, 2, 3, 4)
45
+
46
+
47
+ def test_multiple_values():
48
+ with pytest.raises(TypeError,
49
+ match=r"given by name \('arg2'\) and position \(position 1\)"):
50
+ func(1, 2, arg2=3)
51
+
52
+
53
+ def test_string_fallbacks():
54
+ # We can (currently?) use numpy strings to test the "slow" fallbacks
55
+ # that should normally not be taken due to string interning.
56
+ arg2 = np.unicode_("arg2")
57
+ missing_arg = np.unicode_("missing_arg")
58
+ func(1, **{arg2: 3})
59
+ with pytest.raises(TypeError,
60
+ match="got an unexpected keyword argument 'missing_arg'"):
61
+ func(2, **{missing_arg: 3})
62
+
wemm/lib/python3.10/site-packages/numpy/core/tests/test_array_coercion.py ADDED
@@ -0,0 +1,767 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Tests for array coercion, mainly through testing `np.array` results directly.
3
+ Note that other such tests exist e.g. in `test_api.py` and many corner-cases
4
+ are tested (sometimes indirectly) elsewhere.
5
+ """
6
+
7
+ import pytest
8
+ from pytest import param
9
+
10
+ from itertools import product
11
+
12
+ import numpy as np
13
+ from numpy.core._rational_tests import rational
14
+ from numpy.core._multiarray_umath import _discover_array_parameters
15
+
16
+ from numpy.testing import (
17
+ assert_array_equal, assert_warns, IS_PYPY)
18
+
19
+
20
+ def arraylikes():
21
+ """
22
+ Generator for functions converting an array into various array-likes.
23
+ If full is True (default) includes array-likes not capable of handling
24
+ all dtypes
25
+ """
26
+ # base array:
27
+ def ndarray(a):
28
+ return a
29
+
30
+ yield param(ndarray, id="ndarray")
31
+
32
+ # subclass:
33
+ class MyArr(np.ndarray):
34
+ pass
35
+
36
+ def subclass(a):
37
+ return a.view(MyArr)
38
+
39
+ yield subclass
40
+
41
+ class _SequenceLike():
42
+ # We are giving a warning that array-like's were also expected to be
43
+ # sequence-like in `np.array([array_like])`, this can be removed
44
+ # when the deprecation exired (started NumPy 1.20)
45
+ def __len__(self):
46
+ raise TypeError
47
+
48
+ def __getitem__(self):
49
+ raise TypeError
50
+
51
+ # Array-interface
52
+ class ArrayDunder(_SequenceLike):
53
+ def __init__(self, a):
54
+ self.a = a
55
+
56
+ def __array__(self, dtype=None):
57
+ return self.a
58
+
59
+ yield param(ArrayDunder, id="__array__")
60
+
61
+ # memory-view
62
+ yield param(memoryview, id="memoryview")
63
+
64
+ # Array-interface
65
+ class ArrayInterface(_SequenceLike):
66
+ def __init__(self, a):
67
+ self.a = a # need to hold on to keep interface valid
68
+ self.__array_interface__ = a.__array_interface__
69
+
70
+ yield param(ArrayInterface, id="__array_interface__")
71
+
72
+ # Array-Struct
73
+ class ArrayStruct(_SequenceLike):
74
+ def __init__(self, a):
75
+ self.a = a # need to hold on to keep struct valid
76
+ self.__array_struct__ = a.__array_struct__
77
+
78
+ yield param(ArrayStruct, id="__array_struct__")
79
+
80
+
81
+ def scalar_instances(times=True, extended_precision=True, user_dtype=True):
82
+ # Hard-coded list of scalar instances.
83
+ # Floats:
84
+ yield param(np.sqrt(np.float16(5)), id="float16")
85
+ yield param(np.sqrt(np.float32(5)), id="float32")
86
+ yield param(np.sqrt(np.float64(5)), id="float64")
87
+ if extended_precision:
88
+ yield param(np.sqrt(np.longdouble(5)), id="longdouble")
89
+
90
+ # Complex:
91
+ yield param(np.sqrt(np.complex64(2+3j)), id="complex64")
92
+ yield param(np.sqrt(np.complex128(2+3j)), id="complex128")
93
+ if extended_precision:
94
+ yield param(np.sqrt(np.longcomplex(2+3j)), id="clongdouble")
95
+
96
+ # Bool:
97
+ # XFAIL: Bool should be added, but has some bad properties when it
98
+ # comes to strings, see also gh-9875
99
+ # yield param(np.bool_(0), id="bool")
100
+
101
+ # Integers:
102
+ yield param(np.int8(2), id="int8")
103
+ yield param(np.int16(2), id="int16")
104
+ yield param(np.int32(2), id="int32")
105
+ yield param(np.int64(2), id="int64")
106
+
107
+ yield param(np.uint8(2), id="uint8")
108
+ yield param(np.uint16(2), id="uint16")
109
+ yield param(np.uint32(2), id="uint32")
110
+ yield param(np.uint64(2), id="uint64")
111
+
112
+ # Rational:
113
+ if user_dtype:
114
+ yield param(rational(1, 2), id="rational")
115
+
116
+ # Cannot create a structured void scalar directly:
117
+ structured = np.array([(1, 3)], "i,i")[0]
118
+ assert isinstance(structured, np.void)
119
+ assert structured.dtype == np.dtype("i,i")
120
+ yield param(structured, id="structured")
121
+
122
+ if times:
123
+ # Datetimes and timedelta
124
+ yield param(np.timedelta64(2), id="timedelta64[generic]")
125
+ yield param(np.timedelta64(23, "s"), id="timedelta64[s]")
126
+ yield param(np.timedelta64("NaT", "s"), id="timedelta64[s](NaT)")
127
+
128
+ yield param(np.datetime64("NaT"), id="datetime64[generic](NaT)")
129
+ yield param(np.datetime64("2020-06-07 12:43", "ms"), id="datetime64[ms]")
130
+
131
+ # Strings and unstructured void:
132
+ yield param(np.bytes_(b"1234"), id="bytes")
133
+ yield param(np.unicode_("2345"), id="unicode")
134
+ yield param(np.void(b"4321"), id="unstructured_void")
135
+
136
+
137
+ def is_parametric_dtype(dtype):
138
+ """Returns True if the the dtype is a parametric legacy dtype (itemsize
139
+ is 0, or a datetime without units)
140
+ """
141
+ if dtype.itemsize == 0:
142
+ return True
143
+ if issubclass(dtype.type, (np.datetime64, np.timedelta64)):
144
+ if dtype.name.endswith("64"):
145
+ # Generic time units
146
+ return True
147
+ return False
148
+
149
+
150
+ class TestStringDiscovery:
151
+ @pytest.mark.parametrize("obj",
152
+ [object(), 1.2, 10**43, None, "string"],
153
+ ids=["object", "1.2", "10**43", "None", "string"])
154
+ def test_basic_stringlength(self, obj):
155
+ length = len(str(obj))
156
+ expected = np.dtype(f"S{length}")
157
+
158
+ assert np.array(obj, dtype="S").dtype == expected
159
+ assert np.array([obj], dtype="S").dtype == expected
160
+
161
+ # A nested array is also discovered correctly
162
+ arr = np.array(obj, dtype="O")
163
+ assert np.array(arr, dtype="S").dtype == expected
164
+ # Check that .astype() behaves identical
165
+ assert arr.astype("S").dtype == expected
166
+
167
+ @pytest.mark.parametrize("obj",
168
+ [object(), 1.2, 10**43, None, "string"],
169
+ ids=["object", "1.2", "10**43", "None", "string"])
170
+ def test_nested_arrays_stringlength(self, obj):
171
+ length = len(str(obj))
172
+ expected = np.dtype(f"S{length}")
173
+ arr = np.array(obj, dtype="O")
174
+ assert np.array([arr, arr], dtype="S").dtype == expected
175
+
176
+ @pytest.mark.parametrize("arraylike", arraylikes())
177
+ def test_unpack_first_level(self, arraylike):
178
+ # We unpack exactly one level of array likes
179
+ obj = np.array([None])
180
+ obj[0] = np.array(1.2)
181
+ # the length of the included item, not of the float dtype
182
+ length = len(str(obj[0]))
183
+ expected = np.dtype(f"S{length}")
184
+
185
+ obj = arraylike(obj)
186
+ # casting to string usually calls str(obj)
187
+ arr = np.array([obj], dtype="S")
188
+ assert arr.shape == (1, 1)
189
+ assert arr.dtype == expected
190
+
191
+
192
+ class TestScalarDiscovery:
193
+ def test_void_special_case(self):
194
+ # Void dtypes with structures discover tuples as elements
195
+ arr = np.array((1, 2, 3), dtype="i,i,i")
196
+ assert arr.shape == ()
197
+ arr = np.array([(1, 2, 3)], dtype="i,i,i")
198
+ assert arr.shape == (1,)
199
+
200
+ def test_char_special_case(self):
201
+ arr = np.array("string", dtype="c")
202
+ assert arr.shape == (6,)
203
+ assert arr.dtype.char == "c"
204
+ arr = np.array(["string"], dtype="c")
205
+ assert arr.shape == (1, 6)
206
+ assert arr.dtype.char == "c"
207
+
208
+ def test_char_special_case_deep(self):
209
+ # Check that the character special case errors correctly if the
210
+ # array is too deep:
211
+ nested = ["string"] # 2 dimensions (due to string being sequence)
212
+ for i in range(np.MAXDIMS - 2):
213
+ nested = [nested]
214
+
215
+ arr = np.array(nested, dtype='c')
216
+ assert arr.shape == (1,) * (np.MAXDIMS - 1) + (6,)
217
+ with pytest.raises(ValueError):
218
+ np.array([nested], dtype="c")
219
+
220
+ def test_unknown_object(self):
221
+ arr = np.array(object())
222
+ assert arr.shape == ()
223
+ assert arr.dtype == np.dtype("O")
224
+
225
+ @pytest.mark.parametrize("scalar", scalar_instances())
226
+ def test_scalar(self, scalar):
227
+ arr = np.array(scalar)
228
+ assert arr.shape == ()
229
+ assert arr.dtype == scalar.dtype
230
+
231
+ arr = np.array([[scalar, scalar]])
232
+ assert arr.shape == (1, 2)
233
+ assert arr.dtype == scalar.dtype
234
+
235
+ # Additionally to string this test also runs into a corner case
236
+ # with datetime promotion (the difference is the promotion order).
237
+ @pytest.mark.filterwarnings("ignore:Promotion of numbers:FutureWarning")
238
+ def test_scalar_promotion(self):
239
+ for sc1, sc2 in product(scalar_instances(), scalar_instances()):
240
+ sc1, sc2 = sc1.values[0], sc2.values[0]
241
+ # test all combinations:
242
+ try:
243
+ arr = np.array([sc1, sc2])
244
+ except (TypeError, ValueError):
245
+ # The promotion between two times can fail
246
+ # XFAIL (ValueError): Some object casts are currently undefined
247
+ continue
248
+ assert arr.shape == (2,)
249
+ try:
250
+ dt1, dt2 = sc1.dtype, sc2.dtype
251
+ expected_dtype = np.promote_types(dt1, dt2)
252
+ assert arr.dtype == expected_dtype
253
+ except TypeError as e:
254
+ # Will currently always go to object dtype
255
+ assert arr.dtype == np.dtype("O")
256
+
257
+ @pytest.mark.parametrize("scalar", scalar_instances())
258
+ def test_scalar_coercion(self, scalar):
259
+ # This tests various scalar coercion paths, mainly for the numerical
260
+ # types. It includes some paths not directly related to `np.array`
261
+ if isinstance(scalar, np.inexact):
262
+ # Ensure we have a full-precision number if available
263
+ scalar = type(scalar)((scalar * 2)**0.5)
264
+
265
+ if type(scalar) is rational:
266
+ # Rational generally fails due to a missing cast. In the future
267
+ # object casts should automatically be defined based on `setitem`.
268
+ pytest.xfail("Rational to object cast is undefined currently.")
269
+
270
+ # Use casting from object:
271
+ arr = np.array(scalar, dtype=object).astype(scalar.dtype)
272
+
273
+ # Test various ways to create an array containing this scalar:
274
+ arr1 = np.array(scalar).reshape(1)
275
+ arr2 = np.array([scalar])
276
+ arr3 = np.empty(1, dtype=scalar.dtype)
277
+ arr3[0] = scalar
278
+ arr4 = np.empty(1, dtype=scalar.dtype)
279
+ arr4[:] = [scalar]
280
+ # All of these methods should yield the same results
281
+ assert_array_equal(arr, arr1)
282
+ assert_array_equal(arr, arr2)
283
+ assert_array_equal(arr, arr3)
284
+ assert_array_equal(arr, arr4)
285
+
286
+ @pytest.mark.xfail(IS_PYPY, reason="`int(np.complex128(3))` fails on PyPy")
287
+ @pytest.mark.filterwarnings("ignore::numpy.ComplexWarning")
288
+ @pytest.mark.parametrize("cast_to", scalar_instances())
289
+ def test_scalar_coercion_same_as_cast_and_assignment(self, cast_to):
290
+ """
291
+ Test that in most cases:
292
+ * `np.array(scalar, dtype=dtype)`
293
+ * `np.empty((), dtype=dtype)[()] = scalar`
294
+ * `np.array(scalar).astype(dtype)`
295
+ should behave the same. The only exceptions are paramteric dtypes
296
+ (mainly datetime/timedelta without unit) and void without fields.
297
+ """
298
+ dtype = cast_to.dtype # use to parametrize only the target dtype
299
+
300
+ for scalar in scalar_instances(times=False):
301
+ scalar = scalar.values[0]
302
+
303
+ if dtype.type == np.void:
304
+ if scalar.dtype.fields is not None and dtype.fields is None:
305
+ # Here, coercion to "V6" works, but the cast fails.
306
+ # Since the types are identical, SETITEM takes care of
307
+ # this, but has different rules than the cast.
308
+ with pytest.raises(TypeError):
309
+ np.array(scalar).astype(dtype)
310
+ np.array(scalar, dtype=dtype)
311
+ np.array([scalar], dtype=dtype)
312
+ continue
313
+
314
+ # The main test, we first try to use casting and if it succeeds
315
+ # continue below testing that things are the same, otherwise
316
+ # test that the alternative paths at least also fail.
317
+ try:
318
+ cast = np.array(scalar).astype(dtype)
319
+ except (TypeError, ValueError, RuntimeError):
320
+ # coercion should also raise (error type may change)
321
+ with pytest.raises(Exception):
322
+ np.array(scalar, dtype=dtype)
323
+
324
+ if (isinstance(scalar, rational) and
325
+ np.issubdtype(dtype, np.signedinteger)):
326
+ return
327
+
328
+ with pytest.raises(Exception):
329
+ np.array([scalar], dtype=dtype)
330
+ # assignment should also raise
331
+ res = np.zeros((), dtype=dtype)
332
+ with pytest.raises(Exception):
333
+ res[()] = scalar
334
+
335
+ return
336
+
337
+ # Non error path:
338
+ arr = np.array(scalar, dtype=dtype)
339
+ assert_array_equal(arr, cast)
340
+ # assignment behaves the same
341
+ ass = np.zeros((), dtype=dtype)
342
+ ass[()] = scalar
343
+ assert_array_equal(ass, cast)
344
+
345
+ @pytest.mark.parametrize("pyscalar", [10, 10.32, 10.14j, 10**100])
346
+ def test_pyscalar_subclasses(self, pyscalar):
347
+ """NumPy arrays are read/write which means that anything but invariant
348
+ behaviour is on thin ice. However, we currently are happy to discover
349
+ subclasses of Python float, int, complex the same as the base classes.
350
+ This should potentially be deprecated.
351
+ """
352
+ class MyScalar(type(pyscalar)):
353
+ pass
354
+
355
+ res = np.array(MyScalar(pyscalar))
356
+ expected = np.array(pyscalar)
357
+ assert_array_equal(res, expected)
358
+
359
+ @pytest.mark.parametrize("dtype_char", np.typecodes["All"])
360
+ def test_default_dtype_instance(self, dtype_char):
361
+ if dtype_char in "SU":
362
+ dtype = np.dtype(dtype_char + "1")
363
+ elif dtype_char == "V":
364
+ # Legacy behaviour was to use V8. The reason was float64 being the
365
+ # default dtype and that having 8 bytes.
366
+ dtype = np.dtype("V8")
367
+ else:
368
+ dtype = np.dtype(dtype_char)
369
+
370
+ discovered_dtype, _ = _discover_array_parameters([], type(dtype))
371
+
372
+ assert discovered_dtype == dtype
373
+ assert discovered_dtype.itemsize == dtype.itemsize
374
+
375
+ @pytest.mark.parametrize("dtype", np.typecodes["Integer"])
376
+ def test_scalar_to_int_coerce_does_not_cast(self, dtype):
377
+ """
378
+ Signed integers are currently different in that they do not cast other
379
+ NumPy scalar, but instead use scalar.__int__(). The hardcoded
380
+ exception to this rule is `np.array(scalar, dtype=integer)`.
381
+ """
382
+ dtype = np.dtype(dtype)
383
+ invalid_int = np.ulonglong(-1)
384
+
385
+ float_nan = np.float64(np.nan)
386
+
387
+ for scalar in [float_nan, invalid_int]:
388
+ # This is a special case using casting logic and thus not failing:
389
+ coerced = np.array(scalar, dtype=dtype)
390
+ cast = np.array(scalar).astype(dtype)
391
+ assert_array_equal(coerced, cast)
392
+
393
+ # However these fail:
394
+ with pytest.raises((ValueError, OverflowError)):
395
+ np.array([scalar], dtype=dtype)
396
+ with pytest.raises((ValueError, OverflowError)):
397
+ cast[()] = scalar
398
+
399
+
400
+ class TestTimeScalars:
401
+ @pytest.mark.parametrize("dtype", [np.int64, np.float32])
402
+ @pytest.mark.parametrize("scalar",
403
+ [param(np.timedelta64("NaT", "s"), id="timedelta64[s](NaT)"),
404
+ param(np.timedelta64(123, "s"), id="timedelta64[s]"),
405
+ param(np.datetime64("NaT", "generic"), id="datetime64[generic](NaT)"),
406
+ param(np.datetime64(1, "D"), id="datetime64[D]")],)
407
+ def test_coercion_basic(self, dtype, scalar):
408
+ # Note the `[scalar]` is there because np.array(scalar) uses stricter
409
+ # `scalar.__int__()` rules for backward compatibility right now.
410
+ arr = np.array(scalar, dtype=dtype)
411
+ cast = np.array(scalar).astype(dtype)
412
+ assert_array_equal(arr, cast)
413
+
414
+ ass = np.ones((), dtype=dtype)
415
+ if issubclass(dtype, np.integer):
416
+ with pytest.raises(TypeError):
417
+ # raises, as would np.array([scalar], dtype=dtype), this is
418
+ # conversion from times, but behaviour of integers.
419
+ ass[()] = scalar
420
+ else:
421
+ ass[()] = scalar
422
+ assert_array_equal(ass, cast)
423
+
424
+ @pytest.mark.parametrize("dtype", [np.int64, np.float32])
425
+ @pytest.mark.parametrize("scalar",
426
+ [param(np.timedelta64(123, "ns"), id="timedelta64[ns]"),
427
+ param(np.timedelta64(12, "generic"), id="timedelta64[generic]")])
428
+ def test_coercion_timedelta_convert_to_number(self, dtype, scalar):
429
+ # Only "ns" and "generic" timedeltas can be converted to numbers
430
+ # so these are slightly special.
431
+ arr = np.array(scalar, dtype=dtype)
432
+ cast = np.array(scalar).astype(dtype)
433
+ ass = np.ones((), dtype=dtype)
434
+ ass[()] = scalar # raises, as would np.array([scalar], dtype=dtype)
435
+
436
+ assert_array_equal(arr, cast)
437
+ assert_array_equal(cast, cast)
438
+
439
+ @pytest.mark.parametrize("dtype", ["S6", "U6"])
440
+ @pytest.mark.parametrize(["val", "unit"],
441
+ [param(123, "s", id="[s]"), param(123, "D", id="[D]")])
442
+ def test_coercion_assignment_datetime(self, val, unit, dtype):
443
+ # String from datetime64 assignment is currently special cased to
444
+ # never use casting. This is because casting will error in this
445
+ # case, and traditionally in most cases the behaviour is maintained
446
+ # like this. (`np.array(scalar, dtype="U6")` would have failed before)
447
+ # TODO: This discrepancy _should_ be resolved, either by relaxing the
448
+ # cast, or by deprecating the first part.
449
+ scalar = np.datetime64(val, unit)
450
+ dtype = np.dtype(dtype)
451
+ cut_string = dtype.type(str(scalar)[:6])
452
+
453
+ arr = np.array(scalar, dtype=dtype)
454
+ assert arr[()] == cut_string
455
+ ass = np.ones((), dtype=dtype)
456
+ ass[()] = scalar
457
+ assert ass[()] == cut_string
458
+
459
+ with pytest.raises(RuntimeError):
460
+ # However, unlike the above assignment using `str(scalar)[:6]`
461
+ # due to being handled by the string DType and not be casting
462
+ # the explicit cast fails:
463
+ np.array(scalar).astype(dtype)
464
+
465
+
466
+ @pytest.mark.parametrize(["val", "unit"],
467
+ [param(123, "s", id="[s]"), param(123, "D", id="[D]")])
468
+ def test_coercion_assignment_timedelta(self, val, unit):
469
+ scalar = np.timedelta64(val, unit)
470
+
471
+ # Unlike datetime64, timedelta allows the unsafe cast:
472
+ np.array(scalar, dtype="S6")
473
+ cast = np.array(scalar).astype("S6")
474
+ ass = np.ones((), dtype="S6")
475
+ ass[()] = scalar
476
+ expected = scalar.astype("S")[:6]
477
+ assert cast[()] == expected
478
+ assert ass[()] == expected
479
+
480
+ class TestNested:
481
+ def test_nested_simple(self):
482
+ initial = [1.2]
483
+ nested = initial
484
+ for i in range(np.MAXDIMS - 1):
485
+ nested = [nested]
486
+
487
+ arr = np.array(nested, dtype="float64")
488
+ assert arr.shape == (1,) * np.MAXDIMS
489
+ with pytest.raises(ValueError):
490
+ np.array([nested], dtype="float64")
491
+
492
+ # We discover object automatically at this time:
493
+ with assert_warns(np.VisibleDeprecationWarning):
494
+ arr = np.array([nested])
495
+ assert arr.dtype == np.dtype("O")
496
+ assert arr.shape == (1,) * np.MAXDIMS
497
+ assert arr.item() is initial
498
+
499
+ def test_pathological_self_containing(self):
500
+ # Test that this also works for two nested sequences
501
+ l = []
502
+ l.append(l)
503
+ arr = np.array([l, l, l], dtype=object)
504
+ assert arr.shape == (3,) + (1,) * (np.MAXDIMS - 1)
505
+
506
+ # Also check a ragged case:
507
+ arr = np.array([l, [None], l], dtype=object)
508
+ assert arr.shape == (3, 1)
509
+
510
+ @pytest.mark.parametrize("arraylike", arraylikes())
511
+ def test_nested_arraylikes(self, arraylike):
512
+ # We try storing an array like into an array, but the array-like
513
+ # will have too many dimensions. This means the shape discovery
514
+ # decides that the array-like must be treated as an object (a special
515
+ # case of ragged discovery). The result will be an array with one
516
+ # dimension less than the maximum dimensions, and the array being
517
+ # assigned to it (which does work for object or if `float(arraylike)`
518
+ # works).
519
+ initial = arraylike(np.ones((1, 1)))
520
+
521
+ nested = initial
522
+ for i in range(np.MAXDIMS - 1):
523
+ nested = [nested]
524
+
525
+ with pytest.warns(DeprecationWarning):
526
+ # It will refuse to assign the array into
527
+ np.array(nested, dtype="float64")
528
+
529
+ # If this is object, we end up assigning a (1, 1) array into (1,)
530
+ # (due to running out of dimensions), this is currently supported but
531
+ # a special case which is not ideal.
532
+ arr = np.array(nested, dtype=object)
533
+ assert arr.shape == (1,) * np.MAXDIMS
534
+ assert arr.item() == np.array(initial).item()
535
+
536
+ @pytest.mark.parametrize("arraylike", arraylikes())
537
+ def test_uneven_depth_ragged(self, arraylike):
538
+ arr = np.arange(4).reshape((2, 2))
539
+ arr = arraylike(arr)
540
+
541
+ # Array is ragged in the second dimension already:
542
+ out = np.array([arr, [arr]], dtype=object)
543
+ assert out.shape == (2,)
544
+ assert out[0] is arr
545
+ assert type(out[1]) is list
546
+
547
+ # Array is ragged in the third dimension:
548
+ with pytest.raises(ValueError):
549
+ # This is a broadcast error during assignment, because
550
+ # the array shape would be (2, 2, 2) but `arr[0, 0] = arr` fails.
551
+ np.array([arr, [arr, arr]], dtype=object)
552
+
553
+ def test_empty_sequence(self):
554
+ arr = np.array([[], [1], [[1]]], dtype=object)
555
+ assert arr.shape == (3,)
556
+
557
+ # The empty sequence stops further dimension discovery, so the
558
+ # result shape will be (0,) which leads to an error during:
559
+ with pytest.raises(ValueError):
560
+ np.array([[], np.empty((0, 1))], dtype=object)
561
+
562
+ def test_array_of_different_depths(self):
563
+ # When multiple arrays (or array-likes) are included in a
564
+ # sequences and have different depth, we currently discover
565
+ # as many dimensions as they share. (see also gh-17224)
566
+ arr = np.zeros((3, 2))
567
+ mismatch_first_dim = np.zeros((1, 2))
568
+ mismatch_second_dim = np.zeros((3, 3))
569
+
570
+ dtype, shape = _discover_array_parameters(
571
+ [arr, mismatch_second_dim], dtype=np.dtype("O"))
572
+ assert shape == (2, 3)
573
+
574
+ dtype, shape = _discover_array_parameters(
575
+ [arr, mismatch_first_dim], dtype=np.dtype("O"))
576
+ assert shape == (2,)
577
+ # The second case is currently supported because the arrays
578
+ # can be stored as objects:
579
+ res = np.asarray([arr, mismatch_first_dim], dtype=np.dtype("O"))
580
+ assert res[0] is arr
581
+ assert res[1] is mismatch_first_dim
582
+
583
+
584
+ class TestBadSequences:
585
+ # These are tests for bad objects passed into `np.array`, in general
586
+ # these have undefined behaviour. In the old code they partially worked
587
+ # when now they will fail. We could (and maybe should) create a copy
588
+ # of all sequences to be safe against bad-actors.
589
+
590
+ def test_growing_list(self):
591
+ # List to coerce, `mylist` will append to it during coercion
592
+ obj = []
593
+ class mylist(list):
594
+ def __len__(self):
595
+ obj.append([1, 2])
596
+ return super().__len__()
597
+
598
+ obj.append(mylist([1, 2]))
599
+
600
+ with pytest.raises(RuntimeError):
601
+ np.array(obj)
602
+
603
+ # Note: We do not test a shrinking list. These do very evil things
604
+ # and the only way to fix them would be to copy all sequences.
605
+ # (which may be a real option in the future).
606
+
607
+ def test_mutated_list(self):
608
+ # List to coerce, `mylist` will mutate the first element
609
+ obj = []
610
+ class mylist(list):
611
+ def __len__(self):
612
+ obj[0] = [2, 3] # replace with a different list.
613
+ return super().__len__()
614
+
615
+ obj.append([2, 3])
616
+ obj.append(mylist([1, 2]))
617
+ with pytest.raises(RuntimeError):
618
+ np.array(obj)
619
+
620
+ def test_replace_0d_array(self):
621
+ # List to coerce, `mylist` will mutate the first element
622
+ obj = []
623
+ class baditem:
624
+ def __len__(self):
625
+ obj[0][0] = 2 # replace with a different list.
626
+ raise ValueError("not actually a sequence!")
627
+
628
+ def __getitem__(self):
629
+ pass
630
+
631
+ # Runs into a corner case in the new code, the `array(2)` is cached
632
+ # so replacing it invalidates the cache.
633
+ obj.append([np.array(2), baditem()])
634
+ with pytest.raises(RuntimeError):
635
+ np.array(obj)
636
+
637
+
638
+ class TestArrayLikes:
639
+ @pytest.mark.parametrize("arraylike", arraylikes())
640
+ def test_0d_object_special_case(self, arraylike):
641
+ arr = np.array(0.)
642
+ obj = arraylike(arr)
643
+ # A single array-like is always converted:
644
+ res = np.array(obj, dtype=object)
645
+ assert_array_equal(arr, res)
646
+
647
+ # But a single 0-D nested array-like never:
648
+ res = np.array([obj], dtype=object)
649
+ assert res[0] is obj
650
+
651
+ def test_0d_generic_special_case(self):
652
+ class ArraySubclass(np.ndarray):
653
+ def __float__(self):
654
+ raise TypeError("e.g. quantities raise on this")
655
+
656
+ arr = np.array(0.)
657
+ obj = arr.view(ArraySubclass)
658
+ res = np.array(obj)
659
+ # The subclass is simply cast:
660
+ assert_array_equal(arr, res)
661
+
662
+ # If the 0-D array-like is included, __float__ is currently
663
+ # guaranteed to be used. We may want to change that, quantities
664
+ # and masked arrays half make use of this.
665
+ with pytest.raises(TypeError):
666
+ np.array([obj])
667
+
668
+ # The same holds for memoryview:
669
+ obj = memoryview(arr)
670
+ res = np.array(obj)
671
+ assert_array_equal(arr, res)
672
+ with pytest.raises(ValueError):
673
+ # The error type does not matter much here.
674
+ np.array([obj])
675
+
676
+ def test_arraylike_classes(self):
677
+ # The classes of array-likes should generally be acceptable to be
678
+ # stored inside a numpy (object) array. This tests all of the
679
+ # special attributes (since all are checked during coercion).
680
+ arr = np.array(np.int64)
681
+ assert arr[()] is np.int64
682
+ arr = np.array([np.int64])
683
+ assert arr[0] is np.int64
684
+
685
+ # This also works for properties/unbound methods:
686
+ class ArrayLike:
687
+ @property
688
+ def __array_interface__(self):
689
+ pass
690
+
691
+ @property
692
+ def __array_struct__(self):
693
+ pass
694
+
695
+ def __array__(self):
696
+ pass
697
+
698
+ arr = np.array(ArrayLike)
699
+ assert arr[()] is ArrayLike
700
+ arr = np.array([ArrayLike])
701
+ assert arr[0] is ArrayLike
702
+
703
+ @pytest.mark.skipif(
704
+ np.dtype(np.intp).itemsize < 8, reason="Needs 64bit platform")
705
+ def test_too_large_array_error_paths(self):
706
+ """Test the error paths, including for memory leaks"""
707
+ arr = np.array(0, dtype="uint8")
708
+ # Guarantees that a contiguous copy won't work:
709
+ arr = np.broadcast_to(arr, 2**62)
710
+
711
+ for i in range(5):
712
+ # repeat, to ensure caching cannot have an effect:
713
+ with pytest.raises(MemoryError):
714
+ np.array(arr)
715
+ with pytest.raises(MemoryError):
716
+ np.array([arr])
717
+
718
+ @pytest.mark.parametrize("attribute",
719
+ ["__array_interface__", "__array__", "__array_struct__"])
720
+ @pytest.mark.parametrize("error", [RecursionError, MemoryError])
721
+ def test_bad_array_like_attributes(self, attribute, error):
722
+ # RecursionError and MemoryError are considered fatal. All errors
723
+ # (except AttributeError) should probably be raised in the future,
724
+ # but shapely made use of it, so it will require a deprecation.
725
+
726
+ class BadInterface:
727
+ def __getattr__(self, attr):
728
+ if attr == attribute:
729
+ raise error
730
+ super().__getattr__(attr)
731
+
732
+ with pytest.raises(error):
733
+ np.array(BadInterface())
734
+
735
+ @pytest.mark.parametrize("error", [RecursionError, MemoryError])
736
+ def test_bad_array_like_bad_length(self, error):
737
+ # RecursionError and MemoryError are considered "critical" in
738
+ # sequences. We could expand this more generally though. (NumPy 1.20)
739
+ class BadSequence:
740
+ def __len__(self):
741
+ raise error
742
+ def __getitem__(self):
743
+ # must have getitem to be a Sequence
744
+ return 1
745
+
746
+ with pytest.raises(error):
747
+ np.array(BadSequence())
748
+
749
+
750
+ class TestSpecialAttributeLookupFailure:
751
+ # An exception was raised while fetching the attribute
752
+
753
+ class WeirdArrayLike:
754
+ @property
755
+ def __array__(self):
756
+ raise RuntimeError("oops!")
757
+
758
+ class WeirdArrayInterface:
759
+ @property
760
+ def __array_interface__(self):
761
+ raise RuntimeError("oops!")
762
+
763
+ def test_deprecated(self):
764
+ with pytest.raises(RuntimeError):
765
+ np.array(self.WeirdArrayLike())
766
+ with pytest.raises(RuntimeError):
767
+ np.array(self.WeirdArrayInterface())
wemm/lib/python3.10/site-packages/numpy/core/tests/test_array_interface.py ADDED
@@ -0,0 +1,216 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys
2
+ import pytest
3
+ import numpy as np
4
+ from numpy.testing import extbuild
5
+
6
+
7
+ @pytest.fixture
8
+ def get_module(tmp_path):
9
+ """ Some codes to generate data and manage temporary buffers use when
10
+ sharing with numpy via the array interface protocol.
11
+ """
12
+
13
+ if not sys.platform.startswith('linux'):
14
+ pytest.skip('link fails on cygwin')
15
+
16
+ prologue = '''
17
+ #include <Python.h>
18
+ #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
19
+ #include <numpy/arrayobject.h>
20
+ #include <stdio.h>
21
+ #include <math.h>
22
+
23
+ NPY_NO_EXPORT
24
+ void delete_array_struct(PyObject *cap) {
25
+
26
+ /* get the array interface structure */
27
+ PyArrayInterface *inter = (PyArrayInterface*)
28
+ PyCapsule_GetPointer(cap, NULL);
29
+
30
+ /* get the buffer by which data was shared */
31
+ double *ptr = (double*)PyCapsule_GetContext(cap);
32
+
33
+ /* for the purposes of the regression test set the elements
34
+ to nan */
35
+ for (npy_intp i = 0; i < inter->shape[0]; ++i)
36
+ ptr[i] = nan("");
37
+
38
+ /* free the shared buffer */
39
+ free(ptr);
40
+
41
+ /* free the array interface structure */
42
+ free(inter->shape);
43
+ free(inter);
44
+
45
+ fprintf(stderr, "delete_array_struct\\ncap = %ld inter = %ld"
46
+ " ptr = %ld\\n", (long)cap, (long)inter, (long)ptr);
47
+ }
48
+ '''
49
+
50
+ functions = [
51
+ ("new_array_struct", "METH_VARARGS", """
52
+
53
+ long long n_elem = 0;
54
+ double value = 0.0;
55
+
56
+ if (!PyArg_ParseTuple(args, "Ld", &n_elem, &value)) {
57
+ Py_RETURN_NONE;
58
+ }
59
+
60
+ /* allocate and initialize the data to share with numpy */
61
+ long long n_bytes = n_elem*sizeof(double);
62
+ double *data = (double*)malloc(n_bytes);
63
+
64
+ if (!data) {
65
+ PyErr_Format(PyExc_MemoryError,
66
+ "Failed to malloc %lld bytes", n_bytes);
67
+
68
+ Py_RETURN_NONE;
69
+ }
70
+
71
+ for (long long i = 0; i < n_elem; ++i) {
72
+ data[i] = value;
73
+ }
74
+
75
+ /* calculate the shape and stride */
76
+ int nd = 1;
77
+
78
+ npy_intp *ss = (npy_intp*)malloc(2*nd*sizeof(npy_intp));
79
+ npy_intp *shape = ss;
80
+ npy_intp *stride = ss + nd;
81
+
82
+ shape[0] = n_elem;
83
+ stride[0] = sizeof(double);
84
+
85
+ /* construct the array interface */
86
+ PyArrayInterface *inter = (PyArrayInterface*)
87
+ malloc(sizeof(PyArrayInterface));
88
+
89
+ memset(inter, 0, sizeof(PyArrayInterface));
90
+
91
+ inter->two = 2;
92
+ inter->nd = nd;
93
+ inter->typekind = 'f';
94
+ inter->itemsize = sizeof(double);
95
+ inter->shape = shape;
96
+ inter->strides = stride;
97
+ inter->data = data;
98
+ inter->flags = NPY_ARRAY_WRITEABLE | NPY_ARRAY_NOTSWAPPED |
99
+ NPY_ARRAY_ALIGNED | NPY_ARRAY_C_CONTIGUOUS;
100
+
101
+ /* package into a capsule */
102
+ PyObject *cap = PyCapsule_New(inter, NULL, delete_array_struct);
103
+
104
+ /* save the pointer to the data */
105
+ PyCapsule_SetContext(cap, data);
106
+
107
+ fprintf(stderr, "new_array_struct\\ncap = %ld inter = %ld"
108
+ " ptr = %ld\\n", (long)cap, (long)inter, (long)data);
109
+
110
+ return cap;
111
+ """)
112
+ ]
113
+
114
+ more_init = "import_array();"
115
+
116
+ try:
117
+ import array_interface_testing
118
+ return array_interface_testing
119
+ except ImportError:
120
+ pass
121
+
122
+ # if it does not exist, build and load it
123
+ return extbuild.build_and_import_extension('array_interface_testing',
124
+ functions,
125
+ prologue=prologue,
126
+ include_dirs=[np.get_include()],
127
+ build_dir=tmp_path,
128
+ more_init=more_init)
129
+
130
+
131
+ @pytest.mark.slow
132
+ def test_cstruct(get_module):
133
+
134
+ class data_source:
135
+ """
136
+ This class is for testing the timing of the PyCapsule destructor
137
+ invoked when numpy release its reference to the shared data as part of
138
+ the numpy array interface protocol. If the PyCapsule destructor is
139
+ called early the shared data is freed and invlaid memory accesses will
140
+ occur.
141
+ """
142
+
143
+ def __init__(self, size, value):
144
+ self.size = size
145
+ self.value = value
146
+
147
+ @property
148
+ def __array_struct__(self):
149
+ return get_module.new_array_struct(self.size, self.value)
150
+
151
+ # write to the same stream as the C code
152
+ stderr = sys.__stderr__
153
+
154
+ # used to validate the shared data.
155
+ expected_value = -3.1415
156
+ multiplier = -10000.0
157
+
158
+ # create some data to share with numpy via the array interface
159
+ # assign the data an expected value.
160
+ stderr.write(' ---- create an object to share data ---- \n')
161
+ buf = data_source(256, expected_value)
162
+ stderr.write(' ---- OK!\n\n')
163
+
164
+ # share the data
165
+ stderr.write(' ---- share data via the array interface protocol ---- \n')
166
+ arr = np.array(buf, copy=False)
167
+ stderr.write('arr.__array_interface___ = %s\n' % (
168
+ str(arr.__array_interface__)))
169
+ stderr.write('arr.base = %s\n' % (str(arr.base)))
170
+ stderr.write(' ---- OK!\n\n')
171
+
172
+ # release the source of the shared data. this will not release the data
173
+ # that was shared with numpy, that is done in the PyCapsule destructor.
174
+ stderr.write(' ---- destroy the object that shared data ---- \n')
175
+ buf = None
176
+ stderr.write(' ---- OK!\n\n')
177
+
178
+ # check that we got the expected data. If the PyCapsule destructor we
179
+ # defined was prematurely called then this test will fail because our
180
+ # destructor sets the elements of the array to NaN before free'ing the
181
+ # buffer. Reading the values here may also cause a SEGV
182
+ assert np.allclose(arr, expected_value)
183
+
184
+ # read the data. If the PyCapsule destructor we defined was prematurely
185
+ # called then reading the values here may cause a SEGV and will be reported
186
+ # as invalid reads by valgrind
187
+ stderr.write(' ---- read shared data ---- \n')
188
+ stderr.write('arr = %s\n' % (str(arr)))
189
+ stderr.write(' ---- OK!\n\n')
190
+
191
+ # write to the shared buffer. If the shared data was prematurely deleted
192
+ # this will may cause a SEGV and valgrind will report invalid writes
193
+ stderr.write(' ---- modify shared data ---- \n')
194
+ arr *= multiplier
195
+ expected_value *= multiplier
196
+ stderr.write('arr.__array_interface___ = %s\n' % (
197
+ str(arr.__array_interface__)))
198
+ stderr.write('arr.base = %s\n' % (str(arr.base)))
199
+ stderr.write(' ---- OK!\n\n')
200
+
201
+ # read the data. If the shared data was prematurely deleted this
202
+ # will may cause a SEGV and valgrind will report invalid reads
203
+ stderr.write(' ---- read modified shared data ---- \n')
204
+ stderr.write('arr = %s\n' % (str(arr)))
205
+ stderr.write(' ---- OK!\n\n')
206
+
207
+ # check that we got the expected data. If the PyCapsule destructor we
208
+ # defined was prematurely called then this test will fail because our
209
+ # destructor sets the elements of the array to NaN before free'ing the
210
+ # buffer. Reading the values here may also cause a SEGV
211
+ assert np.allclose(arr, expected_value)
212
+
213
+ # free the shared data, the PyCapsule destructor should run here
214
+ stderr.write(' ---- free shared data ---- \n')
215
+ arr = None
216
+ stderr.write(' ---- OK!\n\n')
wemm/lib/python3.10/site-packages/numpy/core/tests/test_arraymethod.py ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ This file tests the generic aspects of ArrayMethod. At the time of writing
3
+ this is private API, but when added, public API may be added here.
4
+ """
5
+
6
+ from __future__ import annotations
7
+
8
+ import sys
9
+ import types
10
+ from typing import Any
11
+
12
+ import pytest
13
+
14
+ import numpy as np
15
+ from numpy.core._multiarray_umath import _get_castingimpl as get_castingimpl
16
+
17
+
18
+ class TestResolveDescriptors:
19
+ # Test mainly error paths of the resolve_descriptors function,
20
+ # note that the `casting_unittests` tests exercise this non-error paths.
21
+
22
+ # Casting implementations are the main/only current user:
23
+ method = get_castingimpl(type(np.dtype("d")), type(np.dtype("f")))
24
+
25
+ @pytest.mark.parametrize("args", [
26
+ (True,), # Not a tuple.
27
+ ((None,)), # Too few elements
28
+ ((None, None, None),), # Too many
29
+ ((None, None),), # Input dtype is None, which is invalid.
30
+ ((np.dtype("d"), True),), # Output dtype is not a dtype
31
+ ((np.dtype("f"), None),), # Input dtype does not match method
32
+ ])
33
+ def test_invalid_arguments(self, args):
34
+ with pytest.raises(TypeError):
35
+ self.method._resolve_descriptors(*args)
36
+
37
+
38
+ class TestSimpleStridedCall:
39
+ # Test mainly error paths of the resolve_descriptors function,
40
+ # note that the `casting_unittests` tests exercise this non-error paths.
41
+
42
+ # Casting implementations are the main/only current user:
43
+ method = get_castingimpl(type(np.dtype("d")), type(np.dtype("f")))
44
+
45
+ @pytest.mark.parametrize(["args", "error"], [
46
+ ((True,), TypeError), # Not a tuple
47
+ (((None,),), TypeError), # Too few elements
48
+ ((None, None), TypeError), # Inputs are not arrays.
49
+ (((None, None, None),), TypeError), # Too many
50
+ (((np.arange(3), np.arange(3)),), TypeError), # Incorrect dtypes
51
+ (((np.ones(3, dtype=">d"), np.ones(3, dtype="<f")),),
52
+ TypeError), # Does not support byte-swapping
53
+ (((np.ones((2, 2), dtype="d"), np.ones((2, 2), dtype="f")),),
54
+ ValueError), # not 1-D
55
+ (((np.ones(3, dtype="d"), np.ones(4, dtype="f")),),
56
+ ValueError), # different length
57
+ (((np.frombuffer(b"\0x00"*3*2, dtype="d"),
58
+ np.frombuffer(b"\0x00"*3, dtype="f")),),
59
+ ValueError), # output not writeable
60
+ ])
61
+ def test_invalid_arguments(self, args, error):
62
+ # This is private API, which may be modified freely
63
+ with pytest.raises(error):
64
+ self.method._simple_strided_call(*args)
65
+
66
+
67
+ @pytest.mark.skipif(sys.version_info < (3, 9), reason="Requires python 3.9")
68
+ @pytest.mark.parametrize(
69
+ "cls", [np.ndarray, np.recarray, np.chararray, np.matrix, np.memmap]
70
+ )
71
+ class TestClassGetItem:
72
+ def test_class_getitem(self, cls: type[np.ndarray]) -> None:
73
+ """Test `ndarray.__class_getitem__`."""
74
+ alias = cls[Any, Any]
75
+ assert isinstance(alias, types.GenericAlias)
76
+ assert alias.__origin__ is cls
77
+
78
+ @pytest.mark.parametrize("arg_len", range(4))
79
+ def test_subscript_tup(self, cls: type[np.ndarray], arg_len: int) -> None:
80
+ arg_tup = (Any,) * arg_len
81
+ if arg_len in (1, 2):
82
+ assert cls[arg_tup]
83
+ else:
84
+ match = f"Too {'few' if arg_len == 0 else 'many'} arguments"
85
+ with pytest.raises(TypeError, match=match):
86
+ cls[arg_tup]
87
+
88
+
89
+ @pytest.mark.skipif(sys.version_info >= (3, 9), reason="Requires python 3.8")
90
+ def test_class_getitem_38() -> None:
91
+ match = "Type subscription requires python >= 3.9"
92
+ with pytest.raises(TypeError, match=match):
93
+ np.ndarray[Any, Any]
wemm/lib/python3.10/site-packages/numpy/core/tests/test_arrayprint.py ADDED
@@ -0,0 +1,967 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys
2
+ import gc
3
+ from hypothesis import given
4
+ from hypothesis.extra import numpy as hynp
5
+ import pytest
6
+
7
+ import numpy as np
8
+ from numpy.testing import (
9
+ assert_, assert_equal, assert_raises, assert_warns, HAS_REFCOUNT,
10
+ assert_raises_regex,
11
+ )
12
+ import textwrap
13
+
14
+ class TestArrayRepr:
15
+ def test_nan_inf(self):
16
+ x = np.array([np.nan, np.inf])
17
+ assert_equal(repr(x), 'array([nan, inf])')
18
+
19
+ def test_subclass(self):
20
+ class sub(np.ndarray): pass
21
+
22
+ # one dimensional
23
+ x1d = np.array([1, 2]).view(sub)
24
+ assert_equal(repr(x1d), 'sub([1, 2])')
25
+
26
+ # two dimensional
27
+ x2d = np.array([[1, 2], [3, 4]]).view(sub)
28
+ assert_equal(repr(x2d),
29
+ 'sub([[1, 2],\n'
30
+ ' [3, 4]])')
31
+
32
+ # two dimensional with flexible dtype
33
+ xstruct = np.ones((2,2), dtype=[('a', '<i4')]).view(sub)
34
+ assert_equal(repr(xstruct),
35
+ "sub([[(1,), (1,)],\n"
36
+ " [(1,), (1,)]], dtype=[('a', '<i4')])"
37
+ )
38
+
39
+ @pytest.mark.xfail(reason="See gh-10544")
40
+ def test_object_subclass(self):
41
+ class sub(np.ndarray):
42
+ def __new__(cls, inp):
43
+ obj = np.asarray(inp).view(cls)
44
+ return obj
45
+
46
+ def __getitem__(self, ind):
47
+ ret = super().__getitem__(ind)
48
+ return sub(ret)
49
+
50
+ # test that object + subclass is OK:
51
+ x = sub([None, None])
52
+ assert_equal(repr(x), 'sub([None, None], dtype=object)')
53
+ assert_equal(str(x), '[None None]')
54
+
55
+ x = sub([None, sub([None, None])])
56
+ assert_equal(repr(x),
57
+ 'sub([None, sub([None, None], dtype=object)], dtype=object)')
58
+ assert_equal(str(x), '[None sub([None, None], dtype=object)]')
59
+
60
+ def test_0d_object_subclass(self):
61
+ # make sure that subclasses which return 0ds instead
62
+ # of scalars don't cause infinite recursion in str
63
+ class sub(np.ndarray):
64
+ def __new__(cls, inp):
65
+ obj = np.asarray(inp).view(cls)
66
+ return obj
67
+
68
+ def __getitem__(self, ind):
69
+ ret = super().__getitem__(ind)
70
+ return sub(ret)
71
+
72
+ x = sub(1)
73
+ assert_equal(repr(x), 'sub(1)')
74
+ assert_equal(str(x), '1')
75
+
76
+ x = sub([1, 1])
77
+ assert_equal(repr(x), 'sub([1, 1])')
78
+ assert_equal(str(x), '[1 1]')
79
+
80
+ # check it works properly with object arrays too
81
+ x = sub(None)
82
+ assert_equal(repr(x), 'sub(None, dtype=object)')
83
+ assert_equal(str(x), 'None')
84
+
85
+ # plus recursive object arrays (even depth > 1)
86
+ y = sub(None)
87
+ x[()] = y
88
+ y[()] = x
89
+ assert_equal(repr(x),
90
+ 'sub(sub(sub(..., dtype=object), dtype=object), dtype=object)')
91
+ assert_equal(str(x), '...')
92
+ x[()] = 0 # resolve circular references for garbage collector
93
+
94
+ # nested 0d-subclass-object
95
+ x = sub(None)
96
+ x[()] = sub(None)
97
+ assert_equal(repr(x), 'sub(sub(None, dtype=object), dtype=object)')
98
+ assert_equal(str(x), 'None')
99
+
100
+ # gh-10663
101
+ class DuckCounter(np.ndarray):
102
+ def __getitem__(self, item):
103
+ result = super().__getitem__(item)
104
+ if not isinstance(result, DuckCounter):
105
+ result = result[...].view(DuckCounter)
106
+ return result
107
+
108
+ def to_string(self):
109
+ return {0: 'zero', 1: 'one', 2: 'two'}.get(self.item(), 'many')
110
+
111
+ def __str__(self):
112
+ if self.shape == ():
113
+ return self.to_string()
114
+ else:
115
+ fmt = {'all': lambda x: x.to_string()}
116
+ return np.array2string(self, formatter=fmt)
117
+
118
+ dc = np.arange(5).view(DuckCounter)
119
+ assert_equal(str(dc), "[zero one two many many]")
120
+ assert_equal(str(dc[0]), "zero")
121
+
122
+ def test_self_containing(self):
123
+ arr0d = np.array(None)
124
+ arr0d[()] = arr0d
125
+ assert_equal(repr(arr0d),
126
+ 'array(array(..., dtype=object), dtype=object)')
127
+ arr0d[()] = 0 # resolve recursion for garbage collector
128
+
129
+ arr1d = np.array([None, None])
130
+ arr1d[1] = arr1d
131
+ assert_equal(repr(arr1d),
132
+ 'array([None, array(..., dtype=object)], dtype=object)')
133
+ arr1d[1] = 0 # resolve recursion for garbage collector
134
+
135
+ first = np.array(None)
136
+ second = np.array(None)
137
+ first[()] = second
138
+ second[()] = first
139
+ assert_equal(repr(first),
140
+ 'array(array(array(..., dtype=object), dtype=object), dtype=object)')
141
+ first[()] = 0 # resolve circular references for garbage collector
142
+
143
+ def test_containing_list(self):
144
+ # printing square brackets directly would be ambiguuous
145
+ arr1d = np.array([None, None])
146
+ arr1d[0] = [1, 2]
147
+ arr1d[1] = [3]
148
+ assert_equal(repr(arr1d),
149
+ 'array([list([1, 2]), list([3])], dtype=object)')
150
+
151
+ def test_void_scalar_recursion(self):
152
+ # gh-9345
153
+ repr(np.void(b'test')) # RecursionError ?
154
+
155
+ def test_fieldless_structured(self):
156
+ # gh-10366
157
+ no_fields = np.dtype([])
158
+ arr_no_fields = np.empty(4, dtype=no_fields)
159
+ assert_equal(repr(arr_no_fields), 'array([(), (), (), ()], dtype=[])')
160
+
161
+
162
+ class TestComplexArray:
163
+ def test_str(self):
164
+ rvals = [0, 1, -1, np.inf, -np.inf, np.nan]
165
+ cvals = [complex(rp, ip) for rp in rvals for ip in rvals]
166
+ dtypes = [np.complex64, np.cdouble, np.clongdouble]
167
+ actual = [str(np.array([c], dt)) for c in cvals for dt in dtypes]
168
+ wanted = [
169
+ '[0.+0.j]', '[0.+0.j]', '[0.+0.j]',
170
+ '[0.+1.j]', '[0.+1.j]', '[0.+1.j]',
171
+ '[0.-1.j]', '[0.-1.j]', '[0.-1.j]',
172
+ '[0.+infj]', '[0.+infj]', '[0.+infj]',
173
+ '[0.-infj]', '[0.-infj]', '[0.-infj]',
174
+ '[0.+nanj]', '[0.+nanj]', '[0.+nanj]',
175
+ '[1.+0.j]', '[1.+0.j]', '[1.+0.j]',
176
+ '[1.+1.j]', '[1.+1.j]', '[1.+1.j]',
177
+ '[1.-1.j]', '[1.-1.j]', '[1.-1.j]',
178
+ '[1.+infj]', '[1.+infj]', '[1.+infj]',
179
+ '[1.-infj]', '[1.-infj]', '[1.-infj]',
180
+ '[1.+nanj]', '[1.+nanj]', '[1.+nanj]',
181
+ '[-1.+0.j]', '[-1.+0.j]', '[-1.+0.j]',
182
+ '[-1.+1.j]', '[-1.+1.j]', '[-1.+1.j]',
183
+ '[-1.-1.j]', '[-1.-1.j]', '[-1.-1.j]',
184
+ '[-1.+infj]', '[-1.+infj]', '[-1.+infj]',
185
+ '[-1.-infj]', '[-1.-infj]', '[-1.-infj]',
186
+ '[-1.+nanj]', '[-1.+nanj]', '[-1.+nanj]',
187
+ '[inf+0.j]', '[inf+0.j]', '[inf+0.j]',
188
+ '[inf+1.j]', '[inf+1.j]', '[inf+1.j]',
189
+ '[inf-1.j]', '[inf-1.j]', '[inf-1.j]',
190
+ '[inf+infj]', '[inf+infj]', '[inf+infj]',
191
+ '[inf-infj]', '[inf-infj]', '[inf-infj]',
192
+ '[inf+nanj]', '[inf+nanj]', '[inf+nanj]',
193
+ '[-inf+0.j]', '[-inf+0.j]', '[-inf+0.j]',
194
+ '[-inf+1.j]', '[-inf+1.j]', '[-inf+1.j]',
195
+ '[-inf-1.j]', '[-inf-1.j]', '[-inf-1.j]',
196
+ '[-inf+infj]', '[-inf+infj]', '[-inf+infj]',
197
+ '[-inf-infj]', '[-inf-infj]', '[-inf-infj]',
198
+ '[-inf+nanj]', '[-inf+nanj]', '[-inf+nanj]',
199
+ '[nan+0.j]', '[nan+0.j]', '[nan+0.j]',
200
+ '[nan+1.j]', '[nan+1.j]', '[nan+1.j]',
201
+ '[nan-1.j]', '[nan-1.j]', '[nan-1.j]',
202
+ '[nan+infj]', '[nan+infj]', '[nan+infj]',
203
+ '[nan-infj]', '[nan-infj]', '[nan-infj]',
204
+ '[nan+nanj]', '[nan+nanj]', '[nan+nanj]']
205
+
206
+ for res, val in zip(actual, wanted):
207
+ assert_equal(res, val)
208
+
209
+ class TestArray2String:
210
+ def test_basic(self):
211
+ """Basic test of array2string."""
212
+ a = np.arange(3)
213
+ assert_(np.array2string(a) == '[0 1 2]')
214
+ assert_(np.array2string(a, max_line_width=4, legacy='1.13') == '[0 1\n 2]')
215
+ assert_(np.array2string(a, max_line_width=4) == '[0\n 1\n 2]')
216
+
217
+ def test_unexpected_kwarg(self):
218
+ # ensure than an appropriate TypeError
219
+ # is raised when array2string receives
220
+ # an unexpected kwarg
221
+
222
+ with assert_raises_regex(TypeError, 'nonsense'):
223
+ np.array2string(np.array([1, 2, 3]),
224
+ nonsense=None)
225
+
226
+ def test_format_function(self):
227
+ """Test custom format function for each element in array."""
228
+ def _format_function(x):
229
+ if np.abs(x) < 1:
230
+ return '.'
231
+ elif np.abs(x) < 2:
232
+ return 'o'
233
+ else:
234
+ return 'O'
235
+
236
+ x = np.arange(3)
237
+ x_hex = "[0x0 0x1 0x2]"
238
+ x_oct = "[0o0 0o1 0o2]"
239
+ assert_(np.array2string(x, formatter={'all':_format_function}) ==
240
+ "[. o O]")
241
+ assert_(np.array2string(x, formatter={'int_kind':_format_function}) ==
242
+ "[. o O]")
243
+ assert_(np.array2string(x, formatter={'all':lambda x: "%.4f" % x}) ==
244
+ "[0.0000 1.0000 2.0000]")
245
+ assert_equal(np.array2string(x, formatter={'int':lambda x: hex(x)}),
246
+ x_hex)
247
+ assert_equal(np.array2string(x, formatter={'int':lambda x: oct(x)}),
248
+ x_oct)
249
+
250
+ x = np.arange(3.)
251
+ assert_(np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x}) ==
252
+ "[0.00 1.00 2.00]")
253
+ assert_(np.array2string(x, formatter={'float':lambda x: "%.2f" % x}) ==
254
+ "[0.00 1.00 2.00]")
255
+
256
+ s = np.array(['abc', 'def'])
257
+ assert_(np.array2string(s, formatter={'numpystr':lambda s: s*2}) ==
258
+ '[abcabc defdef]')
259
+
260
+
261
+ def test_structure_format(self):
262
+ dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
263
+ x = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt)
264
+ assert_equal(np.array2string(x),
265
+ "[('Sarah', [8., 7.]) ('John', [6., 7.])]")
266
+
267
+ np.set_printoptions(legacy='1.13')
268
+ try:
269
+ # for issue #5692
270
+ A = np.zeros(shape=10, dtype=[("A", "M8[s]")])
271
+ A[5:].fill(np.datetime64('NaT'))
272
+ assert_equal(
273
+ np.array2string(A),
274
+ textwrap.dedent("""\
275
+ [('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',)
276
+ ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('NaT',) ('NaT',)
277
+ ('NaT',) ('NaT',) ('NaT',)]""")
278
+ )
279
+ finally:
280
+ np.set_printoptions(legacy=False)
281
+
282
+ # same again, but with non-legacy behavior
283
+ assert_equal(
284
+ np.array2string(A),
285
+ textwrap.dedent("""\
286
+ [('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',)
287
+ ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',)
288
+ ('1970-01-01T00:00:00',) ( 'NaT',)
289
+ ( 'NaT',) ( 'NaT',)
290
+ ( 'NaT',) ( 'NaT',)]""")
291
+ )
292
+
293
+ # and again, with timedeltas
294
+ A = np.full(10, 123456, dtype=[("A", "m8[s]")])
295
+ A[5:].fill(np.datetime64('NaT'))
296
+ assert_equal(
297
+ np.array2string(A),
298
+ textwrap.dedent("""\
299
+ [(123456,) (123456,) (123456,) (123456,) (123456,) ( 'NaT',) ( 'NaT',)
300
+ ( 'NaT',) ( 'NaT',) ( 'NaT',)]""")
301
+ )
302
+
303
+ # See #8160
304
+ struct_int = np.array([([1, -1],), ([123, 1],)], dtype=[('B', 'i4', 2)])
305
+ assert_equal(np.array2string(struct_int),
306
+ "[([ 1, -1],) ([123, 1],)]")
307
+ struct_2dint = np.array([([[0, 1], [2, 3]],), ([[12, 0], [0, 0]],)],
308
+ dtype=[('B', 'i4', (2, 2))])
309
+ assert_equal(np.array2string(struct_2dint),
310
+ "[([[ 0, 1], [ 2, 3]],) ([[12, 0], [ 0, 0]],)]")
311
+
312
+ # See #8172
313
+ array_scalar = np.array(
314
+ (1., 2.1234567890123456789, 3.), dtype=('f8,f8,f8'))
315
+ assert_equal(np.array2string(array_scalar), "(1., 2.12345679, 3.)")
316
+
317
+ def test_unstructured_void_repr(self):
318
+ a = np.array([27, 91, 50, 75, 7, 65, 10, 8,
319
+ 27, 91, 51, 49,109, 82,101,100], dtype='u1').view('V8')
320
+ assert_equal(repr(a[0]), r"void(b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08')")
321
+ assert_equal(str(a[0]), r"b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'")
322
+ assert_equal(repr(a),
323
+ r"array([b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'," "\n"
324
+ r" b'\x1B\x5B\x33\x31\x6D\x52\x65\x64'], dtype='|V8')")
325
+
326
+ assert_equal(eval(repr(a), vars(np)), a)
327
+ assert_equal(eval(repr(a[0]), vars(np)), a[0])
328
+
329
+ def test_edgeitems_kwarg(self):
330
+ # previously the global print options would be taken over the kwarg
331
+ arr = np.zeros(3, int)
332
+ assert_equal(
333
+ np.array2string(arr, edgeitems=1, threshold=0),
334
+ "[0 ... 0]"
335
+ )
336
+
337
+ def test_summarize_1d(self):
338
+ A = np.arange(1001)
339
+ strA = '[ 0 1 2 ... 998 999 1000]'
340
+ assert_equal(str(A), strA)
341
+
342
+ reprA = 'array([ 0, 1, 2, ..., 998, 999, 1000])'
343
+ assert_equal(repr(A), reprA)
344
+
345
+ def test_summarize_2d(self):
346
+ A = np.arange(1002).reshape(2, 501)
347
+ strA = '[[ 0 1 2 ... 498 499 500]\n' \
348
+ ' [ 501 502 503 ... 999 1000 1001]]'
349
+ assert_equal(str(A), strA)
350
+
351
+ reprA = 'array([[ 0, 1, 2, ..., 498, 499, 500],\n' \
352
+ ' [ 501, 502, 503, ..., 999, 1000, 1001]])'
353
+ assert_equal(repr(A), reprA)
354
+
355
+ def test_linewidth(self):
356
+ a = np.full(6, 1)
357
+
358
+ def make_str(a, width, **kw):
359
+ return np.array2string(a, separator="", max_line_width=width, **kw)
360
+
361
+ assert_equal(make_str(a, 8, legacy='1.13'), '[111111]')
362
+ assert_equal(make_str(a, 7, legacy='1.13'), '[111111]')
363
+ assert_equal(make_str(a, 5, legacy='1.13'), '[1111\n'
364
+ ' 11]')
365
+
366
+ assert_equal(make_str(a, 8), '[111111]')
367
+ assert_equal(make_str(a, 7), '[11111\n'
368
+ ' 1]')
369
+ assert_equal(make_str(a, 5), '[111\n'
370
+ ' 111]')
371
+
372
+ b = a[None,None,:]
373
+
374
+ assert_equal(make_str(b, 12, legacy='1.13'), '[[[111111]]]')
375
+ assert_equal(make_str(b, 9, legacy='1.13'), '[[[111111]]]')
376
+ assert_equal(make_str(b, 8, legacy='1.13'), '[[[11111\n'
377
+ ' 1]]]')
378
+
379
+ assert_equal(make_str(b, 12), '[[[111111]]]')
380
+ assert_equal(make_str(b, 9), '[[[111\n'
381
+ ' 111]]]')
382
+ assert_equal(make_str(b, 8), '[[[11\n'
383
+ ' 11\n'
384
+ ' 11]]]')
385
+
386
+ def test_wide_element(self):
387
+ a = np.array(['xxxxx'])
388
+ assert_equal(
389
+ np.array2string(a, max_line_width=5),
390
+ "['xxxxx']"
391
+ )
392
+ assert_equal(
393
+ np.array2string(a, max_line_width=5, legacy='1.13'),
394
+ "[ 'xxxxx']"
395
+ )
396
+
397
+ def test_multiline_repr(self):
398
+ class MultiLine:
399
+ def __repr__(self):
400
+ return "Line 1\nLine 2"
401
+
402
+ a = np.array([[None, MultiLine()], [MultiLine(), None]])
403
+
404
+ assert_equal(
405
+ np.array2string(a),
406
+ '[[None Line 1\n'
407
+ ' Line 2]\n'
408
+ ' [Line 1\n'
409
+ ' Line 2 None]]'
410
+ )
411
+ assert_equal(
412
+ np.array2string(a, max_line_width=5),
413
+ '[[None\n'
414
+ ' Line 1\n'
415
+ ' Line 2]\n'
416
+ ' [Line 1\n'
417
+ ' Line 2\n'
418
+ ' None]]'
419
+ )
420
+ assert_equal(
421
+ repr(a),
422
+ 'array([[None, Line 1\n'
423
+ ' Line 2],\n'
424
+ ' [Line 1\n'
425
+ ' Line 2, None]], dtype=object)'
426
+ )
427
+
428
+ class MultiLineLong:
429
+ def __repr__(self):
430
+ return "Line 1\nLooooooooooongestLine2\nLongerLine 3"
431
+
432
+ a = np.array([[None, MultiLineLong()], [MultiLineLong(), None]])
433
+ assert_equal(
434
+ repr(a),
435
+ 'array([[None, Line 1\n'
436
+ ' LooooooooooongestLine2\n'
437
+ ' LongerLine 3 ],\n'
438
+ ' [Line 1\n'
439
+ ' LooooooooooongestLine2\n'
440
+ ' LongerLine 3 , None]], dtype=object)'
441
+ )
442
+ assert_equal(
443
+ np.array_repr(a, 20),
444
+ 'array([[None,\n'
445
+ ' Line 1\n'
446
+ ' LooooooooooongestLine2\n'
447
+ ' LongerLine 3 ],\n'
448
+ ' [Line 1\n'
449
+ ' LooooooooooongestLine2\n'
450
+ ' LongerLine 3 ,\n'
451
+ ' None]],\n'
452
+ ' dtype=object)'
453
+ )
454
+
455
+ def test_nested_array_repr(self):
456
+ a = np.empty((2, 2), dtype=object)
457
+ a[0, 0] = np.eye(2)
458
+ a[0, 1] = np.eye(3)
459
+ a[1, 0] = None
460
+ a[1, 1] = np.ones((3, 1))
461
+ assert_equal(
462
+ repr(a),
463
+ 'array([[array([[1., 0.],\n'
464
+ ' [0., 1.]]), array([[1., 0., 0.],\n'
465
+ ' [0., 1., 0.],\n'
466
+ ' [0., 0., 1.]])],\n'
467
+ ' [None, array([[1.],\n'
468
+ ' [1.],\n'
469
+ ' [1.]])]], dtype=object)'
470
+ )
471
+
472
+ @given(hynp.from_dtype(np.dtype("U")))
473
+ def test_any_text(self, text):
474
+ # This test checks that, given any value that can be represented in an
475
+ # array of dtype("U") (i.e. unicode string), ...
476
+ a = np.array([text, text, text])
477
+ # casting a list of them to an array does not e.g. truncate the value
478
+ assert_equal(a[0], text)
479
+ # and that np.array2string puts a newline in the expected location
480
+ expected_repr = "[{0!r} {0!r}\n {0!r}]".format(text)
481
+ result = np.array2string(a, max_line_width=len(repr(text)) * 2 + 3)
482
+ assert_equal(result, expected_repr)
483
+
484
+ @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
485
+ def test_refcount(self):
486
+ # make sure we do not hold references to the array due to a recursive
487
+ # closure (gh-10620)
488
+ gc.disable()
489
+ a = np.arange(2)
490
+ r1 = sys.getrefcount(a)
491
+ np.array2string(a)
492
+ np.array2string(a)
493
+ r2 = sys.getrefcount(a)
494
+ gc.collect()
495
+ gc.enable()
496
+ assert_(r1 == r2)
497
+
498
+ class TestPrintOptions:
499
+ """Test getting and setting global print options."""
500
+
501
+ def setup_method(self):
502
+ self.oldopts = np.get_printoptions()
503
+
504
+ def teardown_method(self):
505
+ np.set_printoptions(**self.oldopts)
506
+
507
+ def test_basic(self):
508
+ x = np.array([1.5, 0, 1.234567890])
509
+ assert_equal(repr(x), "array([1.5 , 0. , 1.23456789])")
510
+ np.set_printoptions(precision=4)
511
+ assert_equal(repr(x), "array([1.5 , 0. , 1.2346])")
512
+
513
+ def test_precision_zero(self):
514
+ np.set_printoptions(precision=0)
515
+ for values, string in (
516
+ ([0.], "0."), ([.3], "0."), ([-.3], "-0."), ([.7], "1."),
517
+ ([1.5], "2."), ([-1.5], "-2."), ([-15.34], "-15."),
518
+ ([100.], "100."), ([.2, -1, 122.51], " 0., -1., 123."),
519
+ ([0], "0"), ([-12], "-12"), ([complex(.3, -.7)], "0.-1.j")):
520
+ x = np.array(values)
521
+ assert_equal(repr(x), "array([%s])" % string)
522
+
523
+ def test_formatter(self):
524
+ x = np.arange(3)
525
+ np.set_printoptions(formatter={'all':lambda x: str(x-1)})
526
+ assert_equal(repr(x), "array([-1, 0, 1])")
527
+
528
+ def test_formatter_reset(self):
529
+ x = np.arange(3)
530
+ np.set_printoptions(formatter={'all':lambda x: str(x-1)})
531
+ assert_equal(repr(x), "array([-1, 0, 1])")
532
+ np.set_printoptions(formatter={'int':None})
533
+ assert_equal(repr(x), "array([0, 1, 2])")
534
+
535
+ np.set_printoptions(formatter={'all':lambda x: str(x-1)})
536
+ assert_equal(repr(x), "array([-1, 0, 1])")
537
+ np.set_printoptions(formatter={'all':None})
538
+ assert_equal(repr(x), "array([0, 1, 2])")
539
+
540
+ np.set_printoptions(formatter={'int':lambda x: str(x-1)})
541
+ assert_equal(repr(x), "array([-1, 0, 1])")
542
+ np.set_printoptions(formatter={'int_kind':None})
543
+ assert_equal(repr(x), "array([0, 1, 2])")
544
+
545
+ x = np.arange(3.)
546
+ np.set_printoptions(formatter={'float':lambda x: str(x-1)})
547
+ assert_equal(repr(x), "array([-1.0, 0.0, 1.0])")
548
+ np.set_printoptions(formatter={'float_kind':None})
549
+ assert_equal(repr(x), "array([0., 1., 2.])")
550
+
551
+ def test_0d_arrays(self):
552
+ assert_equal(str(np.array(u'café', '<U4')), u'café')
553
+
554
+ assert_equal(repr(np.array('café', '<U4')),
555
+ "array('café', dtype='<U4')")
556
+ assert_equal(str(np.array('test', np.str_)), 'test')
557
+
558
+ a = np.zeros(1, dtype=[('a', '<i4', (3,))])
559
+ assert_equal(str(a[0]), '([0, 0, 0],)')
560
+
561
+ assert_equal(repr(np.datetime64('2005-02-25')[...]),
562
+ "array('2005-02-25', dtype='datetime64[D]')")
563
+
564
+ assert_equal(repr(np.timedelta64('10', 'Y')[...]),
565
+ "array(10, dtype='timedelta64[Y]')")
566
+
567
+ # repr of 0d arrays is affected by printoptions
568
+ x = np.array(1)
569
+ np.set_printoptions(formatter={'all':lambda x: "test"})
570
+ assert_equal(repr(x), "array(test)")
571
+ # str is unaffected
572
+ assert_equal(str(x), "1")
573
+
574
+ # check `style` arg raises
575
+ assert_warns(DeprecationWarning, np.array2string,
576
+ np.array(1.), style=repr)
577
+ # but not in legacy mode
578
+ np.array2string(np.array(1.), style=repr, legacy='1.13')
579
+ # gh-10934 style was broken in legacy mode, check it works
580
+ np.array2string(np.array(1.), legacy='1.13')
581
+
582
+ def test_float_spacing(self):
583
+ x = np.array([1., 2., 3.])
584
+ y = np.array([1., 2., -10.])
585
+ z = np.array([100., 2., -1.])
586
+ w = np.array([-100., 2., 1.])
587
+
588
+ assert_equal(repr(x), 'array([1., 2., 3.])')
589
+ assert_equal(repr(y), 'array([ 1., 2., -10.])')
590
+ assert_equal(repr(np.array(y[0])), 'array(1.)')
591
+ assert_equal(repr(np.array(y[-1])), 'array(-10.)')
592
+ assert_equal(repr(z), 'array([100., 2., -1.])')
593
+ assert_equal(repr(w), 'array([-100., 2., 1.])')
594
+
595
+ assert_equal(repr(np.array([np.nan, np.inf])), 'array([nan, inf])')
596
+ assert_equal(repr(np.array([np.nan, -np.inf])), 'array([ nan, -inf])')
597
+
598
+ x = np.array([np.inf, 100000, 1.1234])
599
+ y = np.array([np.inf, 100000, -1.1234])
600
+ z = np.array([np.inf, 1.1234, -1e120])
601
+ np.set_printoptions(precision=2)
602
+ assert_equal(repr(x), 'array([ inf, 1.00e+05, 1.12e+00])')
603
+ assert_equal(repr(y), 'array([ inf, 1.00e+05, -1.12e+00])')
604
+ assert_equal(repr(z), 'array([ inf, 1.12e+000, -1.00e+120])')
605
+
606
+ def test_bool_spacing(self):
607
+ assert_equal(repr(np.array([True, True])),
608
+ 'array([ True, True])')
609
+ assert_equal(repr(np.array([True, False])),
610
+ 'array([ True, False])')
611
+ assert_equal(repr(np.array([True])),
612
+ 'array([ True])')
613
+ assert_equal(repr(np.array(True)),
614
+ 'array(True)')
615
+ assert_equal(repr(np.array(False)),
616
+ 'array(False)')
617
+
618
+ def test_sign_spacing(self):
619
+ a = np.arange(4.)
620
+ b = np.array([1.234e9])
621
+ c = np.array([1.0 + 1.0j, 1.123456789 + 1.123456789j], dtype='c16')
622
+
623
+ assert_equal(repr(a), 'array([0., 1., 2., 3.])')
624
+ assert_equal(repr(np.array(1.)), 'array(1.)')
625
+ assert_equal(repr(b), 'array([1.234e+09])')
626
+ assert_equal(repr(np.array([0.])), 'array([0.])')
627
+ assert_equal(repr(c),
628
+ "array([1. +1.j , 1.12345679+1.12345679j])")
629
+ assert_equal(repr(np.array([0., -0.])), 'array([ 0., -0.])')
630
+
631
+ np.set_printoptions(sign=' ')
632
+ assert_equal(repr(a), 'array([ 0., 1., 2., 3.])')
633
+ assert_equal(repr(np.array(1.)), 'array( 1.)')
634
+ assert_equal(repr(b), 'array([ 1.234e+09])')
635
+ assert_equal(repr(c),
636
+ "array([ 1. +1.j , 1.12345679+1.12345679j])")
637
+ assert_equal(repr(np.array([0., -0.])), 'array([ 0., -0.])')
638
+
639
+ np.set_printoptions(sign='+')
640
+ assert_equal(repr(a), 'array([+0., +1., +2., +3.])')
641
+ assert_equal(repr(np.array(1.)), 'array(+1.)')
642
+ assert_equal(repr(b), 'array([+1.234e+09])')
643
+ assert_equal(repr(c),
644
+ "array([+1. +1.j , +1.12345679+1.12345679j])")
645
+
646
+ np.set_printoptions(legacy='1.13')
647
+ assert_equal(repr(a), 'array([ 0., 1., 2., 3.])')
648
+ assert_equal(repr(b), 'array([ 1.23400000e+09])')
649
+ assert_equal(repr(-b), 'array([ -1.23400000e+09])')
650
+ assert_equal(repr(np.array(1.)), 'array(1.0)')
651
+ assert_equal(repr(np.array([0.])), 'array([ 0.])')
652
+ assert_equal(repr(c),
653
+ "array([ 1.00000000+1.j , 1.12345679+1.12345679j])")
654
+ # gh-10383
655
+ assert_equal(str(np.array([-1., 10])), "[ -1. 10.]")
656
+
657
+ assert_raises(TypeError, np.set_printoptions, wrongarg=True)
658
+
659
+ def test_float_overflow_nowarn(self):
660
+ # make sure internal computations in FloatingFormat don't
661
+ # warn about overflow
662
+ repr(np.array([1e4, 0.1], dtype='f2'))
663
+
664
+ def test_sign_spacing_structured(self):
665
+ a = np.ones(2, dtype='<f,<f')
666
+ assert_equal(repr(a),
667
+ "array([(1., 1.), (1., 1.)], dtype=[('f0', '<f4'), ('f1', '<f4')])")
668
+ assert_equal(repr(a[0]), "(1., 1.)")
669
+
670
+ def test_floatmode(self):
671
+ x = np.array([0.6104, 0.922, 0.457, 0.0906, 0.3733, 0.007244,
672
+ 0.5933, 0.947, 0.2383, 0.4226], dtype=np.float16)
673
+ y = np.array([0.2918820979355541, 0.5064172631089138,
674
+ 0.2848750619642916, 0.4342965294660567,
675
+ 0.7326538397312751, 0.3459503329096204,
676
+ 0.0862072768214508, 0.39112753029631175],
677
+ dtype=np.float64)
678
+ z = np.arange(6, dtype=np.float16)/10
679
+ c = np.array([1.0 + 1.0j, 1.123456789 + 1.123456789j], dtype='c16')
680
+
681
+ # also make sure 1e23 is right (is between two fp numbers)
682
+ w = np.array(['1e{}'.format(i) for i in range(25)], dtype=np.float64)
683
+ # note: we construct w from the strings `1eXX` instead of doing
684
+ # `10.**arange(24)` because it turns out the two are not equivalent in
685
+ # python. On some architectures `1e23 != 10.**23`.
686
+ wp = np.array([1.234e1, 1e2, 1e123])
687
+
688
+ # unique mode
689
+ np.set_printoptions(floatmode='unique')
690
+ assert_equal(repr(x),
691
+ "array([0.6104 , 0.922 , 0.457 , 0.0906 , 0.3733 , 0.007244,\n"
692
+ " 0.5933 , 0.947 , 0.2383 , 0.4226 ], dtype=float16)")
693
+ assert_equal(repr(y),
694
+ "array([0.2918820979355541 , 0.5064172631089138 , 0.2848750619642916 ,\n"
695
+ " 0.4342965294660567 , 0.7326538397312751 , 0.3459503329096204 ,\n"
696
+ " 0.0862072768214508 , 0.39112753029631175])")
697
+ assert_equal(repr(z),
698
+ "array([0. , 0.1, 0.2, 0.3, 0.4, 0.5], dtype=float16)")
699
+ assert_equal(repr(w),
700
+ "array([1.e+00, 1.e+01, 1.e+02, 1.e+03, 1.e+04, 1.e+05, 1.e+06, 1.e+07,\n"
701
+ " 1.e+08, 1.e+09, 1.e+10, 1.e+11, 1.e+12, 1.e+13, 1.e+14, 1.e+15,\n"
702
+ " 1.e+16, 1.e+17, 1.e+18, 1.e+19, 1.e+20, 1.e+21, 1.e+22, 1.e+23,\n"
703
+ " 1.e+24])")
704
+ assert_equal(repr(wp), "array([1.234e+001, 1.000e+002, 1.000e+123])")
705
+ assert_equal(repr(c),
706
+ "array([1. +1.j , 1.123456789+1.123456789j])")
707
+
708
+ # maxprec mode, precision=8
709
+ np.set_printoptions(floatmode='maxprec', precision=8)
710
+ assert_equal(repr(x),
711
+ "array([0.6104 , 0.922 , 0.457 , 0.0906 , 0.3733 , 0.007244,\n"
712
+ " 0.5933 , 0.947 , 0.2383 , 0.4226 ], dtype=float16)")
713
+ assert_equal(repr(y),
714
+ "array([0.2918821 , 0.50641726, 0.28487506, 0.43429653, 0.73265384,\n"
715
+ " 0.34595033, 0.08620728, 0.39112753])")
716
+ assert_equal(repr(z),
717
+ "array([0. , 0.1, 0.2, 0.3, 0.4, 0.5], dtype=float16)")
718
+ assert_equal(repr(w[::5]),
719
+ "array([1.e+00, 1.e+05, 1.e+10, 1.e+15, 1.e+20])")
720
+ assert_equal(repr(wp), "array([1.234e+001, 1.000e+002, 1.000e+123])")
721
+ assert_equal(repr(c),
722
+ "array([1. +1.j , 1.12345679+1.12345679j])")
723
+
724
+ # fixed mode, precision=4
725
+ np.set_printoptions(floatmode='fixed', precision=4)
726
+ assert_equal(repr(x),
727
+ "array([0.6104, 0.9219, 0.4570, 0.0906, 0.3733, 0.0072, 0.5933, 0.9468,\n"
728
+ " 0.2383, 0.4226], dtype=float16)")
729
+ assert_equal(repr(y),
730
+ "array([0.2919, 0.5064, 0.2849, 0.4343, 0.7327, 0.3460, 0.0862, 0.3911])")
731
+ assert_equal(repr(z),
732
+ "array([0.0000, 0.1000, 0.2000, 0.3000, 0.3999, 0.5000], dtype=float16)")
733
+ assert_equal(repr(w[::5]),
734
+ "array([1.0000e+00, 1.0000e+05, 1.0000e+10, 1.0000e+15, 1.0000e+20])")
735
+ assert_equal(repr(wp), "array([1.2340e+001, 1.0000e+002, 1.0000e+123])")
736
+ assert_equal(repr(np.zeros(3)), "array([0.0000, 0.0000, 0.0000])")
737
+ assert_equal(repr(c),
738
+ "array([1.0000+1.0000j, 1.1235+1.1235j])")
739
+ # for larger precision, representation error becomes more apparent:
740
+ np.set_printoptions(floatmode='fixed', precision=8)
741
+ assert_equal(repr(z),
742
+ "array([0.00000000, 0.09997559, 0.19995117, 0.30004883, 0.39990234,\n"
743
+ " 0.50000000], dtype=float16)")
744
+
745
+ # maxprec_equal mode, precision=8
746
+ np.set_printoptions(floatmode='maxprec_equal', precision=8)
747
+ assert_equal(repr(x),
748
+ "array([0.610352, 0.921875, 0.457031, 0.090576, 0.373291, 0.007244,\n"
749
+ " 0.593262, 0.946777, 0.238281, 0.422607], dtype=float16)")
750
+ assert_equal(repr(y),
751
+ "array([0.29188210, 0.50641726, 0.28487506, 0.43429653, 0.73265384,\n"
752
+ " 0.34595033, 0.08620728, 0.39112753])")
753
+ assert_equal(repr(z),
754
+ "array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5], dtype=float16)")
755
+ assert_equal(repr(w[::5]),
756
+ "array([1.e+00, 1.e+05, 1.e+10, 1.e+15, 1.e+20])")
757
+ assert_equal(repr(wp), "array([1.234e+001, 1.000e+002, 1.000e+123])")
758
+ assert_equal(repr(c),
759
+ "array([1.00000000+1.00000000j, 1.12345679+1.12345679j])")
760
+
761
+ # test unique special case (gh-18609)
762
+ a = np.float64.fromhex('-1p-97')
763
+ assert_equal(np.float64(np.array2string(a, floatmode='unique')), a)
764
+
765
+ def test_legacy_mode_scalars(self):
766
+ # in legacy mode, str of floats get truncated, and complex scalars
767
+ # use * for non-finite imaginary part
768
+ np.set_printoptions(legacy='1.13')
769
+ assert_equal(str(np.float64(1.123456789123456789)), '1.12345678912')
770
+ assert_equal(str(np.complex128(complex(1, np.nan))), '(1+nan*j)')
771
+
772
+ np.set_printoptions(legacy=False)
773
+ assert_equal(str(np.float64(1.123456789123456789)),
774
+ '1.1234567891234568')
775
+ assert_equal(str(np.complex128(complex(1, np.nan))), '(1+nanj)')
776
+
777
+ def test_legacy_stray_comma(self):
778
+ np.set_printoptions(legacy='1.13')
779
+ assert_equal(str(np.arange(10000)), '[ 0 1 2 ..., 9997 9998 9999]')
780
+
781
+ np.set_printoptions(legacy=False)
782
+ assert_equal(str(np.arange(10000)), '[ 0 1 2 ... 9997 9998 9999]')
783
+
784
+ def test_dtype_linewidth_wrapping(self):
785
+ np.set_printoptions(linewidth=75)
786
+ assert_equal(repr(np.arange(10,20., dtype='f4')),
787
+ "array([10., 11., 12., 13., 14., 15., 16., 17., 18., 19.], dtype=float32)")
788
+ assert_equal(repr(np.arange(10,23., dtype='f4')), textwrap.dedent("""\
789
+ array([10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22.],
790
+ dtype=float32)"""))
791
+
792
+ styp = '<U4'
793
+ assert_equal(repr(np.ones(3, dtype=styp)),
794
+ "array(['1', '1', '1'], dtype='{}')".format(styp))
795
+ assert_equal(repr(np.ones(12, dtype=styp)), textwrap.dedent("""\
796
+ array(['1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1'],
797
+ dtype='{}')""".format(styp)))
798
+
799
+ def test_linewidth_repr(self):
800
+ a = np.full(7, fill_value=2)
801
+ np.set_printoptions(linewidth=17)
802
+ assert_equal(
803
+ repr(a),
804
+ textwrap.dedent("""\
805
+ array([2, 2, 2,
806
+ 2, 2, 2,
807
+ 2])""")
808
+ )
809
+ np.set_printoptions(linewidth=17, legacy='1.13')
810
+ assert_equal(
811
+ repr(a),
812
+ textwrap.dedent("""\
813
+ array([2, 2, 2,
814
+ 2, 2, 2, 2])""")
815
+ )
816
+
817
+ a = np.full(8, fill_value=2)
818
+
819
+ np.set_printoptions(linewidth=18, legacy=False)
820
+ assert_equal(
821
+ repr(a),
822
+ textwrap.dedent("""\
823
+ array([2, 2, 2,
824
+ 2, 2, 2,
825
+ 2, 2])""")
826
+ )
827
+
828
+ np.set_printoptions(linewidth=18, legacy='1.13')
829
+ assert_equal(
830
+ repr(a),
831
+ textwrap.dedent("""\
832
+ array([2, 2, 2, 2,
833
+ 2, 2, 2, 2])""")
834
+ )
835
+
836
+ def test_linewidth_str(self):
837
+ a = np.full(18, fill_value=2)
838
+ np.set_printoptions(linewidth=18)
839
+ assert_equal(
840
+ str(a),
841
+ textwrap.dedent("""\
842
+ [2 2 2 2 2 2 2 2
843
+ 2 2 2 2 2 2 2 2
844
+ 2 2]""")
845
+ )
846
+ np.set_printoptions(linewidth=18, legacy='1.13')
847
+ assert_equal(
848
+ str(a),
849
+ textwrap.dedent("""\
850
+ [2 2 2 2 2 2 2 2 2
851
+ 2 2 2 2 2 2 2 2 2]""")
852
+ )
853
+
854
+ def test_edgeitems(self):
855
+ np.set_printoptions(edgeitems=1, threshold=1)
856
+ a = np.arange(27).reshape((3, 3, 3))
857
+ assert_equal(
858
+ repr(a),
859
+ textwrap.dedent("""\
860
+ array([[[ 0, ..., 2],
861
+ ...,
862
+ [ 6, ..., 8]],
863
+
864
+ ...,
865
+
866
+ [[18, ..., 20],
867
+ ...,
868
+ [24, ..., 26]]])""")
869
+ )
870
+
871
+ b = np.zeros((3, 3, 1, 1))
872
+ assert_equal(
873
+ repr(b),
874
+ textwrap.dedent("""\
875
+ array([[[[0.]],
876
+
877
+ ...,
878
+
879
+ [[0.]]],
880
+
881
+
882
+ ...,
883
+
884
+
885
+ [[[0.]],
886
+
887
+ ...,
888
+
889
+ [[0.]]]])""")
890
+ )
891
+
892
+ # 1.13 had extra trailing spaces, and was missing newlines
893
+ np.set_printoptions(legacy='1.13')
894
+
895
+ assert_equal(
896
+ repr(a),
897
+ textwrap.dedent("""\
898
+ array([[[ 0, ..., 2],
899
+ ...,
900
+ [ 6, ..., 8]],
901
+
902
+ ...,
903
+ [[18, ..., 20],
904
+ ...,
905
+ [24, ..., 26]]])""")
906
+ )
907
+
908
+ assert_equal(
909
+ repr(b),
910
+ textwrap.dedent("""\
911
+ array([[[[ 0.]],
912
+
913
+ ...,
914
+ [[ 0.]]],
915
+
916
+
917
+ ...,
918
+ [[[ 0.]],
919
+
920
+ ...,
921
+ [[ 0.]]]])""")
922
+ )
923
+
924
+ def test_bad_args(self):
925
+ assert_raises(ValueError, np.set_printoptions, threshold=float('nan'))
926
+ assert_raises(TypeError, np.set_printoptions, threshold='1')
927
+ assert_raises(TypeError, np.set_printoptions, threshold=b'1')
928
+
929
+ assert_raises(TypeError, np.set_printoptions, precision='1')
930
+ assert_raises(TypeError, np.set_printoptions, precision=1.5)
931
+
932
+ def test_unicode_object_array():
933
+ expected = "array(['é'], dtype=object)"
934
+ x = np.array([u'\xe9'], dtype=object)
935
+ assert_equal(repr(x), expected)
936
+
937
+
938
+ class TestContextManager:
939
+ def test_ctx_mgr(self):
940
+ # test that context manager actually works
941
+ with np.printoptions(precision=2):
942
+ s = str(np.array([2.0]) / 3)
943
+ assert_equal(s, '[0.67]')
944
+
945
+ def test_ctx_mgr_restores(self):
946
+ # test that print options are actually restrored
947
+ opts = np.get_printoptions()
948
+ with np.printoptions(precision=opts['precision'] - 1,
949
+ linewidth=opts['linewidth'] - 4):
950
+ pass
951
+ assert_equal(np.get_printoptions(), opts)
952
+
953
+ def test_ctx_mgr_exceptions(self):
954
+ # test that print options are restored even if an exception is raised
955
+ opts = np.get_printoptions()
956
+ try:
957
+ with np.printoptions(precision=2, linewidth=11):
958
+ raise ValueError
959
+ except ValueError:
960
+ pass
961
+ assert_equal(np.get_printoptions(), opts)
962
+
963
+ def test_ctx_mgr_as_smth(self):
964
+ opts = {"precision": 2}
965
+ with np.printoptions(**opts) as ctx:
966
+ saved_opts = ctx.copy()
967
+ assert_equal({k: saved_opts[k] for k in opts}, opts)
wemm/lib/python3.10/site-packages/numpy/core/tests/test_casting_unittests.py ADDED
@@ -0,0 +1,811 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ The tests exercise the casting machinery in a more low-level manner.
3
+ The reason is mostly to test a new implementation of the casting machinery.
4
+
5
+ Unlike most tests in NumPy, these are closer to unit-tests rather
6
+ than integration tests.
7
+ """
8
+
9
+ import pytest
10
+ import textwrap
11
+ import enum
12
+ import random
13
+
14
+ import numpy as np
15
+ from numpy.lib.stride_tricks import as_strided
16
+
17
+ from numpy.testing import assert_array_equal
18
+ from numpy.core._multiarray_umath import _get_castingimpl as get_castingimpl
19
+
20
+
21
+ # Simple skips object, parametric and long double (unsupported by struct)
22
+ simple_dtypes = "?bhilqBHILQefdFD"
23
+ if np.dtype("l").itemsize != np.dtype("q").itemsize:
24
+ # Remove l and L, the table was generated with 64bit linux in mind.
25
+ simple_dtypes = simple_dtypes.replace("l", "").replace("L", "")
26
+ simple_dtypes = [type(np.dtype(c)) for c in simple_dtypes]
27
+
28
+
29
+ def simple_dtype_instances():
30
+ for dtype_class in simple_dtypes:
31
+ dt = dtype_class()
32
+ yield pytest.param(dt, id=str(dt))
33
+ if dt.byteorder != "|":
34
+ dt = dt.newbyteorder()
35
+ yield pytest.param(dt, id=str(dt))
36
+
37
+
38
+ def get_expected_stringlength(dtype):
39
+ """Returns the string length when casting the basic dtypes to strings.
40
+ """
41
+ if dtype == np.bool_:
42
+ return 5
43
+ if dtype.kind in "iu":
44
+ if dtype.itemsize == 1:
45
+ length = 3
46
+ elif dtype.itemsize == 2:
47
+ length = 5
48
+ elif dtype.itemsize == 4:
49
+ length = 10
50
+ elif dtype.itemsize == 8:
51
+ length = 20
52
+ else:
53
+ raise AssertionError(f"did not find expected length for {dtype}")
54
+
55
+ if dtype.kind == "i":
56
+ length += 1 # adds one character for the sign
57
+
58
+ return length
59
+
60
+ # Note: Can't do dtype comparison for longdouble on windows
61
+ if dtype.char == "g":
62
+ return 48
63
+ elif dtype.char == "G":
64
+ return 48 * 2
65
+ elif dtype.kind == "f":
66
+ return 32 # also for half apparently.
67
+ elif dtype.kind == "c":
68
+ return 32 * 2
69
+
70
+ raise AssertionError(f"did not find expected length for {dtype}")
71
+
72
+
73
+ class Casting(enum.IntEnum):
74
+ no = 0
75
+ equiv = 1
76
+ safe = 2
77
+ same_kind = 3
78
+ unsafe = 4
79
+
80
+
81
def _get_cancast_table():
    """Parse the expected can-cast matrix.

    Returns a nested dict ``{from_DType: {to_DType: Casting}}`` built from
    the character table below (rows are the source type, columns the
    destination; ``#`` equiv, ``=`` safe, ``~`` same-kind, ``.`` unsafe).
    """
    rows = textwrap.dedent("""
        X ? b h i l q B H I L Q e f d g F D G S U V O M m
        ? # = = = = = = = = = = = = = = = = = = = = = . =
        b . # = = = = . . . . . = = = = = = = = = = = . =
        h . ~ # = = = . . . . . ~ = = = = = = = = = = . =
        i . ~ ~ # = = . . . . . ~ ~ = = ~ = = = = = = . =
        l . ~ ~ ~ # # . . . . . ~ ~ = = ~ = = = = = = . =
        q . ~ ~ ~ # # . . . . . ~ ~ = = ~ = = = = = = . =
        B . ~ = = = = # = = = = = = = = = = = = = = = . =
        H . ~ ~ = = = ~ # = = = ~ = = = = = = = = = = . =
        I . ~ ~ ~ = = ~ ~ # = = ~ ~ = = ~ = = = = = = . =
        L . ~ ~ ~ ~ ~ ~ ~ ~ # # ~ ~ = = ~ = = = = = = . ~
        Q . ~ ~ ~ ~ ~ ~ ~ ~ # # ~ ~ = = ~ = = = = = = . ~
        e . . . . . . . . . . . # = = = = = = = = = = . .
        f . . . . . . . . . . . ~ # = = = = = = = = = . .
        d . . . . . . . . . . . ~ ~ # = ~ = = = = = = . .
        g . . . . . . . . . . . ~ ~ ~ # ~ ~ = = = = = . .
        F . . . . . . . . . . . . . . . # = = = = = = . .
        D . . . . . . . . . . . . . . . ~ # = = = = = . .
        G . . . . . . . . . . . . . . . ~ ~ # = = = = . .
        S . . . . . . . . . . . . . . . . . . # = = = . .
        U . . . . . . . . . . . . . . . . . . . # = = . .
        V . . . . . . . . . . . . . . . . . . . . # = . .
        O . . . . . . . . . . . . . . . . . . . . = # . .
        M . . . . . . . . . . . . . . . . . . . . = = # .
        m . . . . . . . . . . . . . . . . . . . . = = . #
        """).strip().split("\n")
    # The header row names the dtype of each column (every second char).
    dtypes = [type(np.dtype(c)) for c in rows[0][2::2]]

    convert_cast = {".": Casting.unsafe, "~": Casting.same_kind,
                    "=": Casting.safe, "#": Casting.equiv,
                    " ": -1}

    cancast = {}
    for from_dt, row in zip(dtypes, rows[1:]):
        cancast[from_dt] = {
            to_dt: convert_cast[symbol]
            for to_dt, symbol in zip(dtypes, row[2::2])
        }
    return cancast

CAST_TABLE = _get_cancast_table()
124
+
125
+
126
class TestChanges:
    """
    Exercises a few casting behaviour changes.
    """
    @pytest.mark.parametrize("string", ["S", "U"])
    @pytest.mark.parametrize("floating", ["e", "f", "d", "g"])
    def test_float_to_string(self, floating, string):
        assert np.can_cast(floating, string)
        # 100 is long enough to hold any formatted floating
        assert np.can_cast(floating, f"{string}100")

    def test_to_void(self):
        # Casting to an unsized void is in general considered safe:
        assert np.can_cast("d", "V")
        assert np.can_cast("S20", "V")

        # ... but not when the void target is too small to hold the value:
        assert not np.can_cast("d", "V1")
        assert not np.can_cast("S20", "V1")
        assert not np.can_cast("U1", "V1")
        # Structured to unstructured is just like any other:
        assert np.can_cast("d,i", "V", casting="same_kind")
        # Unstructured void to unstructured is actually no cast at all:
        assert np.can_cast("V3", "V", casting="no")
        assert np.can_cast("V0", "V", casting="no")
151
+
152
+
153
class TestCasting:
    # Best larger than NPY_LOWLEVEL_BUFFER_BLOCKSIZE * itemsize
    size = 1500
155
+
156
+ def get_data(self, dtype1, dtype2):
157
+ if dtype2 is None or dtype1.itemsize >= dtype2.itemsize:
158
+ length = self.size // dtype1.itemsize
159
+ else:
160
+ length = self.size // dtype2.itemsize
161
+
162
+ # Assume that the base array is well enough aligned for all inputs.
163
+ arr1 = np.empty(length, dtype=dtype1)
164
+ assert arr1.flags.c_contiguous
165
+ assert arr1.flags.aligned
166
+
167
+ values = [random.randrange(-128, 128) for _ in range(length)]
168
+
169
+ for i, value in enumerate(values):
170
+ # Use item assignment to ensure this is not using casting:
171
+ arr1[i] = value
172
+
173
+ if dtype2 is None:
174
+ if dtype1.char == "?":
175
+ values = [bool(v) for v in values]
176
+ return arr1, values
177
+
178
+ if dtype2.char == "?":
179
+ values = [bool(v) for v in values]
180
+
181
+ arr2 = np.empty(length, dtype=dtype2)
182
+ assert arr2.flags.c_contiguous
183
+ assert arr2.flags.aligned
184
+
185
+ for i, value in enumerate(values):
186
+ # Use item assignment to ensure this is not using casting:
187
+ arr2[i] = value
188
+
189
+ return arr1, arr2, values
190
+
191
+ def get_data_variation(self, arr1, arr2, aligned=True, contig=True):
192
+ """
193
+ Returns a copy of arr1 that may be non-contiguous or unaligned, and a
194
+ matching array for arr2 (although not a copy).
195
+ """
196
+ if contig:
197
+ stride1 = arr1.dtype.itemsize
198
+ stride2 = arr2.dtype.itemsize
199
+ elif aligned:
200
+ stride1 = 2 * arr1.dtype.itemsize
201
+ stride2 = 2 * arr2.dtype.itemsize
202
+ else:
203
+ stride1 = arr1.dtype.itemsize + 1
204
+ stride2 = arr2.dtype.itemsize + 1
205
+
206
+ max_size1 = len(arr1) * 3 * arr1.dtype.itemsize + 1
207
+ max_size2 = len(arr2) * 3 * arr2.dtype.itemsize + 1
208
+ from_bytes = np.zeros(max_size1, dtype=np.uint8)
209
+ to_bytes = np.zeros(max_size2, dtype=np.uint8)
210
+
211
+ # Sanity check that the above is large enough:
212
+ assert stride1 * len(arr1) <= from_bytes.nbytes
213
+ assert stride2 * len(arr2) <= to_bytes.nbytes
214
+
215
+ if aligned:
216
+ new1 = as_strided(from_bytes[:-1].view(arr1.dtype),
217
+ arr1.shape, (stride1,))
218
+ new2 = as_strided(to_bytes[:-1].view(arr2.dtype),
219
+ arr2.shape, (stride2,))
220
+ else:
221
+ new1 = as_strided(from_bytes[1:].view(arr1.dtype),
222
+ arr1.shape, (stride1,))
223
+ new2 = as_strided(to_bytes[1:].view(arr2.dtype),
224
+ arr2.shape, (stride2,))
225
+
226
+ new1[...] = arr1
227
+
228
+ if not contig:
229
+ # Ensure we did not overwrite bytes that should not be written:
230
+ offset = arr1.dtype.itemsize if aligned else 0
231
+ buf = from_bytes[offset::stride1].tobytes()
232
+ assert buf.count(b"\0") == len(buf)
233
+
234
+ if contig:
235
+ assert new1.flags.c_contiguous
236
+ assert new2.flags.c_contiguous
237
+ else:
238
+ assert not new1.flags.c_contiguous
239
+ assert not new2.flags.c_contiguous
240
+
241
+ if aligned:
242
+ assert new1.flags.aligned
243
+ assert new2.flags.aligned
244
+ else:
245
+ assert not new1.flags.aligned or new1.dtype.alignment == 1
246
+ assert not new2.flags.aligned or new2.dtype.alignment == 1
247
+
248
+ return new1, new2
249
+
250
+ @pytest.mark.parametrize("from_Dt", simple_dtypes)
251
+ def test_simple_cancast(self, from_Dt):
252
+ for to_Dt in simple_dtypes:
253
+ cast = get_castingimpl(from_Dt, to_Dt)
254
+
255
+ for from_dt in [from_Dt(), from_Dt().newbyteorder()]:
256
+ default = cast._resolve_descriptors((from_dt, None))[1][1]
257
+ assert default == to_Dt()
258
+ del default
259
+
260
+ for to_dt in [to_Dt(), to_Dt().newbyteorder()]:
261
+ casting, (from_res, to_res), view_off = (
262
+ cast._resolve_descriptors((from_dt, to_dt)))
263
+ assert(type(from_res) == from_Dt)
264
+ assert(type(to_res) == to_Dt)
265
+ if view_off is not None:
266
+ # If a view is acceptable, this is "no" casting
267
+ # and byte order must be matching.
268
+ assert casting == Casting.no
269
+ # The above table lists this as "equivalent"
270
+ assert Casting.equiv == CAST_TABLE[from_Dt][to_Dt]
271
+ # Note that to_res may not be the same as from_dt
272
+ assert from_res.isnative == to_res.isnative
273
+ else:
274
+ if from_Dt == to_Dt:
275
+ # Note that to_res may not be the same as from_dt
276
+ assert from_res.isnative != to_res.isnative
277
+ assert casting == CAST_TABLE[from_Dt][to_Dt]
278
+
279
+ if from_Dt is to_Dt:
280
+ assert(from_dt is from_res)
281
+ assert(to_dt is to_res)
282
+
283
+
284
+ @pytest.mark.filterwarnings("ignore::numpy.ComplexWarning")
285
+ @pytest.mark.parametrize("from_dt", simple_dtype_instances())
286
+ def test_simple_direct_casts(self, from_dt):
287
+ """
288
+ This test checks numeric direct casts for dtypes supported also by the
289
+ struct module (plus complex). It tries to be test a wide range of
290
+ inputs, but skips over possibly undefined behaviour (e.g. int rollover).
291
+ Longdouble and CLongdouble are tested, but only using double precision.
292
+
293
+ If this test creates issues, it should possibly just be simplified
294
+ or even removed (checking whether unaligned/non-contiguous casts give
295
+ the same results is useful, though).
296
+ """
297
+ for to_dt in simple_dtype_instances():
298
+ to_dt = to_dt.values[0]
299
+ cast = get_castingimpl(type(from_dt), type(to_dt))
300
+
301
+ casting, (from_res, to_res), view_off = cast._resolve_descriptors(
302
+ (from_dt, to_dt))
303
+
304
+ if from_res is not from_dt or to_res is not to_dt:
305
+ # Do not test this case, it is handled in multiple steps,
306
+ # each of which should is tested individually.
307
+ return
308
+
309
+ safe = casting <= Casting.safe
310
+ del from_res, to_res, casting
311
+
312
+ arr1, arr2, values = self.get_data(from_dt, to_dt)
313
+
314
+ cast._simple_strided_call((arr1, arr2))
315
+
316
+ # Check via python list
317
+ assert arr2.tolist() == values
318
+
319
+ # Check that the same results are achieved for strided loops
320
+ arr1_o, arr2_o = self.get_data_variation(arr1, arr2, True, False)
321
+ cast._simple_strided_call((arr1_o, arr2_o))
322
+
323
+ assert_array_equal(arr2_o, arr2)
324
+ assert arr2_o.tobytes() == arr2.tobytes()
325
+
326
+ # Check if alignment makes a difference, but only if supported
327
+ # and only if the alignment can be wrong
328
+ if ((from_dt.alignment == 1 and to_dt.alignment == 1) or
329
+ not cast._supports_unaligned):
330
+ return
331
+
332
+ arr1_o, arr2_o = self.get_data_variation(arr1, arr2, False, True)
333
+ cast._simple_strided_call((arr1_o, arr2_o))
334
+
335
+ assert_array_equal(arr2_o, arr2)
336
+ assert arr2_o.tobytes() == arr2.tobytes()
337
+
338
+ arr1_o, arr2_o = self.get_data_variation(arr1, arr2, False, False)
339
+ cast._simple_strided_call((arr1_o, arr2_o))
340
+
341
+ assert_array_equal(arr2_o, arr2)
342
+ assert arr2_o.tobytes() == arr2.tobytes()
343
+
344
+ del arr1_o, arr2_o, cast
345
+
346
+ @pytest.mark.parametrize("from_Dt", simple_dtypes)
347
+ def test_numeric_to_times(self, from_Dt):
348
+ # We currently only implement contiguous loops, so only need to
349
+ # test those.
350
+ from_dt = from_Dt()
351
+
352
+ time_dtypes = [np.dtype("M8"), np.dtype("M8[ms]"), np.dtype("M8[4D]"),
353
+ np.dtype("m8"), np.dtype("m8[ms]"), np.dtype("m8[4D]")]
354
+ for time_dt in time_dtypes:
355
+ cast = get_castingimpl(type(from_dt), type(time_dt))
356
+
357
+ casting, (from_res, to_res), view_off = cast._resolve_descriptors(
358
+ (from_dt, time_dt))
359
+
360
+ assert from_res is from_dt
361
+ assert to_res is time_dt
362
+ del from_res, to_res
363
+
364
+ assert casting & CAST_TABLE[from_Dt][type(time_dt)]
365
+ assert view_off is None
366
+
367
+ int64_dt = np.dtype(np.int64)
368
+ arr1, arr2, values = self.get_data(from_dt, int64_dt)
369
+ arr2 = arr2.view(time_dt)
370
+ arr2[...] = np.datetime64("NaT")
371
+
372
+ if time_dt == np.dtype("M8"):
373
+ # This is a bit of a strange path, and could probably be removed
374
+ arr1[-1] = 0 # ensure at least one value is not NaT
375
+
376
+ # The cast currently succeeds, but the values are invalid:
377
+ cast._simple_strided_call((arr1, arr2))
378
+ with pytest.raises(ValueError):
379
+ str(arr2[-1]) # e.g. conversion to string fails
380
+ return
381
+
382
+ cast._simple_strided_call((arr1, arr2))
383
+
384
+ assert [int(v) for v in arr2.tolist()] == values
385
+
386
+ # Check that the same results are achieved for strided loops
387
+ arr1_o, arr2_o = self.get_data_variation(arr1, arr2, True, False)
388
+ cast._simple_strided_call((arr1_o, arr2_o))
389
+
390
+ assert_array_equal(arr2_o, arr2)
391
+ assert arr2_o.tobytes() == arr2.tobytes()
392
+
393
+ @pytest.mark.parametrize(
394
+ ["from_dt", "to_dt", "expected_casting", "expected_view_off",
395
+ "nom", "denom"],
396
+ [("M8[ns]", None, Casting.no, 0, 1, 1),
397
+ (str(np.dtype("M8[ns]").newbyteorder()), None,
398
+ Casting.equiv, None, 1, 1),
399
+ ("M8", "M8[ms]", Casting.safe, 0, 1, 1),
400
+ # should be invalid cast:
401
+ ("M8[ms]", "M8", Casting.unsafe, None, 1, 1),
402
+ ("M8[5ms]", "M8[5ms]", Casting.no, 0, 1, 1),
403
+ ("M8[ns]", "M8[ms]", Casting.same_kind, None, 1, 10**6),
404
+ ("M8[ms]", "M8[ns]", Casting.safe, None, 10**6, 1),
405
+ ("M8[ms]", "M8[7ms]", Casting.same_kind, None, 1, 7),
406
+ ("M8[4D]", "M8[1M]", Casting.same_kind, None, None,
407
+ # give full values based on NumPy 1.19.x
408
+ [-2**63, 0, -1, 1314, -1315, 564442610]),
409
+ ("m8[ns]", None, Casting.no, 0, 1, 1),
410
+ (str(np.dtype("m8[ns]").newbyteorder()), None,
411
+ Casting.equiv, None, 1, 1),
412
+ ("m8", "m8[ms]", Casting.safe, 0, 1, 1),
413
+ # should be invalid cast:
414
+ ("m8[ms]", "m8", Casting.unsafe, None, 1, 1),
415
+ ("m8[5ms]", "m8[5ms]", Casting.no, 0, 1, 1),
416
+ ("m8[ns]", "m8[ms]", Casting.same_kind, None, 1, 10**6),
417
+ ("m8[ms]", "m8[ns]", Casting.safe, None, 10**6, 1),
418
+ ("m8[ms]", "m8[7ms]", Casting.same_kind, None, 1, 7),
419
+ ("m8[4D]", "m8[1M]", Casting.unsafe, None, None,
420
+ # give full values based on NumPy 1.19.x
421
+ [-2**63, 0, 0, 1314, -1315, 564442610])])
422
+ def test_time_to_time(self, from_dt, to_dt,
423
+ expected_casting, expected_view_off,
424
+ nom, denom):
425
+ from_dt = np.dtype(from_dt)
426
+ if to_dt is not None:
427
+ to_dt = np.dtype(to_dt)
428
+
429
+ # Test a few values for casting (results generated with NumPy 1.19)
430
+ values = np.array([-2**63, 1, 2**63-1, 10000, -10000, 2**32])
431
+ values = values.astype(np.dtype("int64").newbyteorder(from_dt.byteorder))
432
+ assert values.dtype.byteorder == from_dt.byteorder
433
+ assert np.isnat(values.view(from_dt)[0])
434
+
435
+ DType = type(from_dt)
436
+ cast = get_castingimpl(DType, DType)
437
+ casting, (from_res, to_res), view_off = cast._resolve_descriptors(
438
+ (from_dt, to_dt))
439
+ assert from_res is from_dt
440
+ assert to_res is to_dt or to_dt is None
441
+ assert casting == expected_casting
442
+ assert view_off == expected_view_off
443
+
444
+ if nom is not None:
445
+ expected_out = (values * nom // denom).view(to_res)
446
+ expected_out[0] = "NaT"
447
+ else:
448
+ expected_out = np.empty_like(values)
449
+ expected_out[...] = denom
450
+ expected_out = expected_out.view(to_dt)
451
+
452
+ orig_arr = values.view(from_dt)
453
+ orig_out = np.empty_like(expected_out)
454
+
455
+ if casting == Casting.unsafe and (to_dt == "m8" or to_dt == "M8"):
456
+ # Casting from non-generic to generic units is an error and should
457
+ # probably be reported as an invalid cast earlier.
458
+ with pytest.raises(ValueError):
459
+ cast._simple_strided_call((orig_arr, orig_out))
460
+ return
461
+
462
+ for aligned in [True, True]:
463
+ for contig in [True, True]:
464
+ arr, out = self.get_data_variation(
465
+ orig_arr, orig_out, aligned, contig)
466
+ out[...] = 0
467
+ cast._simple_strided_call((arr, out))
468
+ assert_array_equal(out.view("int64"), expected_out.view("int64"))
469
+
470
+ def string_with_modified_length(self, dtype, change_length):
471
+ fact = 1 if dtype.char == "S" else 4
472
+ length = dtype.itemsize // fact + change_length
473
+ return np.dtype(f"{dtype.byteorder}{dtype.char}{length}")
474
+
475
+ @pytest.mark.parametrize("other_DT", simple_dtypes)
476
+ @pytest.mark.parametrize("string_char", ["S", "U"])
477
+ def test_string_cancast(self, other_DT, string_char):
478
+ fact = 1 if string_char == "S" else 4
479
+
480
+ string_DT = type(np.dtype(string_char))
481
+ cast = get_castingimpl(other_DT, string_DT)
482
+
483
+ other_dt = other_DT()
484
+ expected_length = get_expected_stringlength(other_dt)
485
+ string_dt = np.dtype(f"{string_char}{expected_length}")
486
+
487
+ safety, (res_other_dt, res_dt), view_off = cast._resolve_descriptors(
488
+ (other_dt, None))
489
+ assert res_dt.itemsize == expected_length * fact
490
+ assert safety == Casting.safe # we consider to string casts "safe"
491
+ assert view_off is None
492
+ assert isinstance(res_dt, string_DT)
493
+
494
+ # These casts currently implement changing the string length, so
495
+ # check the cast-safety for too long/fixed string lengths:
496
+ for change_length in [-1, 0, 1]:
497
+ if change_length >= 0:
498
+ expected_safety = Casting.safe
499
+ else:
500
+ expected_safety = Casting.same_kind
501
+
502
+ to_dt = self.string_with_modified_length(string_dt, change_length)
503
+ safety, (_, res_dt), view_off = cast._resolve_descriptors(
504
+ (other_dt, to_dt))
505
+ assert res_dt is to_dt
506
+ assert safety == expected_safety
507
+ assert view_off is None
508
+
509
+ # The opposite direction is always considered unsafe:
510
+ cast = get_castingimpl(string_DT, other_DT)
511
+
512
+ safety, _, view_off = cast._resolve_descriptors((string_dt, other_dt))
513
+ assert safety == Casting.unsafe
514
+ assert view_off is None
515
+
516
+ cast = get_castingimpl(string_DT, other_DT)
517
+ safety, (_, res_dt), view_off = cast._resolve_descriptors(
518
+ (string_dt, None))
519
+ assert safety == Casting.unsafe
520
+ assert view_off is None
521
+ assert other_dt is res_dt # returns the singleton for simple dtypes
522
+
523
+ @pytest.mark.parametrize("string_char", ["S", "U"])
524
+ @pytest.mark.parametrize("other_dt", simple_dtype_instances())
525
+ def test_simple_string_casts_roundtrip(self, other_dt, string_char):
526
+ """
527
+ Tests casts from and to string by checking the roundtripping property.
528
+
529
+ The test also covers some string to string casts (but not all).
530
+
531
+ If this test creates issues, it should possibly just be simplified
532
+ or even removed (checking whether unaligned/non-contiguous casts give
533
+ the same results is useful, though).
534
+ """
535
+ string_DT = type(np.dtype(string_char))
536
+
537
+ cast = get_castingimpl(type(other_dt), string_DT)
538
+ cast_back = get_castingimpl(string_DT, type(other_dt))
539
+ _, (res_other_dt, string_dt), _ = cast._resolve_descriptors(
540
+ (other_dt, None))
541
+
542
+ if res_other_dt is not other_dt:
543
+ # do not support non-native byteorder, skip test in that case
544
+ assert other_dt.byteorder != res_other_dt.byteorder
545
+ return
546
+
547
+ orig_arr, values = self.get_data(other_dt, None)
548
+ str_arr = np.zeros(len(orig_arr), dtype=string_dt)
549
+ string_dt_short = self.string_with_modified_length(string_dt, -1)
550
+ str_arr_short = np.zeros(len(orig_arr), dtype=string_dt_short)
551
+ string_dt_long = self.string_with_modified_length(string_dt, 1)
552
+ str_arr_long = np.zeros(len(orig_arr), dtype=string_dt_long)
553
+
554
+ assert not cast._supports_unaligned # if support is added, should test
555
+ assert not cast_back._supports_unaligned
556
+
557
+ for contig in [True, False]:
558
+ other_arr, str_arr = self.get_data_variation(
559
+ orig_arr, str_arr, True, contig)
560
+ _, str_arr_short = self.get_data_variation(
561
+ orig_arr, str_arr_short.copy(), True, contig)
562
+ _, str_arr_long = self.get_data_variation(
563
+ orig_arr, str_arr_long, True, contig)
564
+
565
+ cast._simple_strided_call((other_arr, str_arr))
566
+
567
+ cast._simple_strided_call((other_arr, str_arr_short))
568
+ assert_array_equal(str_arr.astype(string_dt_short), str_arr_short)
569
+
570
+ cast._simple_strided_call((other_arr, str_arr_long))
571
+ assert_array_equal(str_arr, str_arr_long)
572
+
573
+ if other_dt.kind == "b":
574
+ # Booleans do not roundtrip
575
+ continue
576
+
577
+ other_arr[...] = 0
578
+ cast_back._simple_strided_call((str_arr, other_arr))
579
+ assert_array_equal(orig_arr, other_arr)
580
+
581
+ other_arr[...] = 0
582
+ cast_back._simple_strided_call((str_arr_long, other_arr))
583
+ assert_array_equal(orig_arr, other_arr)
584
+
585
+ @pytest.mark.parametrize("other_dt", ["S8", "<U8", ">U8"])
586
+ @pytest.mark.parametrize("string_char", ["S", "U"])
587
+ def test_string_to_string_cancast(self, other_dt, string_char):
588
+ other_dt = np.dtype(other_dt)
589
+
590
+ fact = 1 if string_char == "S" else 4
591
+ div = 1 if other_dt.char == "S" else 4
592
+
593
+ string_DT = type(np.dtype(string_char))
594
+ cast = get_castingimpl(type(other_dt), string_DT)
595
+
596
+ expected_length = other_dt.itemsize // div
597
+ string_dt = np.dtype(f"{string_char}{expected_length}")
598
+
599
+ safety, (res_other_dt, res_dt), view_off = cast._resolve_descriptors(
600
+ (other_dt, None))
601
+ assert res_dt.itemsize == expected_length * fact
602
+ assert isinstance(res_dt, string_DT)
603
+
604
+ expected_view_off = None
605
+ if other_dt.char == string_char:
606
+ if other_dt.isnative:
607
+ expected_safety = Casting.no
608
+ expected_view_off = 0
609
+ else:
610
+ expected_safety = Casting.equiv
611
+ elif string_char == "U":
612
+ expected_safety = Casting.safe
613
+ else:
614
+ expected_safety = Casting.unsafe
615
+
616
+ assert view_off == expected_view_off
617
+ assert expected_safety == safety
618
+
619
+ for change_length in [-1, 0, 1]:
620
+ to_dt = self.string_with_modified_length(string_dt, change_length)
621
+ safety, (_, res_dt), view_off = cast._resolve_descriptors(
622
+ (other_dt, to_dt))
623
+
624
+ assert res_dt is to_dt
625
+ if change_length <= 0:
626
+ assert view_off == expected_view_off
627
+ else:
628
+ assert view_off is None
629
+ if expected_safety == Casting.unsafe:
630
+ assert safety == expected_safety
631
+ elif change_length < 0:
632
+ assert safety == Casting.same_kind
633
+ elif change_length == 0:
634
+ assert safety == expected_safety
635
+ elif change_length > 0:
636
+ assert safety == Casting.safe
637
+
638
+ @pytest.mark.parametrize("order1", [">", "<"])
639
+ @pytest.mark.parametrize("order2", [">", "<"])
640
+ def test_unicode_byteswapped_cast(self, order1, order2):
641
+ # Very specific tests (not using the castingimpl directly)
642
+ # that tests unicode bytedwaps including for unaligned array data.
643
+ dtype1 = np.dtype(f"{order1}U30")
644
+ dtype2 = np.dtype(f"{order2}U30")
645
+ data1 = np.empty(30 * 4 + 1, dtype=np.uint8)[1:].view(dtype1)
646
+ data2 = np.empty(30 * 4 + 1, dtype=np.uint8)[1:].view(dtype2)
647
+ if dtype1.alignment != 1:
648
+ # alignment should always be >1, but skip the check if not
649
+ assert not data1.flags.aligned
650
+ assert not data2.flags.aligned
651
+
652
+ element = "this is a ünicode string‽"
653
+ data1[()] = element
654
+ # Test both `data1` and `data1.copy()` (which should be aligned)
655
+ for data in [data1, data1.copy()]:
656
+ data2[...] = data1
657
+ assert data2[()] == element
658
+ assert data2.copy()[()] == element
659
+
660
+ def test_void_to_string_special_case(self):
661
+ # Cover a small special case in void to string casting that could
662
+ # probably just as well be turned into an error (compare
663
+ # `test_object_to_parametric_internal_error` below).
664
+ assert np.array([], dtype="V5").astype("S").dtype.itemsize == 5
665
+ assert np.array([], dtype="V5").astype("U").dtype.itemsize == 4 * 5
666
+
667
+ def test_object_to_parametric_internal_error(self):
668
+ # We reject casting from object to a parametric type, without
669
+ # figuring out the correct instance first.
670
+ object_dtype = type(np.dtype(object))
671
+ other_dtype = type(np.dtype(str))
672
+ cast = get_castingimpl(object_dtype, other_dtype)
673
+ with pytest.raises(TypeError,
674
+ match="casting from object to the parametric DType"):
675
+ cast._resolve_descriptors((np.dtype("O"), None))
676
+
677
+ @pytest.mark.parametrize("dtype", simple_dtype_instances())
678
+ def test_object_and_simple_resolution(self, dtype):
679
+ # Simple test to exercise the cast when no instance is specified
680
+ object_dtype = type(np.dtype(object))
681
+ cast = get_castingimpl(object_dtype, type(dtype))
682
+
683
+ safety, (_, res_dt), view_off = cast._resolve_descriptors(
684
+ (np.dtype("O"), dtype))
685
+ assert safety == Casting.unsafe
686
+ assert view_off is None
687
+ assert res_dt is dtype
688
+
689
+ safety, (_, res_dt), view_off = cast._resolve_descriptors(
690
+ (np.dtype("O"), None))
691
+ assert safety == Casting.unsafe
692
+ assert view_off is None
693
+ assert res_dt == dtype.newbyteorder("=")
694
+
695
+ @pytest.mark.parametrize("dtype", simple_dtype_instances())
696
+ def test_simple_to_object_resolution(self, dtype):
697
+ # Simple test to exercise the cast when no instance is specified
698
+ object_dtype = type(np.dtype(object))
699
+ cast = get_castingimpl(type(dtype), object_dtype)
700
+
701
+ safety, (_, res_dt), view_off = cast._resolve_descriptors(
702
+ (dtype, None))
703
+ assert safety == Casting.safe
704
+ assert view_off is None
705
+ assert res_dt is np.dtype("O")
706
+
707
+ @pytest.mark.parametrize("casting", ["no", "unsafe"])
708
+ def test_void_and_structured_with_subarray(self, casting):
709
+ # test case corresponding to gh-19325
710
+ dtype = np.dtype([("foo", "<f4", (3, 2))])
711
+ expected = casting == "unsafe"
712
+ assert np.can_cast("V4", dtype, casting=casting) == expected
713
+ assert np.can_cast(dtype, "V4", casting=casting) == expected
714
+
715
+ @pytest.mark.parametrize(["to_dt", "expected_off"],
716
+ [ # Same as `from_dt` but with both fields shifted:
717
+ (np.dtype({"names": ["a", "b"], "formats": ["i4", "f4"],
718
+ "offsets": [0, 4]}), 2),
719
+ # Additional change of the names
720
+ (np.dtype({"names": ["b", "a"], "formats": ["i4", "f4"],
721
+ "offsets": [0, 4]}), 2),
722
+ # Incompatible field offset change
723
+ (np.dtype({"names": ["b", "a"], "formats": ["i4", "f4"],
724
+ "offsets": [0, 6]}), None)])
725
+ def test_structured_field_offsets(self, to_dt, expected_off):
726
+ # This checks the cast-safety and view offset for swapped and "shifted"
727
+ # fields which are viewable
728
+ from_dt = np.dtype({"names": ["a", "b"],
729
+ "formats": ["i4", "f4"],
730
+ "offsets": [2, 6]})
731
+ cast = get_castingimpl(type(from_dt), type(to_dt))
732
+ safety, _, view_off = cast._resolve_descriptors((from_dt, to_dt))
733
+ if from_dt.names == to_dt.names:
734
+ assert safety == Casting.equiv
735
+ else:
736
+ assert safety == Casting.safe
737
+ # Shifting the original data pointer by -2 will align both by
738
+ # effectively adding 2 bytes of spacing before `from_dt`.
739
+ assert view_off == expected_off
740
+
741
+ @pytest.mark.parametrize(("from_dt", "to_dt", "expected_off"), [
742
+ # Subarray cases:
743
+ ("i", "(1,1)i", 0),
744
+ ("(1,1)i", "i", 0),
745
+ ("(2,1)i", "(2,1)i", 0),
746
+ # field cases (field to field is tested explicitly also):
747
+ # Not considered viewable, because a negative offset would allow
748
+ # may structured dtype to indirectly access invalid memory.
749
+ ("i", dict(names=["a"], formats=["i"], offsets=[2]), None),
750
+ (dict(names=["a"], formats=["i"], offsets=[2]), "i", 2),
751
+ # Currently considered not viewable, due to multiple fields
752
+ # even though they overlap (maybe we should not allow that?)
753
+ ("i", dict(names=["a", "b"], formats=["i", "i"], offsets=[2, 2]),
754
+ None),
755
+ # different number of fields can't work, should probably just fail
756
+ # so it never reports "viewable":
757
+ ("i,i", "i,i,i", None),
758
+ # Unstructured void cases:
759
+ ("i4", "V3", 0), # void smaller or equal
760
+ ("i4", "V4", 0), # void smaller or equal
761
+ ("i4", "V10", None), # void is larger (no view)
762
+ ("O", "V4", None), # currently reject objects for view here.
763
+ ("O", "V8", None), # currently reject objects for view here.
764
+ ("V4", "V3", 0),
765
+ ("V4", "V4", 0),
766
+ ("V3", "V4", None),
767
+ # Note that currently void-to-other cast goes via byte-strings
768
+ # and is not a "view" based cast like the opposite direction:
769
+ ("V4", "i4", None),
770
+ # completely invalid/impossible cast:
771
+ ("i,i", "i,i,i", None),
772
+ ])
773
+ def test_structured_view_offsets_paramteric(
774
+ self, from_dt, to_dt, expected_off):
775
+ # TODO: While this test is fairly thorough, right now, it does not
776
+ # really test some paths that may have nonzero offsets (they don't
777
+ # really exists).
778
+ from_dt = np.dtype(from_dt)
779
+ to_dt = np.dtype(to_dt)
780
+ cast = get_castingimpl(type(from_dt), type(to_dt))
781
+ _, _, view_off = cast._resolve_descriptors((from_dt, to_dt))
782
+ assert view_off == expected_off
783
+
784
+ @pytest.mark.parametrize("dtype", np.typecodes["All"])
785
+ def test_object_casts_NULL_None_equivalence(self, dtype):
786
+ # None to <other> casts may succeed or fail, but a NULL'ed array must
787
+ # behave the same as one filled with None's.
788
+ arr_normal = np.array([None] * 5)
789
+ arr_NULLs = np.empty_like([None] * 5)
790
+ # If the check fails (maybe it should) the test would lose its purpose:
791
+ assert arr_NULLs.tobytes() == b"\x00" * arr_NULLs.nbytes
792
+
793
+ try:
794
+ expected = arr_normal.astype(dtype)
795
+ except TypeError:
796
+ with pytest.raises(TypeError):
797
+ arr_NULLs.astype(dtype),
798
+ else:
799
+ assert_array_equal(expected, arr_NULLs.astype(dtype))
800
+
801
+ @pytest.mark.parametrize("dtype",
802
+ np.typecodes["AllInteger"] + np.typecodes["AllFloat"])
803
+ def test_nonstandard_bool_to_other(self, dtype):
804
+ # simple test for casting bool_ to numeric types, which should not
805
+ # expose the detail that NumPy bools can sometimes take values other
806
+ # than 0 and 1. See also gh-19514.
807
+ nonstandard_bools = np.array([0, 3, -7], dtype=np.int8).view(bool)
808
+ res = nonstandard_bools.astype(dtype)
809
+ expected = [0, 1, 1]
810
+ assert_array_equal(res, expected)
811
+
wemm/lib/python3.10/site-packages/numpy/core/tests/test_cpu_dispatcher.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from numpy.core._multiarray_umath import __cpu_features__, __cpu_baseline__, __cpu_dispatch__
2
+ from numpy.core import _umath_tests
3
+ from numpy.testing import assert_equal
4
+
5
def test_dispatcher():
    """
    Testing the utilities of the CPU dispatcher
    """
    targets = (
        "SSE2", "SSE41", "AVX2",
        "VSX", "VSX2", "VSX3",
        "NEON", "ASIMD", "ASIMDHP"
    )
    highest_sfx = ""  # no suffix for the baseline
    all_sfx = []
    for feature in reversed(targets):
        # Baseline features are skipped: by default `CCompilerOpt` does not
        # generate separate objects for the baseline -- they are combined
        # into one object selected via the 'baseline' configuration option.
        if feature in __cpu_baseline__:
            continue
        # check compiler and running machine support
        if feature not in __cpu_dispatch__ or not __cpu_features__[feature]:
            continue

        # `targets` is walked highest-priority-last, so the first surviving
        # feature is the one the dispatcher should have selected.
        if not highest_sfx:
            highest_sfx = "_" + feature
        all_sfx.append("func" + "_" + feature)

    test = _umath_tests.test_dispatch()
    assert_equal(test["func"], "func" + highest_sfx)
    assert_equal(test["var"], "var" + highest_sfx)

    if highest_sfx:
        assert_equal(test["func_xb"], "func" + highest_sfx)
        assert_equal(test["var_xb"], "var" + highest_sfx)
    else:
        assert_equal(test["func_xb"], "nobase")
        assert_equal(test["var_xb"], "nobase")

    all_sfx.append("func")  # add the baseline
    assert_equal(test["all"], all_sfx)
+ assert_equal(test["all"], all_sfx)
wemm/lib/python3.10/site-packages/numpy/core/tests/test_cpu_features.py ADDED
@@ -0,0 +1,185 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys, platform, re, pytest
2
+ from numpy.core._multiarray_umath import __cpu_features__
3
+
4
+ def assert_features_equal(actual, desired, fname):
5
+ __tracebackhide__ = True # Hide traceback for py.test
6
+ actual, desired = str(actual), str(desired)
7
+ if actual == desired:
8
+ return
9
+ detected = str(__cpu_features__).replace("'", "")
10
+ try:
11
+ with open("/proc/cpuinfo", "r") as fd:
12
+ cpuinfo = fd.read(2048)
13
+ except Exception as err:
14
+ cpuinfo = str(err)
15
+
16
+ try:
17
+ import subprocess
18
+ auxv = subprocess.check_output(['/bin/true'], env=dict(LD_SHOW_AUXV="1"))
19
+ auxv = auxv.decode()
20
+ except Exception as err:
21
+ auxv = str(err)
22
+
23
+ import textwrap
24
+ error_report = textwrap.indent(
25
+ """
26
+ ###########################################
27
+ ### Extra debugging information
28
+ ###########################################
29
+ -------------------------------------------
30
+ --- NumPy Detections
31
+ -------------------------------------------
32
+ %s
33
+ -------------------------------------------
34
+ --- SYS / CPUINFO
35
+ -------------------------------------------
36
+ %s....
37
+ -------------------------------------------
38
+ --- SYS / AUXV
39
+ -------------------------------------------
40
+ %s
41
+ """ % (detected, cpuinfo, auxv), prefix='\r')
42
+
43
+ raise AssertionError((
44
+ "Failure Detection\n"
45
+ " NAME: '%s'\n"
46
+ " ACTUAL: %s\n"
47
+ " DESIRED: %s\n"
48
+ "%s"
49
+ ) % (fname, actual, desired, error_report))
50
+
51
+ class AbstractTest:
52
+ features = []
53
+ features_groups = {}
54
+ features_map = {}
55
+ features_flags = set()
56
+
57
+ def load_flags(self):
58
+ # a hook
59
+ pass
60
+ def test_features(self):
61
+ self.load_flags()
62
+ for gname, features in self.features_groups.items():
63
+ test_features = [self.cpu_have(f) for f in features]
64
+ assert_features_equal(__cpu_features__.get(gname), all(test_features), gname)
65
+
66
+ for feature_name in self.features:
67
+ cpu_have = self.cpu_have(feature_name)
68
+ npy_have = __cpu_features__.get(feature_name)
69
+ assert_features_equal(npy_have, cpu_have, feature_name)
70
+
71
+ def cpu_have(self, feature_name):
72
+ map_names = self.features_map.get(feature_name, feature_name)
73
+ if isinstance(map_names, str):
74
+ return map_names in self.features_flags
75
+ for f in map_names:
76
+ if f in self.features_flags:
77
+ return True
78
+ return False
79
+
80
+ def load_flags_cpuinfo(self, magic_key):
81
+ self.features_flags = self.get_cpuinfo_item(magic_key)
82
+
83
+ def get_cpuinfo_item(self, magic_key):
84
+ values = set()
85
+ with open('/proc/cpuinfo') as fd:
86
+ for line in fd:
87
+ if not line.startswith(magic_key):
88
+ continue
89
+ flags_value = [s.strip() for s in line.split(':', 1)]
90
+ if len(flags_value) == 2:
91
+ values = values.union(flags_value[1].upper().split())
92
+ return values
93
+
94
+ def load_flags_auxv(self):
95
+ import subprocess
96
+ auxv = subprocess.check_output(['/bin/true'], env=dict(LD_SHOW_AUXV="1"))
97
+ for at in auxv.split(b'\n'):
98
+ if not at.startswith(b"AT_HWCAP"):
99
+ continue
100
+ hwcap_value = [s.strip() for s in at.split(b':', 1)]
101
+ if len(hwcap_value) == 2:
102
+ self.features_flags = self.features_flags.union(
103
+ hwcap_value[1].upper().decode().split()
104
+ )
105
+
106
+ is_linux = sys.platform.startswith('linux')
107
+ is_cygwin = sys.platform.startswith('cygwin')
108
+ machine = platform.machine()
109
+ is_x86 = re.match("^(amd64|x86|i386|i686)", machine, re.IGNORECASE)
110
+ @pytest.mark.skipif(
111
+ not (is_linux or is_cygwin) or not is_x86, reason="Only for Linux and x86"
112
+ )
113
+ class Test_X86_Features(AbstractTest):
114
+ features = [
115
+ "MMX", "SSE", "SSE2", "SSE3", "SSSE3", "SSE41", "POPCNT", "SSE42",
116
+ "AVX", "F16C", "XOP", "FMA4", "FMA3", "AVX2", "AVX512F", "AVX512CD",
117
+ "AVX512ER", "AVX512PF", "AVX5124FMAPS", "AVX5124VNNIW", "AVX512VPOPCNTDQ",
118
+ "AVX512VL", "AVX512BW", "AVX512DQ", "AVX512VNNI", "AVX512IFMA",
119
+ "AVX512VBMI", "AVX512VBMI2", "AVX512BITALG",
120
+ ]
121
+ features_groups = dict(
122
+ AVX512_KNL = ["AVX512F", "AVX512CD", "AVX512ER", "AVX512PF"],
123
+ AVX512_KNM = ["AVX512F", "AVX512CD", "AVX512ER", "AVX512PF", "AVX5124FMAPS",
124
+ "AVX5124VNNIW", "AVX512VPOPCNTDQ"],
125
+ AVX512_SKX = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL"],
126
+ AVX512_CLX = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512VNNI"],
127
+ AVX512_CNL = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512IFMA",
128
+ "AVX512VBMI"],
129
+ AVX512_ICL = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512IFMA",
130
+ "AVX512VBMI", "AVX512VNNI", "AVX512VBMI2", "AVX512BITALG", "AVX512VPOPCNTDQ"],
131
+ )
132
+ features_map = dict(
133
+ SSE3="PNI", SSE41="SSE4_1", SSE42="SSE4_2", FMA3="FMA",
134
+ AVX512VNNI="AVX512_VNNI", AVX512BITALG="AVX512_BITALG", AVX512VBMI2="AVX512_VBMI2",
135
+ AVX5124FMAPS="AVX512_4FMAPS", AVX5124VNNIW="AVX512_4VNNIW", AVX512VPOPCNTDQ="AVX512_VPOPCNTDQ",
136
+ )
137
+ def load_flags(self):
138
+ self.load_flags_cpuinfo("flags")
139
+
140
+ is_power = re.match("^(powerpc|ppc)64", machine, re.IGNORECASE)
141
+ @pytest.mark.skipif(not is_linux or not is_power, reason="Only for Linux and Power")
142
+ class Test_POWER_Features(AbstractTest):
143
+ features = ["VSX", "VSX2", "VSX3", "VSX4"]
144
+ features_map = dict(VSX2="ARCH_2_07", VSX3="ARCH_3_00", VSX4="ARCH_3_1")
145
+
146
+ def load_flags(self):
147
+ self.load_flags_auxv()
148
+
149
+
150
+ is_zarch = re.match("^(s390x)", machine, re.IGNORECASE)
151
+ @pytest.mark.skipif(not is_linux or not is_zarch,
152
+ reason="Only for Linux and IBM Z")
153
+ class Test_ZARCH_Features(AbstractTest):
154
+ features = ["VX", "VXE", "VXE2"]
155
+
156
+ def load_flags(self):
157
+ self.load_flags_auxv()
158
+
159
+
160
+ is_arm = re.match("^(arm|aarch64)", machine, re.IGNORECASE)
161
+ @pytest.mark.skipif(not is_linux or not is_arm, reason="Only for Linux and ARM")
162
+ class Test_ARM_Features(AbstractTest):
163
+ features = [
164
+ "NEON", "ASIMD", "FPHP", "ASIMDHP", "ASIMDDP", "ASIMDFHM"
165
+ ]
166
+ features_groups = dict(
167
+ NEON_FP16 = ["NEON", "HALF"],
168
+ NEON_VFPV4 = ["NEON", "VFPV4"],
169
+ )
170
+ def load_flags(self):
171
+ self.load_flags_cpuinfo("Features")
172
+ arch = self.get_cpuinfo_item("CPU architecture")
173
+ # in case of mounting virtual filesystem of aarch64 kernel
174
+ is_rootfs_v8 = int('0'+next(iter(arch))) > 7 if arch else 0
175
+ if re.match("^(aarch64|AARCH64)", machine) or is_rootfs_v8:
176
+ self.features_map = dict(
177
+ NEON="ASIMD", HALF="ASIMD", VFPV4="ASIMD"
178
+ )
179
+ else:
180
+ self.features_map = dict(
181
+ # ELF auxiliary vector and /proc/cpuinfo on Linux kernel(armv8 aarch32)
182
+ # doesn't provide information about ASIMD, so we assume that ASIMD is supported
183
+ # if the kernel reports any one of the following ARM8 features.
184
+ ASIMD=("AES", "SHA1", "SHA2", "PMULL", "CRC32")
185
+ )
wemm/lib/python3.10/site-packages/numpy/core/tests/test_custom_dtypes.py ADDED
@@ -0,0 +1,201 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+
3
+ import numpy as np
4
+ from numpy.testing import assert_array_equal
5
+ from numpy.core._multiarray_umath import (
6
+ _discover_array_parameters as discover_array_params, _get_sfloat_dtype)
7
+
8
+
9
+ SF = _get_sfloat_dtype()
10
+
11
+
12
+ class TestSFloat:
13
+ def _get_array(self, scaling, aligned=True):
14
+ if not aligned:
15
+ a = np.empty(3*8 + 1, dtype=np.uint8)[1:]
16
+ a = a.view(np.float64)
17
+ a[:] = [1., 2., 3.]
18
+ else:
19
+ a = np.array([1., 2., 3.])
20
+
21
+ a *= 1./scaling # the casting code also uses the reciprocal.
22
+ return a.view(SF(scaling))
23
+
24
+ def test_sfloat_rescaled(self):
25
+ sf = SF(1.)
26
+ sf2 = sf.scaled_by(2.)
27
+ assert sf2.get_scaling() == 2.
28
+ sf6 = sf2.scaled_by(3.)
29
+ assert sf6.get_scaling() == 6.
30
+
31
+ def test_class_discovery(self):
32
+ # This does not test much, since we always discover the scaling as 1.
33
+ # But most of NumPy (when writing) does not understand DType classes
34
+ dt, _ = discover_array_params([1., 2., 3.], dtype=SF)
35
+ assert dt == SF(1.)
36
+
37
+ @pytest.mark.parametrize("scaling", [1., -1., 2.])
38
+ def test_scaled_float_from_floats(self, scaling):
39
+ a = np.array([1., 2., 3.], dtype=SF(scaling))
40
+
41
+ assert a.dtype.get_scaling() == scaling
42
+ assert_array_equal(scaling * a.view(np.float64), [1., 2., 3.])
43
+
44
+ def test_repr(self):
45
+ # Check the repr, mainly to cover the code paths:
46
+ assert repr(SF(scaling=1.)) == "_ScaledFloatTestDType(scaling=1.0)"
47
+
48
+ @pytest.mark.parametrize("scaling", [1., -1., 2.])
49
+ def test_sfloat_from_float(self, scaling):
50
+ a = np.array([1., 2., 3.]).astype(dtype=SF(scaling))
51
+
52
+ assert a.dtype.get_scaling() == scaling
53
+ assert_array_equal(scaling * a.view(np.float64), [1., 2., 3.])
54
+
55
+ @pytest.mark.parametrize("aligned", [True, False])
56
+ @pytest.mark.parametrize("scaling", [1., -1., 2.])
57
+ def test_sfloat_getitem(self, aligned, scaling):
58
+ a = self._get_array(1., aligned)
59
+ assert a.tolist() == [1., 2., 3.]
60
+
61
+ @pytest.mark.parametrize("aligned", [True, False])
62
+ def test_sfloat_casts(self, aligned):
63
+ a = self._get_array(1., aligned)
64
+
65
+ assert np.can_cast(a, SF(-1.), casting="equiv")
66
+ assert not np.can_cast(a, SF(-1.), casting="no")
67
+ na = a.astype(SF(-1.))
68
+ assert_array_equal(-1 * na.view(np.float64), a.view(np.float64))
69
+
70
+ assert np.can_cast(a, SF(2.), casting="same_kind")
71
+ assert not np.can_cast(a, SF(2.), casting="safe")
72
+ a2 = a.astype(SF(2.))
73
+ assert_array_equal(2 * a2.view(np.float64), a.view(np.float64))
74
+
75
+ @pytest.mark.parametrize("aligned", [True, False])
76
+ def test_sfloat_cast_internal_errors(self, aligned):
77
+ a = self._get_array(2e300, aligned)
78
+
79
+ with pytest.raises(TypeError,
80
+ match="error raised inside the core-loop: non-finite factor!"):
81
+ a.astype(SF(2e-300))
82
+
83
+ def test_sfloat_promotion(self):
84
+ assert np.result_type(SF(2.), SF(3.)) == SF(3.)
85
+ assert np.result_type(SF(3.), SF(2.)) == SF(3.)
86
+ # Float64 -> SF(1.) and then promotes normally, so both of this work:
87
+ assert np.result_type(SF(3.), np.float64) == SF(3.)
88
+ assert np.result_type(np.float64, SF(0.5)) == SF(1.)
89
+
90
+ # Test an undefined promotion:
91
+ with pytest.raises(TypeError):
92
+ np.result_type(SF(1.), np.int64)
93
+
94
+ def test_basic_multiply(self):
95
+ a = self._get_array(2.)
96
+ b = self._get_array(4.)
97
+
98
+ res = a * b
99
+ # multiplies dtype scaling and content separately:
100
+ assert res.dtype.get_scaling() == 8.
101
+ expected_view = a.view(np.float64) * b.view(np.float64)
102
+ assert_array_equal(res.view(np.float64), expected_view)
103
+
104
+ def test_possible_and_impossible_reduce(self):
105
+ # For reductions to work, the first and last operand must have the
106
+ # same dtype. For this parametric DType that is not necessarily true.
107
+ a = self._get_array(2.)
108
+ # Addition reductin works (as of writing requires to pass initial
109
+ # because setting a scaled-float from the default `0` fails).
110
+ res = np.add.reduce(a, initial=0.)
111
+ assert res == a.astype(np.float64).sum()
112
+
113
+ # But each multiplication changes the factor, so a reduction is not
114
+ # possible (the relaxed version of the old refusal to handle any
115
+ # flexible dtype).
116
+ with pytest.raises(TypeError,
117
+ match="the resolved dtypes are not compatible"):
118
+ np.multiply.reduce(a)
119
+
120
+ def test_basic_ufunc_at(self):
121
+ float_a = np.array([1., 2., 3.])
122
+ b = self._get_array(2.)
123
+
124
+ float_b = b.view(np.float64).copy()
125
+ np.multiply.at(float_b, [1, 1, 1], float_a)
126
+ np.multiply.at(b, [1, 1, 1], float_a)
127
+
128
+ assert_array_equal(b.view(np.float64), float_b)
129
+
130
+ def test_basic_multiply_promotion(self):
131
+ float_a = np.array([1., 2., 3.])
132
+ b = self._get_array(2.)
133
+
134
+ res1 = float_a * b
135
+ res2 = b * float_a
136
+
137
+ # one factor is one, so we get the factor of b:
138
+ assert res1.dtype == res2.dtype == b.dtype
139
+ expected_view = float_a * b.view(np.float64)
140
+ assert_array_equal(res1.view(np.float64), expected_view)
141
+ assert_array_equal(res2.view(np.float64), expected_view)
142
+
143
+ # Check that promotion works when `out` is used:
144
+ np.multiply(b, float_a, out=res2)
145
+ with pytest.raises(TypeError):
146
+ # The promoter accepts this (maybe it should not), but the SFloat
147
+ # result cannot be cast to integer:
148
+ np.multiply(b, float_a, out=np.arange(3))
149
+
150
+ def test_basic_addition(self):
151
+ a = self._get_array(2.)
152
+ b = self._get_array(4.)
153
+
154
+ res = a + b
155
+ # addition uses the type promotion rules for the result:
156
+ assert res.dtype == np.result_type(a.dtype, b.dtype)
157
+ expected_view = (a.astype(res.dtype).view(np.float64) +
158
+ b.astype(res.dtype).view(np.float64))
159
+ assert_array_equal(res.view(np.float64), expected_view)
160
+
161
+ def test_addition_cast_safety(self):
162
+ """The addition method is special for the scaled float, because it
163
+ includes the "cast" between different factors, thus cast-safety
164
+ is influenced by the implementation.
165
+ """
166
+ a = self._get_array(2.)
167
+ b = self._get_array(-2.)
168
+ c = self._get_array(3.)
169
+
170
+ # sign change is "equiv":
171
+ np.add(a, b, casting="equiv")
172
+ with pytest.raises(TypeError):
173
+ np.add(a, b, casting="no")
174
+
175
+ # Different factor is "same_kind" (default) so check that "safe" fails
176
+ with pytest.raises(TypeError):
177
+ np.add(a, c, casting="safe")
178
+
179
+ # Check that casting the output fails also (done by the ufunc here)
180
+ with pytest.raises(TypeError):
181
+ np.add(a, a, out=c, casting="safe")
182
+
183
+ @pytest.mark.parametrize("ufunc",
184
+ [np.logical_and, np.logical_or, np.logical_xor])
185
+ def test_logical_ufuncs_casts_to_bool(self, ufunc):
186
+ a = self._get_array(2.)
187
+ a[0] = 0. # make sure first element is considered False.
188
+
189
+ float_equiv = a.astype(float)
190
+ expected = ufunc(float_equiv, float_equiv)
191
+ res = ufunc(a, a)
192
+ assert_array_equal(res, expected)
193
+
194
+ # also check that the same works for reductions:
195
+ expected = ufunc.reduce(float_equiv)
196
+ res = ufunc.reduce(a)
197
+ assert_array_equal(res, expected)
198
+
199
+ # The output casting does not match the bool, bool -> bool loop:
200
+ with pytest.raises(TypeError):
201
+ ufunc(a, a, out=np.empty(a.shape, dtype=int), casting="equiv")
wemm/lib/python3.10/site-packages/numpy/core/tests/test_cython.py ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import shutil
3
+ import subprocess
4
+ import sys
5
+ import pytest
6
+
7
+ import numpy as np
8
+
9
+ # This import is copied from random.tests.test_extending
10
+ try:
11
+ import cython
12
+ from Cython.Compiler.Version import version as cython_version
13
+ except ImportError:
14
+ cython = None
15
+ else:
16
+ from numpy.compat import _pep440
17
+
18
+ # Cython 0.29.30 is required for Python 3.11 and there are
19
+ # other fixes in the 0.29 series that are needed even for earlier
20
+ # Python versions.
21
+ # Note: keep in sync with the one in pyproject.toml
22
+ required_version = "0.29.30"
23
+ if _pep440.parse(cython_version) < _pep440.Version(required_version):
24
+ # too old or wrong cython, skip the test
25
+ cython = None
26
+
27
+ pytestmark = pytest.mark.skipif(cython is None, reason="requires cython")
28
+
29
+
30
+ @pytest.fixture
31
+ def install_temp(request, tmp_path):
32
+ # Based in part on test_cython from random.tests.test_extending
33
+
34
+ here = os.path.dirname(__file__)
35
+ ext_dir = os.path.join(here, "examples", "cython")
36
+
37
+ cytest = str(tmp_path / "cytest")
38
+
39
+ shutil.copytree(ext_dir, cytest)
40
+ # build the examples and "install" them into a temporary directory
41
+
42
+ install_log = str(tmp_path / "tmp_install_log.txt")
43
+ subprocess.check_output(
44
+ [
45
+ sys.executable,
46
+ "setup.py",
47
+ "build",
48
+ "install",
49
+ "--prefix", str(tmp_path / "installdir"),
50
+ "--single-version-externally-managed",
51
+ "--record",
52
+ install_log,
53
+ ],
54
+ cwd=cytest,
55
+ )
56
+
57
+ # In order to import the built module, we need its path to sys.path
58
+ # so parse that out of the record
59
+ with open(install_log) as fid:
60
+ for line in fid:
61
+ if "checks" in line:
62
+ sys.path.append(os.path.dirname(line))
63
+ break
64
+ else:
65
+ raise RuntimeError(f'could not parse "{install_log}"')
66
+
67
+
68
+ def test_is_timedelta64_object(install_temp):
69
+ import checks
70
+
71
+ assert checks.is_td64(np.timedelta64(1234))
72
+ assert checks.is_td64(np.timedelta64(1234, "ns"))
73
+ assert checks.is_td64(np.timedelta64("NaT", "ns"))
74
+
75
+ assert not checks.is_td64(1)
76
+ assert not checks.is_td64(None)
77
+ assert not checks.is_td64("foo")
78
+ assert not checks.is_td64(np.datetime64("now", "s"))
79
+
80
+
81
+ def test_is_datetime64_object(install_temp):
82
+ import checks
83
+
84
+ assert checks.is_dt64(np.datetime64(1234, "ns"))
85
+ assert checks.is_dt64(np.datetime64("NaT", "ns"))
86
+
87
+ assert not checks.is_dt64(1)
88
+ assert not checks.is_dt64(None)
89
+ assert not checks.is_dt64("foo")
90
+ assert not checks.is_dt64(np.timedelta64(1234))
91
+
92
+
93
+ def test_get_datetime64_value(install_temp):
94
+ import checks
95
+
96
+ dt64 = np.datetime64("2016-01-01", "ns")
97
+
98
+ result = checks.get_dt64_value(dt64)
99
+ expected = dt64.view("i8")
100
+
101
+ assert result == expected
102
+
103
+
104
+ def test_get_timedelta64_value(install_temp):
105
+ import checks
106
+
107
+ td64 = np.timedelta64(12345, "h")
108
+
109
+ result = checks.get_td64_value(td64)
110
+ expected = td64.view("i8")
111
+
112
+ assert result == expected
113
+
114
+
115
+ def test_get_datetime64_unit(install_temp):
116
+ import checks
117
+
118
+ dt64 = np.datetime64("2016-01-01", "ns")
119
+ result = checks.get_dt64_unit(dt64)
120
+ expected = 10
121
+ assert result == expected
122
+
123
+ td64 = np.timedelta64(12345, "h")
124
+ result = checks.get_dt64_unit(td64)
125
+ expected = 5
126
+ assert result == expected
127
+
128
+
129
+ def test_abstract_scalars(install_temp):
130
+ import checks
131
+
132
+ assert checks.is_integer(1)
133
+ assert checks.is_integer(np.int8(1))
134
+ assert checks.is_integer(np.uint64(1))
wemm/lib/python3.10/site-packages/numpy/core/tests/test_defchararray.py ADDED
@@ -0,0 +1,673 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import numpy as np
3
+ from numpy.core.multiarray import _vec_string
4
+ from numpy.testing import (
5
+ assert_, assert_equal, assert_array_equal, assert_raises,
6
+ assert_raises_regex
7
+ )
8
+
9
+ kw_unicode_true = {'unicode': True} # make 2to3 work properly
10
+ kw_unicode_false = {'unicode': False}
11
+
12
+ class TestBasic:
13
+ def test_from_object_array(self):
14
+ A = np.array([['abc', 2],
15
+ ['long ', '0123456789']], dtype='O')
16
+ B = np.char.array(A)
17
+ assert_equal(B.dtype.itemsize, 10)
18
+ assert_array_equal(B, [[b'abc', b'2'],
19
+ [b'long', b'0123456789']])
20
+
21
+ def test_from_object_array_unicode(self):
22
+ A = np.array([['abc', u'Sigma \u03a3'],
23
+ ['long ', '0123456789']], dtype='O')
24
+ assert_raises(ValueError, np.char.array, (A,))
25
+ B = np.char.array(A, **kw_unicode_true)
26
+ assert_equal(B.dtype.itemsize, 10 * np.array('a', 'U').dtype.itemsize)
27
+ assert_array_equal(B, [['abc', u'Sigma \u03a3'],
28
+ ['long', '0123456789']])
29
+
30
+ def test_from_string_array(self):
31
+ A = np.array([[b'abc', b'foo'],
32
+ [b'long ', b'0123456789']])
33
+ assert_equal(A.dtype.type, np.string_)
34
+ B = np.char.array(A)
35
+ assert_array_equal(B, A)
36
+ assert_equal(B.dtype, A.dtype)
37
+ assert_equal(B.shape, A.shape)
38
+ B[0, 0] = 'changed'
39
+ assert_(B[0, 0] != A[0, 0])
40
+ C = np.char.asarray(A)
41
+ assert_array_equal(C, A)
42
+ assert_equal(C.dtype, A.dtype)
43
+ C[0, 0] = 'changed again'
44
+ assert_(C[0, 0] != B[0, 0])
45
+ assert_(C[0, 0] == A[0, 0])
46
+
47
+ def test_from_unicode_array(self):
48
+ A = np.array([['abc', u'Sigma \u03a3'],
49
+ ['long ', '0123456789']])
50
+ assert_equal(A.dtype.type, np.unicode_)
51
+ B = np.char.array(A)
52
+ assert_array_equal(B, A)
53
+ assert_equal(B.dtype, A.dtype)
54
+ assert_equal(B.shape, A.shape)
55
+ B = np.char.array(A, **kw_unicode_true)
56
+ assert_array_equal(B, A)
57
+ assert_equal(B.dtype, A.dtype)
58
+ assert_equal(B.shape, A.shape)
59
+
60
+ def fail():
61
+ np.char.array(A, **kw_unicode_false)
62
+
63
+ assert_raises(UnicodeEncodeError, fail)
64
+
65
+ def test_unicode_upconvert(self):
66
+ A = np.char.array(['abc'])
67
+ B = np.char.array([u'\u03a3'])
68
+ assert_(issubclass((A + B).dtype.type, np.unicode_))
69
+
70
+ def test_from_string(self):
71
+ A = np.char.array(b'abc')
72
+ assert_equal(len(A), 1)
73
+ assert_equal(len(A[0]), 3)
74
+ assert_(issubclass(A.dtype.type, np.string_))
75
+
76
+ def test_from_unicode(self):
77
+ A = np.char.array(u'\u03a3')
78
+ assert_equal(len(A), 1)
79
+ assert_equal(len(A[0]), 1)
80
+ assert_equal(A.itemsize, 4)
81
+ assert_(issubclass(A.dtype.type, np.unicode_))
82
+
83
+ class TestVecString:
84
+ def test_non_existent_method(self):
85
+
86
+ def fail():
87
+ _vec_string('a', np.string_, 'bogus')
88
+
89
+ assert_raises(AttributeError, fail)
90
+
91
+ def test_non_string_array(self):
92
+
93
+ def fail():
94
+ _vec_string(1, np.string_, 'strip')
95
+
96
+ assert_raises(TypeError, fail)
97
+
98
+ def test_invalid_args_tuple(self):
99
+
100
+ def fail():
101
+ _vec_string(['a'], np.string_, 'strip', 1)
102
+
103
+ assert_raises(TypeError, fail)
104
+
105
+ def test_invalid_type_descr(self):
106
+
107
+ def fail():
108
+ _vec_string(['a'], 'BOGUS', 'strip')
109
+
110
+ assert_raises(TypeError, fail)
111
+
112
+ def test_invalid_function_args(self):
113
+
114
+ def fail():
115
+ _vec_string(['a'], np.string_, 'strip', (1,))
116
+
117
+ assert_raises(TypeError, fail)
118
+
119
+ def test_invalid_result_type(self):
120
+
121
+ def fail():
122
+ _vec_string(['a'], np.int_, 'strip')
123
+
124
+ assert_raises(TypeError, fail)
125
+
126
+ def test_broadcast_error(self):
127
+
128
+ def fail():
129
+ _vec_string([['abc', 'def']], np.int_, 'find', (['a', 'd', 'j'],))
130
+
131
+ assert_raises(ValueError, fail)
132
+
133
+
134
+ class TestWhitespace:
135
+ def setup_method(self):
136
+ self.A = np.array([['abc ', '123 '],
137
+ ['789 ', 'xyz ']]).view(np.chararray)
138
+ self.B = np.array([['abc', '123'],
139
+ ['789', 'xyz']]).view(np.chararray)
140
+
141
+ def test1(self):
142
+ assert_(np.all(self.A == self.B))
143
+ assert_(np.all(self.A >= self.B))
144
+ assert_(np.all(self.A <= self.B))
145
+ assert_(not np.any(self.A > self.B))
146
+ assert_(not np.any(self.A < self.B))
147
+ assert_(not np.any(self.A != self.B))
148
+
149
+ class TestChar:
150
+ def setup_method(self):
151
+ self.A = np.array('abc1', dtype='c').view(np.chararray)
152
+
153
+ def test_it(self):
154
+ assert_equal(self.A.shape, (4,))
155
+ assert_equal(self.A.upper()[:2].tobytes(), b'AB')
156
+
157
+ class TestComparisons:
158
+ def setup_method(self):
159
+ self.A = np.array([['abc', '123'],
160
+ ['789', 'xyz']]).view(np.chararray)
161
+ self.B = np.array([['efg', '123 '],
162
+ ['051', 'tuv']]).view(np.chararray)
163
+
164
+ def test_not_equal(self):
165
+ assert_array_equal((self.A != self.B), [[True, False], [True, True]])
166
+
167
+ def test_equal(self):
168
+ assert_array_equal((self.A == self.B), [[False, True], [False, False]])
169
+
170
+ def test_greater_equal(self):
171
+ assert_array_equal((self.A >= self.B), [[False, True], [True, True]])
172
+
173
+ def test_less_equal(self):
174
+ assert_array_equal((self.A <= self.B), [[True, True], [False, False]])
175
+
176
+ def test_greater(self):
177
+ assert_array_equal((self.A > self.B), [[False, False], [True, True]])
178
+
179
+ def test_less(self):
180
+ assert_array_equal((self.A < self.B), [[True, False], [False, False]])
181
+
182
+ def test_type(self):
183
+ out1 = np.char.equal(self.A, self.B)
184
+ out2 = np.char.equal('a', 'a')
185
+ assert_(isinstance(out1, np.ndarray))
186
+ assert_(isinstance(out2, np.ndarray))
187
+
188
+ class TestComparisonsMixed1(TestComparisons):
189
+ """Ticket #1276"""
190
+
191
+ def setup_method(self):
192
+ TestComparisons.setup_method(self)
193
+ self.B = np.array([['efg', '123 '],
194
+ ['051', 'tuv']], np.unicode_).view(np.chararray)
195
+
196
+ class TestComparisonsMixed2(TestComparisons):
197
+ """Ticket #1276"""
198
+
199
+ def setup_method(self):
200
+ TestComparisons.setup_method(self)
201
+ self.A = np.array([['abc', '123'],
202
+ ['789', 'xyz']], np.unicode_).view(np.chararray)
203
+
204
+ class TestInformation:
205
+ def setup_method(self):
206
+ self.A = np.array([[' abc ', ''],
207
+ ['12345', 'MixedCase'],
208
+ ['123 \t 345 \0 ', 'UPPER']]).view(np.chararray)
209
+ self.B = np.array([[u' \u03a3 ', u''],
210
+ [u'12345', u'MixedCase'],
211
+ [u'123 \t 345 \0 ', u'UPPER']]).view(np.chararray)
212
+
213
+ def test_len(self):
214
+ assert_(issubclass(np.char.str_len(self.A).dtype.type, np.integer))
215
+ assert_array_equal(np.char.str_len(self.A), [[5, 0], [5, 9], [12, 5]])
216
+ assert_array_equal(np.char.str_len(self.B), [[3, 0], [5, 9], [12, 5]])
217
+
218
+ def test_count(self):
219
+ assert_(issubclass(self.A.count('').dtype.type, np.integer))
220
+ assert_array_equal(self.A.count('a'), [[1, 0], [0, 1], [0, 0]])
221
+ assert_array_equal(self.A.count('123'), [[0, 0], [1, 0], [1, 0]])
222
+ # Python doesn't seem to like counting NULL characters
223
+ # assert_array_equal(self.A.count('\0'), [[0, 0], [0, 0], [1, 0]])
224
+ assert_array_equal(self.A.count('a', 0, 2), [[1, 0], [0, 0], [0, 0]])
225
+ assert_array_equal(self.B.count('a'), [[0, 0], [0, 1], [0, 0]])
226
+ assert_array_equal(self.B.count('123'), [[0, 0], [1, 0], [1, 0]])
227
+ # assert_array_equal(self.B.count('\0'), [[0, 0], [0, 0], [1, 0]])
228
+
229
+ def test_endswith(self):
230
+ assert_(issubclass(self.A.endswith('').dtype.type, np.bool_))
231
+ assert_array_equal(self.A.endswith(' '), [[1, 0], [0, 0], [1, 0]])
232
+ assert_array_equal(self.A.endswith('3', 0, 3), [[0, 0], [1, 0], [1, 0]])
233
+
234
+ def fail():
235
+ self.A.endswith('3', 'fdjk')
236
+
237
+ assert_raises(TypeError, fail)
238
+
239
+ def test_find(self):
240
+ assert_(issubclass(self.A.find('a').dtype.type, np.integer))
241
+ assert_array_equal(self.A.find('a'), [[1, -1], [-1, 6], [-1, -1]])
242
+ assert_array_equal(self.A.find('3'), [[-1, -1], [2, -1], [2, -1]])
243
+ assert_array_equal(self.A.find('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]])
244
+ assert_array_equal(self.A.find(['1', 'P']), [[-1, -1], [0, -1], [0, 1]])
245
+
246
+ def test_index(self):
247
+
248
+ def fail():
249
+ self.A.index('a')
250
+
251
+ assert_raises(ValueError, fail)
252
+ assert_(np.char.index('abcba', 'b') == 1)
253
+ assert_(issubclass(np.char.index('abcba', 'b').dtype.type, np.integer))
254
+
255
+ def test_isalnum(self):
256
+ assert_(issubclass(self.A.isalnum().dtype.type, np.bool_))
257
+ assert_array_equal(self.A.isalnum(), [[False, False], [True, True], [False, True]])
258
+
259
+ def test_isalpha(self):
260
+ assert_(issubclass(self.A.isalpha().dtype.type, np.bool_))
261
+ assert_array_equal(self.A.isalpha(), [[False, False], [False, True], [False, True]])
262
+
263
+ def test_isdigit(self):
264
+ assert_(issubclass(self.A.isdigit().dtype.type, np.bool_))
265
+ assert_array_equal(self.A.isdigit(), [[False, False], [True, False], [False, False]])
266
+
267
+ def test_islower(self):
268
+ assert_(issubclass(self.A.islower().dtype.type, np.bool_))
269
+ assert_array_equal(self.A.islower(), [[True, False], [False, False], [False, False]])
270
+
271
+ def test_isspace(self):
272
+ assert_(issubclass(self.A.isspace().dtype.type, np.bool_))
273
+ assert_array_equal(self.A.isspace(), [[False, False], [False, False], [False, False]])
274
+
275
+ def test_istitle(self):
276
+ assert_(issubclass(self.A.istitle().dtype.type, np.bool_))
277
+ assert_array_equal(self.A.istitle(), [[False, False], [False, False], [False, False]])
278
+
279
+ def test_isupper(self):
280
+ assert_(issubclass(self.A.isupper().dtype.type, np.bool_))
281
+ assert_array_equal(self.A.isupper(), [[False, False], [False, False], [False, True]])
282
+
283
+ def test_rfind(self):
284
+ assert_(issubclass(self.A.rfind('a').dtype.type, np.integer))
285
+ assert_array_equal(self.A.rfind('a'), [[1, -1], [-1, 6], [-1, -1]])
286
+ assert_array_equal(self.A.rfind('3'), [[-1, -1], [2, -1], [6, -1]])
287
+ assert_array_equal(self.A.rfind('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]])
288
+ assert_array_equal(self.A.rfind(['1', 'P']), [[-1, -1], [0, -1], [0, 2]])
289
+
290
+ def test_rindex(self):
291
+
292
+ def fail():
293
+ self.A.rindex('a')
294
+
295
+ assert_raises(ValueError, fail)
296
+ assert_(np.char.rindex('abcba', 'b') == 3)
297
+ assert_(issubclass(np.char.rindex('abcba', 'b').dtype.type, np.integer))
298
+
299
+ def test_startswith(self):
300
+ assert_(issubclass(self.A.startswith('').dtype.type, np.bool_))
301
+ assert_array_equal(self.A.startswith(' '), [[1, 0], [0, 0], [0, 0]])
302
+ assert_array_equal(self.A.startswith('1', 0, 3), [[0, 0], [1, 0], [1, 0]])
303
+
304
+ def fail():
305
+ self.A.startswith('3', 'fdjk')
306
+
307
+ assert_raises(TypeError, fail)
308
+
309
+
310
class TestMethods:
    """Exercise the string-transformation methods of np.chararray.

    ``A`` holds byte strings (dtype ``'S'``) and ``B`` the matching
    unicode data (including a non-ASCII Greek sigma), so both the bytes
    and the str code paths are covered.

    The canonical scalar-type names ``np.bytes_`` / ``np.str_`` are used
    instead of the legacy aliases ``np.string_`` / ``np.unicode_``: the
    legacy names were removed in NumPy 2.0, while the canonical ones are
    the same objects in 1.x and remain available in 2.x.
    """

    def setup_method(self):
        self.A = np.array([[' abc ', ''],
                           ['12345', 'MixedCase'],
                           ['123 \t 345 \0 ', 'UPPER']],
                          dtype='S').view(np.chararray)
        self.B = np.array([[' \u03a3 ', ''],
                           ['12345', 'MixedCase'],
                           ['123 \t 345 \0 ', 'UPPER']]).view(np.chararray)

    def test_capitalize(self):
        """First character uppercased, the remainder lowercased."""
        tgt = [[b' abc ', b''],
               [b'12345', b'Mixedcase'],
               [b'123 \t 345 \0 ', b'Upper']]
        assert_(issubclass(self.A.capitalize().dtype.type, np.bytes_))
        assert_array_equal(self.A.capitalize(), tgt)

        tgt = [[' \u03c3 ', ''],
               ['12345', 'Mixedcase'],
               ['123 \t 345 \0 ', 'Upper']]
        assert_(issubclass(self.B.capitalize().dtype.type, np.str_))
        assert_array_equal(self.B.capitalize(), tgt)

    def test_center(self):
        """center() pads both sides; widths and fill chars broadcast."""
        assert_(issubclass(self.A.center(10).dtype.type, np.bytes_))
        C = self.A.center([10, 20])
        assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]])

        C = self.A.center(20, b'#')
        assert_(np.all(C.startswith(b'#')))
        assert_(np.all(C.endswith(b'#')))

        C = np.char.center(b'FOO', [[10, 20], [15, 8]])
        # Python centering convention: extra pad char goes on the right.
        tgt = [[b'   FOO    ', b'        FOO         '],
               [b'      FOO      ', b'  FOO   ']]
        assert_(issubclass(C.dtype.type, np.bytes_))
        assert_array_equal(C, tgt)

    def test_decode(self):
        """bytes -> str decoding with a named codec."""
        A = np.char.array([b'\\u03a3'])
        assert_(A.decode('unicode-escape')[0] == '\u03a3')

    def test_encode(self):
        """str -> bytes encoding with a named codec."""
        B = self.B.encode('unicode_escape')
        assert_(B[0][0] == str(' \\u03a3 ').encode('latin1'))

    def test_expandtabs(self):
        """Tabs expand to multiples of 8 columns (chararray item access
        strips trailing whitespace, hence no trailing blank here)."""
        T = self.A.expandtabs()
        assert_(T[2, 0] == b'123      345 \0')

    def test_join(self):
        # NOTE: list(b'123') == [49, 50, 51]
        # so that b','.join(b'123') results to an error on Py3
        A0 = self.A.decode('ascii')

        A = np.char.join([',', '#'], A0)
        assert_(issubclass(A.dtype.type, np.str_))
        tgt = np.array([[' ,a,b,c, ', ''],
                        ['1,2,3,4,5', 'M#i#x#e#d#C#a#s#e'],
                        ['1,2,3, ,\t, ,3,4,5, ,\x00, ', 'U#P#P#E#R']])
        assert_array_equal(np.char.join([',', '#'], A0), tgt)

    def test_ljust(self):
        """ljust() pads on the right; widths and fill chars broadcast."""
        assert_(issubclass(self.A.ljust(10).dtype.type, np.bytes_))

        C = self.A.ljust([10, 20])
        assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]])

        C = self.A.ljust(20, b'#')
        assert_array_equal(C.startswith(b'#'), [
                [False, True], [False, False], [False, False]])
        assert_(np.all(C.endswith(b'#')))

        C = np.char.ljust(b'FOO', [[10, 20], [15, 8]])
        tgt = [[b'FOO       ', b'FOO                 '],
               [b'FOO            ', b'FOO     ']]
        assert_(issubclass(C.dtype.type, np.bytes_))
        assert_array_equal(C, tgt)

    def test_lower(self):
        """Elementwise lowercasing for both bytes and unicode arrays."""
        tgt = [[b' abc ', b''],
               [b'12345', b'mixedcase'],
               [b'123 \t 345 \0 ', b'upper']]
        assert_(issubclass(self.A.lower().dtype.type, np.bytes_))
        assert_array_equal(self.A.lower(), tgt)

        tgt = [[' \u03c3 ', ''],
               ['12345', 'mixedcase'],
               ['123 \t 345 \0 ', 'upper']]
        assert_(issubclass(self.B.lower().dtype.type, np.str_))
        assert_array_equal(self.B.lower(), tgt)

    def test_lstrip(self):
        """lstrip() with no argument removes leading whitespace; with an
        argument it removes leading characters from the given set."""
        tgt = [[b'abc ', b''],
               [b'12345', b'MixedCase'],
               [b'123 \t 345 \0 ', b'UPPER']]
        assert_(issubclass(self.A.lstrip().dtype.type, np.bytes_))
        assert_array_equal(self.A.lstrip(), tgt)

        tgt = [[b' abc', b''],
               [b'2345', b'ixedCase'],
               [b'23 \t 345 \x00', b'UPPER']]
        assert_array_equal(self.A.lstrip([b'1', b'M']), tgt)

        tgt = [['\u03a3 ', ''],
               ['12345', 'MixedCase'],
               ['123 \t 345 \0 ', 'UPPER']]
        assert_(issubclass(self.B.lstrip().dtype.type, np.str_))
        assert_array_equal(self.B.lstrip(), tgt)

    def test_partition(self):
        """partition() splits on the first separator occurrence into
        (head, sep, tail); a missing separator leaves head == element."""
        P = self.A.partition([b'3', b'M'])
        tgt = [[(b' abc ', b'', b''), (b'', b'', b'')],
               [(b'12', b'3', b'45'), (b'', b'M', b'ixedCase')],
               [(b'12', b'3', b' \t 345 \0 '), (b'UPPER', b'', b'')]]
        assert_(issubclass(P.dtype.type, np.bytes_))
        assert_array_equal(P, tgt)

    def test_replace(self):
        """replace() substitutes every occurrence, widening the result
        dtype as needed for longer replacements."""
        R = self.A.replace([b'3', b'a'],
                           [b'##########', b'@'])
        tgt = [[b' abc ', b''],
               [b'12##########45', b'MixedC@se'],
               [b'12########## \t ##########45 \x00', b'UPPER']]
        assert_(issubclass(R.dtype.type, np.bytes_))
        assert_array_equal(R, tgt)

    def test_rjust(self):
        """rjust() pads on the left; widths and fill chars broadcast."""
        assert_(issubclass(self.A.rjust(10).dtype.type, np.bytes_))

        C = self.A.rjust([10, 20])
        assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]])

        C = self.A.rjust(20, b'#')
        assert_(np.all(C.startswith(b'#')))
        assert_array_equal(C.endswith(b'#'),
                           [[False, True], [False, False], [False, False]])

        C = np.char.rjust(b'FOO', [[10, 20], [15, 8]])
        tgt = [[b'       FOO', b'                 FOO'],
               [b'            FOO', b'     FOO']]
        assert_(issubclass(C.dtype.type, np.bytes_))
        assert_array_equal(C, tgt)

    def test_rpartition(self):
        """rpartition() splits on the LAST separator occurrence; a
        missing separator leaves tail == element."""
        P = self.A.rpartition([b'3', b'M'])
        tgt = [[(b'', b'', b' abc '), (b'', b'', b'')],
               [(b'12', b'3', b'45'), (b'', b'M', b'ixedCase')],
               [(b'123 \t ', b'3', b'45 \0 '), (b'', b'', b'UPPER')]]
        assert_(issubclass(P.dtype.type, np.bytes_))
        assert_array_equal(P, tgt)

    def test_rsplit(self):
        """rsplit() returns an object array of per-element lists."""
        A = self.A.rsplit(b'3')
        tgt = [[[b' abc '], [b'']],
               [[b'12', b'45'], [b'MixedCase']],
               [[b'12', b' \t ', b'45 \x00 '], [b'UPPER']]]
        assert_(issubclass(A.dtype.type, np.object_))
        assert_equal(A.tolist(), tgt)

    def test_rstrip(self):
        """rstrip() with no argument removes trailing whitespace (and the
        trailing NUL of the 'S' dtype); with an argument it removes
        trailing characters from the given set."""
        assert_(issubclass(self.A.rstrip().dtype.type, np.bytes_))

        tgt = [[b' abc', b''],
               [b'12345', b'MixedCase'],
               [b'123 \t 345', b'UPPER']]
        assert_array_equal(self.A.rstrip(), tgt)

        tgt = [[b' abc ', b''],
               [b'1234', b'MixedCase'],
               [b'123 \t 345 \x00', b'UPP']
               ]
        assert_array_equal(self.A.rstrip([b'5', b'ER']), tgt)

        tgt = [[' \u03a3', ''],
               ['12345', 'MixedCase'],
               ['123 \t 345', 'UPPER']]
        assert_(issubclass(self.B.rstrip().dtype.type, np.str_))
        assert_array_equal(self.B.rstrip(), tgt)

    def test_strip(self):
        """strip() removes from both ends."""
        tgt = [[b'abc', b''],
               [b'12345', b'MixedCase'],
               [b'123 \t 345', b'UPPER']]
        assert_(issubclass(self.A.strip().dtype.type, np.bytes_))
        assert_array_equal(self.A.strip(), tgt)

        tgt = [[b' abc ', b''],
               [b'234', b'ixedCas'],
               [b'23 \t 345 \x00', b'UPP']]
        assert_array_equal(self.A.strip([b'15', b'EReM']), tgt)

        tgt = [['\u03a3', ''],
               ['12345', 'MixedCase'],
               ['123 \t 345', 'UPPER']]
        assert_(issubclass(self.B.strip().dtype.type, np.str_))
        assert_array_equal(self.B.strip(), tgt)

    def test_split(self):
        """split() returns an object array of per-element lists."""
        A = self.A.split(b'3')
        tgt = [
            [[b' abc '], [b'']],
            [[b'12', b'45'], [b'MixedCase']],
            [[b'12', b' \t ', b'45 \x00 '], [b'UPPER']]]
        assert_(issubclass(A.dtype.type, np.object_))
        assert_equal(A.tolist(), tgt)

    def test_splitlines(self):
        """splitlines() yields a per-element list of lines."""
        A = np.char.array(['abc\nfds\nwer']).splitlines()
        assert_(issubclass(A.dtype.type, np.object_))
        assert_(A.shape == (1,))
        assert_(len(A[0]) == 3)

    def test_swapcase(self):
        """swapcase() inverts the case of every cased character."""
        tgt = [[b' ABC ', b''],
               [b'12345', b'mIXEDcASE'],
               [b'123 \t 345 \0 ', b'upper']]
        assert_(issubclass(self.A.swapcase().dtype.type, np.bytes_))
        assert_array_equal(self.A.swapcase(), tgt)

        tgt = [[' \u03c3 ', ''],
               ['12345', 'mIXEDcASE'],
               ['123 \t 345 \0 ', 'upper']]
        assert_(issubclass(self.B.swapcase().dtype.type, np.str_))
        assert_array_equal(self.B.swapcase(), tgt)

    def test_title(self):
        """title() uppercases the first letter of each word."""
        tgt = [[b' Abc ', b''],
               [b'12345', b'Mixedcase'],
               [b'123 \t 345 \0 ', b'Upper']]
        assert_(issubclass(self.A.title().dtype.type, np.bytes_))
        assert_array_equal(self.A.title(), tgt)

        tgt = [[' \u03a3 ', ''],
               ['12345', 'Mixedcase'],
               ['123 \t 345 \0 ', 'Upper']]
        assert_(issubclass(self.B.title().dtype.type, np.str_))
        assert_array_equal(self.B.title(), tgt)

    def test_upper(self):
        """Elementwise uppercasing for both bytes and unicode arrays."""
        tgt = [[b' ABC ', b''],
               [b'12345', b'MIXEDCASE'],
               [b'123 \t 345 \0 ', b'UPPER']]
        assert_(issubclass(self.A.upper().dtype.type, np.bytes_))
        assert_array_equal(self.A.upper(), tgt)

        tgt = [[' \u03a3 ', ''],
               ['12345', 'MIXEDCASE'],
               ['123 \t 345 \0 ', 'UPPER']]
        assert_(issubclass(self.B.upper().dtype.type, np.str_))
        assert_array_equal(self.B.upper(), tgt)

    def test_isnumeric(self):
        """isnumeric() is unicode-only: bytes arrays raise TypeError."""

        def fail():
            self.A.isnumeric()

        assert_raises(TypeError, fail)
        assert_(issubclass(self.B.isnumeric().dtype.type, np.bool_))
        assert_array_equal(self.B.isnumeric(), [
                [False, False], [True, False], [False, False]])

    def test_isdecimal(self):
        """isdecimal() is unicode-only: bytes arrays raise TypeError."""

        def fail():
            self.A.isdecimal()

        assert_raises(TypeError, fail)
        assert_(issubclass(self.B.isdecimal().dtype.type, np.bool_))
        assert_array_equal(self.B.isdecimal(), [
                [False, False], [True, False], [False, False]])
582
+
583
class TestOperations:
    """Arithmetic-style operators (+, *, %) and slicing on chararrays."""

    def setup_method(self):
        self.A = np.array([['abc', '123'],
                           ['789', 'xyz']]).view(np.chararray)
        self.B = np.array([['efg', '456'],
                           ['051', 'tuv']]).view(np.chararray)

    def test_add(self):
        """'+' concatenates elementwise and widens the itemsize."""
        expected = np.array([['abcefg', '123456'],
                             ['789051', 'xyztuv']]).view(np.chararray)
        assert_array_equal(expected, (self.A + self.B))
        assert_(len((self.A + self.B)[0][0]) == 6)

    def test_radd(self):
        """A plain str on the left broadcasts over every element."""
        expected = np.array([['qabc', 'q123'],
                             ['q789', 'qxyz']]).view(np.chararray)
        assert_array_equal(expected, ('q' + self.A))

    def test_mul(self):
        """'*' with an int repeats each element; anything else raises."""
        base = self.A
        for reps in (2, 3, 5, 7, 197):
            expected = np.array(
                [[base[0, 0]*reps, base[0, 1]*reps],
                 [base[1, 0]*reps, base[1, 1]*reps]]).view(np.chararray)
            assert_array_equal(expected, (self.A * reps))

        for bad in [object(), 'qrs']:
            with assert_raises_regex(ValueError,
                                     'Can only multiply by integers'):
                base*bad

    def test_rmul(self):
        """Reflected '*' behaves the same as the forward form."""
        base = self.A
        for reps in (2, 3, 5, 7, 197):
            expected = np.array(
                [[base[0, 0]*reps, base[0, 1]*reps],
                 [base[1, 0]*reps, base[1, 1]*reps]]).view(np.chararray)
            assert_array_equal(expected, (reps * self.A))

        for bad in [object(), 'qrs']:
            with assert_raises_regex(ValueError,
                                     'Can only multiply by integers'):
                bad * base

    def test_mod(self):
        """Ticket #856"""
        fmt = np.array([['%d', '%f'], ['%s', '%r']]).view(np.chararray)
        values = np.array([[3, 7], [19, 1]])
        expected = np.array([['3', '7.000000'],
                             ['19', '1']]).view(np.chararray)
        assert_array_equal(expected, fmt % values)

        fmt2 = np.array([['%.3f', '%d'], ['%s', '%r']]).view(np.chararray)
        # Scalar right operand broadcasts to every format element.
        expected_scalar = np.array([['1.000', '1'],
                                    ['1', '1']]).view(np.chararray)
        assert_array_equal(expected_scalar, (fmt2 % 1))

        expected_array = np.array([['1.000', '2'],
                                   ['3', '4']]).view(np.chararray)
        assert_array_equal(expected_array, (fmt2 % [[1, 2], [3, 4]]))

    def test_rmod(self):
        """A chararray on the right of '%' only works for %s / %r."""
        assert_(("%s" % self.A) == str(self.A))
        assert_(("%r" % self.A) == repr(self.A))

        for bad in [42, object()]:
            with assert_raises_regex(
                    TypeError, "unsupported operand type.* and 'chararray'"):
                bad % self.A

    def test_slice(self):
        """Regression test for https://github.com/numpy/numpy/issues/5982"""
        arr = np.array([['abc ', 'def '], ['geh ', 'ijk ']],
                       dtype='S4').view(np.chararray)

        # Full slices must be views whose .base chain points at arr.
        for view in (arr[:], arr[:, :]):
            assert_array_equal(view, arr)
            assert_(view.base is arr)
            assert_(view.base.base is arr.base)

        # Item access strips the trailing whitespace.
        assert_(arr[0, 0] == b'abc')
667
+
668
def test_empty_indexing():
    """Regression test for ticket 1948."""
    # Indexing a chararray with an empty list/array must yield an empty
    # chararray, not a length-one array holding an empty string.
    s = np.chararray((4,))
    empty = s[[]]
    assert_(empty.size == 0)