koichi12 commited on
Commit
cf9b4fe
·
verified ·
1 Parent(s): 2d7dca4

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +3 -0
  2. .venv/lib/python3.11/site-packages/numpy/core/__pycache__/__init__.cpython-311.pyc +0 -0
  3. .venv/lib/python3.11/site-packages/numpy/core/__pycache__/_add_newdocs_scalars.cpython-311.pyc +0 -0
  4. .venv/lib/python3.11/site-packages/numpy/core/__pycache__/_asarray.cpython-311.pyc +0 -0
  5. .venv/lib/python3.11/site-packages/numpy/core/__pycache__/_dtype.cpython-311.pyc +0 -0
  6. .venv/lib/python3.11/site-packages/numpy/core/__pycache__/_dtype_ctypes.cpython-311.pyc +0 -0
  7. .venv/lib/python3.11/site-packages/numpy/core/__pycache__/_exceptions.cpython-311.pyc +0 -0
  8. .venv/lib/python3.11/site-packages/numpy/core/__pycache__/_internal.cpython-311.pyc +0 -0
  9. .venv/lib/python3.11/site-packages/numpy/core/__pycache__/_machar.cpython-311.pyc +0 -0
  10. .venv/lib/python3.11/site-packages/numpy/core/__pycache__/_methods.cpython-311.pyc +0 -0
  11. .venv/lib/python3.11/site-packages/numpy/core/__pycache__/_string_helpers.cpython-311.pyc +0 -0
  12. .venv/lib/python3.11/site-packages/numpy/core/__pycache__/_type_aliases.cpython-311.pyc +0 -0
  13. .venv/lib/python3.11/site-packages/numpy/core/__pycache__/_ufunc_config.cpython-311.pyc +0 -0
  14. .venv/lib/python3.11/site-packages/numpy/core/__pycache__/cversions.cpython-311.pyc +0 -0
  15. .venv/lib/python3.11/site-packages/numpy/core/__pycache__/defchararray.cpython-311.pyc +0 -0
  16. .venv/lib/python3.11/site-packages/numpy/core/__pycache__/einsumfunc.cpython-311.pyc +0 -0
  17. .venv/lib/python3.11/site-packages/numpy/core/__pycache__/function_base.cpython-311.pyc +0 -0
  18. .venv/lib/python3.11/site-packages/numpy/core/__pycache__/getlimits.cpython-311.pyc +0 -0
  19. .venv/lib/python3.11/site-packages/numpy/core/__pycache__/memmap.cpython-311.pyc +0 -0
  20. .venv/lib/python3.11/site-packages/numpy/core/__pycache__/multiarray.cpython-311.pyc +0 -0
  21. .venv/lib/python3.11/site-packages/numpy/core/__pycache__/numeric.cpython-311.pyc +0 -0
  22. .venv/lib/python3.11/site-packages/numpy/core/__pycache__/numerictypes.cpython-311.pyc +0 -0
  23. .venv/lib/python3.11/site-packages/numpy/core/__pycache__/overrides.cpython-311.pyc +0 -0
  24. .venv/lib/python3.11/site-packages/numpy/core/__pycache__/records.cpython-311.pyc +0 -0
  25. .venv/lib/python3.11/site-packages/numpy/core/__pycache__/shape_base.cpython-311.pyc +0 -0
  26. .venv/lib/python3.11/site-packages/numpy/core/__pycache__/umath.cpython-311.pyc +0 -0
  27. .venv/lib/python3.11/site-packages/numpy/core/__pycache__/umath_tests.cpython-311.pyc +0 -0
  28. .venv/lib/python3.11/site-packages/numpy/core/_multiarray_tests.cpython-311-x86_64-linux-gnu.so +3 -0
  29. .venv/lib/python3.11/site-packages/numpy/core/_multiarray_umath.cpython-311-x86_64-linux-gnu.so +3 -0
  30. .venv/lib/python3.11/site-packages/numpy/core/_simd.cpython-311-x86_64-linux-gnu.so +3 -0
  31. .venv/lib/python3.11/site-packages/numpy/core/tests/_locales.py +74 -0
  32. .venv/lib/python3.11/site-packages/numpy/core/tests/test__exceptions.py +88 -0
  33. .venv/lib/python3.11/site-packages/numpy/core/tests/test_abc.py +54 -0
  34. .venv/lib/python3.11/site-packages/numpy/core/tests/test_argparse.py +62 -0
  35. .venv/lib/python3.11/site-packages/numpy/core/tests/test_array_coercion.py +898 -0
  36. .venv/lib/python3.11/site-packages/numpy/core/tests/test_array_interface.py +219 -0
  37. .venv/lib/python3.11/site-packages/numpy/core/tests/test_arraymethod.py +85 -0
  38. .venv/lib/python3.11/site-packages/numpy/core/tests/test_arrayprint.py +1047 -0
  39. .venv/lib/python3.11/site-packages/numpy/core/tests/test_casting_floatingpoint_errors.py +154 -0
  40. .venv/lib/python3.11/site-packages/numpy/core/tests/test_cpu_features.py +404 -0
  41. .venv/lib/python3.11/site-packages/numpy/core/tests/test_custom_dtypes.py +253 -0
  42. .venv/lib/python3.11/site-packages/numpy/core/tests/test_datetime.py +0 -0
  43. .venv/lib/python3.11/site-packages/numpy/core/tests/test_defchararray.py +686 -0
  44. .venv/lib/python3.11/site-packages/numpy/core/tests/test_deprecations.py +817 -0
  45. .venv/lib/python3.11/site-packages/numpy/core/tests/test_dlpack.py +124 -0
  46. .venv/lib/python3.11/site-packages/numpy/core/tests/test_dtype.py +1906 -0
  47. .venv/lib/python3.11/site-packages/numpy/core/tests/test_einsum.py +1248 -0
  48. .venv/lib/python3.11/site-packages/numpy/core/tests/test_errstate.py +61 -0
  49. .venv/lib/python3.11/site-packages/numpy/core/tests/test_function_base.py +446 -0
  50. .venv/lib/python3.11/site-packages/numpy/core/tests/test_half.py +572 -0
.gitattributes CHANGED
@@ -369,3 +369,6 @@ tuning-competition-baseline/.venv/lib/python3.11/site-packages/nvidia/cudnn/lib/
369
  .venv/lib/python3.11/site-packages/numpy/random/tests/__pycache__/test_random.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
370
  .venv/lib/python3.11/site-packages/numpy/random/mtrand.cpython-311-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
371
  .venv/lib/python3.11/site-packages/numpy/random/tests/__pycache__/test_randomstate.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
 
 
 
 
369
  .venv/lib/python3.11/site-packages/numpy/random/tests/__pycache__/test_random.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
370
  .venv/lib/python3.11/site-packages/numpy/random/mtrand.cpython-311-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
371
  .venv/lib/python3.11/site-packages/numpy/random/tests/__pycache__/test_randomstate.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
372
+ .venv/lib/python3.11/site-packages/numpy/core/_multiarray_umath.cpython-311-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
373
+ .venv/lib/python3.11/site-packages/numpy/core/_multiarray_tests.cpython-311-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
374
+ .venv/lib/python3.11/site-packages/numpy/core/_simd.cpython-311-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
.venv/lib/python3.11/site-packages/numpy/core/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (5.9 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/core/__pycache__/_add_newdocs_scalars.cpython-311.pyc ADDED
Binary file (13.9 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/core/__pycache__/_asarray.cpython-311.pyc ADDED
Binary file (4.66 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/core/__pycache__/_dtype.cpython-311.pyc ADDED
Binary file (13.8 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/core/__pycache__/_dtype_ctypes.cpython-311.pyc ADDED
Binary file (5.29 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/core/__pycache__/_exceptions.cpython-311.pyc ADDED
Binary file (9.42 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/core/__pycache__/_internal.cpython-311.pyc ADDED
Binary file (38.1 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/core/__pycache__/_machar.cpython-311.pyc ADDED
Binary file (12.7 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/core/__pycache__/_methods.cpython-311.pyc ADDED
Binary file (11.7 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/core/__pycache__/_string_helpers.cpython-311.pyc ADDED
Binary file (3.59 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/core/__pycache__/_type_aliases.cpython-311.pyc ADDED
Binary file (9.19 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/core/__pycache__/_ufunc_config.cpython-311.pyc ADDED
Binary file (17.5 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/core/__pycache__/cversions.cpython-311.pyc ADDED
Binary file (719 Bytes). View file
 
.venv/lib/python3.11/site-packages/numpy/core/__pycache__/defchararray.cpython-311.pyc ADDED
Binary file (88.4 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/core/__pycache__/einsumfunc.cpython-311.pyc ADDED
Binary file (54.9 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/core/__pycache__/function_base.cpython-311.pyc ADDED
Binary file (22.3 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/core/__pycache__/getlimits.cpython-311.pyc ADDED
Binary file (30 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/core/__pycache__/memmap.cpython-311.pyc ADDED
Binary file (13.5 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/core/__pycache__/multiarray.cpython-311.pyc ADDED
Binary file (57.4 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/core/__pycache__/numeric.cpython-311.pyc ADDED
Binary file (90.7 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/core/__pycache__/numerictypes.cpython-311.pyc ADDED
Binary file (21.9 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/core/__pycache__/overrides.cpython-311.pyc ADDED
Binary file (7.96 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/core/__pycache__/records.cpython-311.pyc ADDED
Binary file (44.1 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/core/__pycache__/shape_base.cpython-311.pyc ADDED
Binary file (34.1 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/core/__pycache__/umath.cpython-311.pyc ADDED
Binary file (1.81 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/core/__pycache__/umath_tests.cpython-311.pyc ADDED
Binary file (616 Bytes). View file
 
.venv/lib/python3.11/site-packages/numpy/core/_multiarray_tests.cpython-311-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7c4195087f00564c13cd7e3b5682bef7eb2059e51acc3c17450c0ee4650aeabf
3
+ size 175912
.venv/lib/python3.11/site-packages/numpy/core/_multiarray_umath.cpython-311-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a735e4e8355b75c800112af8a5b1731b891b2016ed57067352eea6cab0aa00bc
3
+ size 7426817
.venv/lib/python3.11/site-packages/numpy/core/_simd.cpython-311-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:abff6287d5a407977dc9db05014e6548e1f54a7c4f21dee319819c49bb6bc532
3
+ size 3527040
.venv/lib/python3.11/site-packages/numpy/core/tests/_locales.py ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Provide class for testing in French locale
2
+
3
+ """
4
+ import sys
5
+ import locale
6
+
7
+ import pytest
8
+
9
+ __ALL__ = ['CommaDecimalPointLocale']
10
+
11
+
12
+ def find_comma_decimal_point_locale():
13
+ """See if platform has a decimal point as comma locale.
14
+
15
+ Find a locale that uses a comma instead of a period as the
16
+ decimal point.
17
+
18
+ Returns
19
+ -------
20
+ old_locale: str
21
+ Locale when the function was called.
22
+ new_locale: {str, None)
23
+ First French locale found, None if none found.
24
+
25
+ """
26
+ if sys.platform == 'win32':
27
+ locales = ['FRENCH']
28
+ else:
29
+ locales = ['fr_FR', 'fr_FR.UTF-8', 'fi_FI', 'fi_FI.UTF-8']
30
+
31
+ old_locale = locale.getlocale(locale.LC_NUMERIC)
32
+ new_locale = None
33
+ try:
34
+ for loc in locales:
35
+ try:
36
+ locale.setlocale(locale.LC_NUMERIC, loc)
37
+ new_locale = loc
38
+ break
39
+ except locale.Error:
40
+ pass
41
+ finally:
42
+ locale.setlocale(locale.LC_NUMERIC, locale=old_locale)
43
+ return old_locale, new_locale
44
+
45
+
46
+ class CommaDecimalPointLocale:
47
+ """Sets LC_NUMERIC to a locale with comma as decimal point.
48
+
49
+ Classes derived from this class have setup and teardown methods that run
50
+ tests with locale.LC_NUMERIC set to a locale where commas (',') are used as
51
+ the decimal point instead of periods ('.'). On exit the locale is restored
52
+ to the initial locale. It also serves as context manager with the same
53
+ effect. If no such locale is available, the test is skipped.
54
+
55
+ .. versionadded:: 1.15.0
56
+
57
+ """
58
+ (cur_locale, tst_locale) = find_comma_decimal_point_locale()
59
+
60
+ def setup_method(self):
61
+ if self.tst_locale is None:
62
+ pytest.skip("No French locale available")
63
+ locale.setlocale(locale.LC_NUMERIC, locale=self.tst_locale)
64
+
65
+ def teardown_method(self):
66
+ locale.setlocale(locale.LC_NUMERIC, locale=self.cur_locale)
67
+
68
+ def __enter__(self):
69
+ if self.tst_locale is None:
70
+ pytest.skip("No French locale available")
71
+ locale.setlocale(locale.LC_NUMERIC, locale=self.tst_locale)
72
+
73
+ def __exit__(self, type, value, traceback):
74
+ locale.setlocale(locale.LC_NUMERIC, locale=self.cur_locale)
.venv/lib/python3.11/site-packages/numpy/core/tests/test__exceptions.py ADDED
@@ -0,0 +1,88 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Tests of the ._exceptions module. Primarily for exercising the __str__ methods.
3
+ """
4
+
5
+ import pickle
6
+
7
+ import pytest
8
+ import numpy as np
9
+
10
+ _ArrayMemoryError = np.core._exceptions._ArrayMemoryError
11
+ _UFuncNoLoopError = np.core._exceptions._UFuncNoLoopError
12
+
13
+ class TestArrayMemoryError:
14
+ def test_pickling(self):
15
+ """ Test that _ArrayMemoryError can be pickled """
16
+ error = _ArrayMemoryError((1023,), np.dtype(np.uint8))
17
+ res = pickle.loads(pickle.dumps(error))
18
+ assert res._total_size == error._total_size
19
+
20
+ def test_str(self):
21
+ e = _ArrayMemoryError((1023,), np.dtype(np.uint8))
22
+ str(e) # not crashing is enough
23
+
24
+ # testing these properties is easier than testing the full string repr
25
+ def test__size_to_string(self):
26
+ """ Test e._size_to_string """
27
+ f = _ArrayMemoryError._size_to_string
28
+ Ki = 1024
29
+ assert f(0) == '0 bytes'
30
+ assert f(1) == '1 bytes'
31
+ assert f(1023) == '1023 bytes'
32
+ assert f(Ki) == '1.00 KiB'
33
+ assert f(Ki+1) == '1.00 KiB'
34
+ assert f(10*Ki) == '10.0 KiB'
35
+ assert f(int(999.4*Ki)) == '999. KiB'
36
+ assert f(int(1023.4*Ki)) == '1023. KiB'
37
+ assert f(int(1023.5*Ki)) == '1.00 MiB'
38
+ assert f(Ki*Ki) == '1.00 MiB'
39
+
40
+ # 1023.9999 Mib should round to 1 GiB
41
+ assert f(int(Ki*Ki*Ki*0.9999)) == '1.00 GiB'
42
+ assert f(Ki*Ki*Ki*Ki*Ki*Ki) == '1.00 EiB'
43
+ # larger than sys.maxsize, adding larger prefixes isn't going to help
44
+ # anyway.
45
+ assert f(Ki*Ki*Ki*Ki*Ki*Ki*123456) == '123456. EiB'
46
+
47
+ def test__total_size(self):
48
+ """ Test e._total_size """
49
+ e = _ArrayMemoryError((1,), np.dtype(np.uint8))
50
+ assert e._total_size == 1
51
+
52
+ e = _ArrayMemoryError((2, 4), np.dtype((np.uint64, 16)))
53
+ assert e._total_size == 1024
54
+
55
+
56
+ class TestUFuncNoLoopError:
57
+ def test_pickling(self):
58
+ """ Test that _UFuncNoLoopError can be pickled """
59
+ assert isinstance(pickle.dumps(_UFuncNoLoopError), bytes)
60
+
61
+
62
+ @pytest.mark.parametrize("args", [
63
+ (2, 1, None),
64
+ (2, 1, "test_prefix"),
65
+ ("test message",),
66
+ ])
67
+ class TestAxisError:
68
+ def test_attr(self, args):
69
+ """Validate attribute types."""
70
+ exc = np.AxisError(*args)
71
+ if len(args) == 1:
72
+ assert exc.axis is None
73
+ assert exc.ndim is None
74
+ else:
75
+ axis, ndim, *_ = args
76
+ assert exc.axis == axis
77
+ assert exc.ndim == ndim
78
+
79
+ def test_pickling(self, args):
80
+ """Test that `AxisError` can be pickled."""
81
+ exc = np.AxisError(*args)
82
+ exc2 = pickle.loads(pickle.dumps(exc))
83
+
84
+ assert type(exc) is type(exc2)
85
+ for name in ("axis", "ndim", "args"):
86
+ attr1 = getattr(exc, name)
87
+ attr2 = getattr(exc2, name)
88
+ assert attr1 == attr2, name
.venv/lib/python3.11/site-packages/numpy/core/tests/test_abc.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from numpy.testing import assert_
2
+
3
+ import numbers
4
+
5
+ import numpy as np
6
+ from numpy.core.numerictypes import sctypes
7
+
8
+ class TestABC:
9
+ def test_abstract(self):
10
+ assert_(issubclass(np.number, numbers.Number))
11
+
12
+ assert_(issubclass(np.inexact, numbers.Complex))
13
+ assert_(issubclass(np.complexfloating, numbers.Complex))
14
+ assert_(issubclass(np.floating, numbers.Real))
15
+
16
+ assert_(issubclass(np.integer, numbers.Integral))
17
+ assert_(issubclass(np.signedinteger, numbers.Integral))
18
+ assert_(issubclass(np.unsignedinteger, numbers.Integral))
19
+
20
+ def test_floats(self):
21
+ for t in sctypes['float']:
22
+ assert_(isinstance(t(), numbers.Real),
23
+ f"{t.__name__} is not instance of Real")
24
+ assert_(issubclass(t, numbers.Real),
25
+ f"{t.__name__} is not subclass of Real")
26
+ assert_(not isinstance(t(), numbers.Rational),
27
+ f"{t.__name__} is instance of Rational")
28
+ assert_(not issubclass(t, numbers.Rational),
29
+ f"{t.__name__} is subclass of Rational")
30
+
31
+ def test_complex(self):
32
+ for t in sctypes['complex']:
33
+ assert_(isinstance(t(), numbers.Complex),
34
+ f"{t.__name__} is not instance of Complex")
35
+ assert_(issubclass(t, numbers.Complex),
36
+ f"{t.__name__} is not subclass of Complex")
37
+ assert_(not isinstance(t(), numbers.Real),
38
+ f"{t.__name__} is instance of Real")
39
+ assert_(not issubclass(t, numbers.Real),
40
+ f"{t.__name__} is subclass of Real")
41
+
42
+ def test_int(self):
43
+ for t in sctypes['int']:
44
+ assert_(isinstance(t(), numbers.Integral),
45
+ f"{t.__name__} is not instance of Integral")
46
+ assert_(issubclass(t, numbers.Integral),
47
+ f"{t.__name__} is not subclass of Integral")
48
+
49
+ def test_uint(self):
50
+ for t in sctypes['uint']:
51
+ assert_(isinstance(t(), numbers.Integral),
52
+ f"{t.__name__} is not instance of Integral")
53
+ assert_(issubclass(t, numbers.Integral),
54
+ f"{t.__name__} is not subclass of Integral")
.venv/lib/python3.11/site-packages/numpy/core/tests/test_argparse.py ADDED
@@ -0,0 +1,62 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Tests for the private NumPy argument parsing functionality.
3
+ They mainly exists to ensure good test coverage without having to try the
4
+ weirder cases on actual numpy functions but test them in one place.
5
+
6
+ The test function is defined in C to be equivalent to (errors may not always
7
+ match exactly, and could be adjusted):
8
+
9
+ def func(arg1, /, arg2, *, arg3):
10
+ i = integer(arg1) # reproducing the 'i' parsing in Python.
11
+ return None
12
+ """
13
+
14
+ import pytest
15
+
16
+ import numpy as np
17
+ from numpy.core._multiarray_tests import argparse_example_function as func
18
+
19
+
20
+ def test_invalid_integers():
21
+ with pytest.raises(TypeError,
22
+ match="integer argument expected, got float"):
23
+ func(1.)
24
+ with pytest.raises(OverflowError):
25
+ func(2**100)
26
+
27
+
28
+ def test_missing_arguments():
29
+ with pytest.raises(TypeError,
30
+ match="missing required positional argument 0"):
31
+ func()
32
+ with pytest.raises(TypeError,
33
+ match="missing required positional argument 0"):
34
+ func(arg2=1, arg3=4)
35
+ with pytest.raises(TypeError,
36
+ match=r"missing required argument \'arg2\' \(pos 1\)"):
37
+ func(1, arg3=5)
38
+
39
+
40
+ def test_too_many_positional():
41
+ # the second argument is positional but can be passed as keyword.
42
+ with pytest.raises(TypeError,
43
+ match="takes from 2 to 3 positional arguments but 4 were given"):
44
+ func(1, 2, 3, 4)
45
+
46
+
47
+ def test_multiple_values():
48
+ with pytest.raises(TypeError,
49
+ match=r"given by name \('arg2'\) and position \(position 1\)"):
50
+ func(1, 2, arg2=3)
51
+
52
+
53
+ def test_string_fallbacks():
54
+ # We can (currently?) use numpy strings to test the "slow" fallbacks
55
+ # that should normally not be taken due to string interning.
56
+ arg2 = np.str_("arg2")
57
+ missing_arg = np.str_("missing_arg")
58
+ func(1, **{arg2: 3})
59
+ with pytest.raises(TypeError,
60
+ match="got an unexpected keyword argument 'missing_arg'"):
61
+ func(2, **{missing_arg: 3})
62
+
.venv/lib/python3.11/site-packages/numpy/core/tests/test_array_coercion.py ADDED
@@ -0,0 +1,898 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Tests for array coercion, mainly through testing `np.array` results directly.
3
+ Note that other such tests exist, e.g., in `test_api.py` and many corner-cases
4
+ are tested (sometimes indirectly) elsewhere.
5
+ """
6
+
7
+ from itertools import permutations, product
8
+
9
+ import pytest
10
+ from pytest import param
11
+
12
+ import numpy as np
13
+ from numpy.core._rational_tests import rational
14
+ from numpy.core._multiarray_umath import _discover_array_parameters
15
+
16
+ from numpy.testing import (
17
+ assert_array_equal, assert_warns, IS_PYPY)
18
+
19
+
20
+ def arraylikes():
21
+ """
22
+ Generator for functions converting an array into various array-likes.
23
+ If full is True (default) it includes array-likes not capable of handling
24
+ all dtypes.
25
+ """
26
+ # base array:
27
+ def ndarray(a):
28
+ return a
29
+
30
+ yield param(ndarray, id="ndarray")
31
+
32
+ # subclass:
33
+ class MyArr(np.ndarray):
34
+ pass
35
+
36
+ def subclass(a):
37
+ return a.view(MyArr)
38
+
39
+ yield subclass
40
+
41
+ class _SequenceLike():
42
+ # Older NumPy versions, sometimes cared whether a protocol array was
43
+ # also _SequenceLike. This shouldn't matter, but keep it for now
44
+ # for __array__ and not the others.
45
+ def __len__(self):
46
+ raise TypeError
47
+
48
+ def __getitem__(self):
49
+ raise TypeError
50
+
51
+ # Array-interface
52
+ class ArrayDunder(_SequenceLike):
53
+ def __init__(self, a):
54
+ self.a = a
55
+
56
+ def __array__(self, dtype=None):
57
+ return self.a
58
+
59
+ yield param(ArrayDunder, id="__array__")
60
+
61
+ # memory-view
62
+ yield param(memoryview, id="memoryview")
63
+
64
+ # Array-interface
65
+ class ArrayInterface:
66
+ def __init__(self, a):
67
+ self.a = a # need to hold on to keep interface valid
68
+ self.__array_interface__ = a.__array_interface__
69
+
70
+ yield param(ArrayInterface, id="__array_interface__")
71
+
72
+ # Array-Struct
73
+ class ArrayStruct:
74
+ def __init__(self, a):
75
+ self.a = a # need to hold on to keep struct valid
76
+ self.__array_struct__ = a.__array_struct__
77
+
78
+ yield param(ArrayStruct, id="__array_struct__")
79
+
80
+
81
+ def scalar_instances(times=True, extended_precision=True, user_dtype=True):
82
+ # Hard-coded list of scalar instances.
83
+ # Floats:
84
+ yield param(np.sqrt(np.float16(5)), id="float16")
85
+ yield param(np.sqrt(np.float32(5)), id="float32")
86
+ yield param(np.sqrt(np.float64(5)), id="float64")
87
+ if extended_precision:
88
+ yield param(np.sqrt(np.longdouble(5)), id="longdouble")
89
+
90
+ # Complex:
91
+ yield param(np.sqrt(np.complex64(2+3j)), id="complex64")
92
+ yield param(np.sqrt(np.complex128(2+3j)), id="complex128")
93
+ if extended_precision:
94
+ yield param(np.sqrt(np.longcomplex(2+3j)), id="clongdouble")
95
+
96
+ # Bool:
97
+ # XFAIL: Bool should be added, but has some bad properties when it
98
+ # comes to strings, see also gh-9875
99
+ # yield param(np.bool_(0), id="bool")
100
+
101
+ # Integers:
102
+ yield param(np.int8(2), id="int8")
103
+ yield param(np.int16(2), id="int16")
104
+ yield param(np.int32(2), id="int32")
105
+ yield param(np.int64(2), id="int64")
106
+
107
+ yield param(np.uint8(2), id="uint8")
108
+ yield param(np.uint16(2), id="uint16")
109
+ yield param(np.uint32(2), id="uint32")
110
+ yield param(np.uint64(2), id="uint64")
111
+
112
+ # Rational:
113
+ if user_dtype:
114
+ yield param(rational(1, 2), id="rational")
115
+
116
+ # Cannot create a structured void scalar directly:
117
+ structured = np.array([(1, 3)], "i,i")[0]
118
+ assert isinstance(structured, np.void)
119
+ assert structured.dtype == np.dtype("i,i")
120
+ yield param(structured, id="structured")
121
+
122
+ if times:
123
+ # Datetimes and timedelta
124
+ yield param(np.timedelta64(2), id="timedelta64[generic]")
125
+ yield param(np.timedelta64(23, "s"), id="timedelta64[s]")
126
+ yield param(np.timedelta64("NaT", "s"), id="timedelta64[s](NaT)")
127
+
128
+ yield param(np.datetime64("NaT"), id="datetime64[generic](NaT)")
129
+ yield param(np.datetime64("2020-06-07 12:43", "ms"), id="datetime64[ms]")
130
+
131
+ # Strings and unstructured void:
132
+ yield param(np.bytes_(b"1234"), id="bytes")
133
+ yield param(np.str_("2345"), id="unicode")
134
+ yield param(np.void(b"4321"), id="unstructured_void")
135
+
136
+
137
+ def is_parametric_dtype(dtype):
138
+ """Returns True if the dtype is a parametric legacy dtype (itemsize
139
+ is 0, or a datetime without units)
140
+ """
141
+ if dtype.itemsize == 0:
142
+ return True
143
+ if issubclass(dtype.type, (np.datetime64, np.timedelta64)):
144
+ if dtype.name.endswith("64"):
145
+ # Generic time units
146
+ return True
147
+ return False
148
+
149
+
150
+ class TestStringDiscovery:
151
+ @pytest.mark.parametrize("obj",
152
+ [object(), 1.2, 10**43, None, "string"],
153
+ ids=["object", "1.2", "10**43", "None", "string"])
154
+ def test_basic_stringlength(self, obj):
155
+ length = len(str(obj))
156
+ expected = np.dtype(f"S{length}")
157
+
158
+ assert np.array(obj, dtype="S").dtype == expected
159
+ assert np.array([obj], dtype="S").dtype == expected
160
+
161
+ # A nested array is also discovered correctly
162
+ arr = np.array(obj, dtype="O")
163
+ assert np.array(arr, dtype="S").dtype == expected
164
+ # Also if we use the dtype class
165
+ assert np.array(arr, dtype=type(expected)).dtype == expected
166
+ # Check that .astype() behaves identical
167
+ assert arr.astype("S").dtype == expected
168
+ # The DType class is accepted by `.astype()`
169
+ assert arr.astype(type(np.dtype("S"))).dtype == expected
170
+
171
+ @pytest.mark.parametrize("obj",
172
+ [object(), 1.2, 10**43, None, "string"],
173
+ ids=["object", "1.2", "10**43", "None", "string"])
174
+ def test_nested_arrays_stringlength(self, obj):
175
+ length = len(str(obj))
176
+ expected = np.dtype(f"S{length}")
177
+ arr = np.array(obj, dtype="O")
178
+ assert np.array([arr, arr], dtype="S").dtype == expected
179
+
180
+ @pytest.mark.parametrize("arraylike", arraylikes())
181
+ def test_unpack_first_level(self, arraylike):
182
+ # We unpack exactly one level of array likes
183
+ obj = np.array([None])
184
+ obj[0] = np.array(1.2)
185
+ # the length of the included item, not of the float dtype
186
+ length = len(str(obj[0]))
187
+ expected = np.dtype(f"S{length}")
188
+
189
+ obj = arraylike(obj)
190
+ # casting to string usually calls str(obj)
191
+ arr = np.array([obj], dtype="S")
192
+ assert arr.shape == (1, 1)
193
+ assert arr.dtype == expected
194
+
195
+
196
+ class TestScalarDiscovery:
197
+ def test_void_special_case(self):
198
+ # Void dtypes with structures discover tuples as elements
199
+ arr = np.array((1, 2, 3), dtype="i,i,i")
200
+ assert arr.shape == ()
201
+ arr = np.array([(1, 2, 3)], dtype="i,i,i")
202
+ assert arr.shape == (1,)
203
+
204
+ def test_char_special_case(self):
205
+ arr = np.array("string", dtype="c")
206
+ assert arr.shape == (6,)
207
+ assert arr.dtype.char == "c"
208
+ arr = np.array(["string"], dtype="c")
209
+ assert arr.shape == (1, 6)
210
+ assert arr.dtype.char == "c"
211
+
212
+ def test_char_special_case_deep(self):
213
+ # Check that the character special case errors correctly if the
214
+ # array is too deep:
215
+ nested = ["string"] # 2 dimensions (due to string being sequence)
216
+ for i in range(np.MAXDIMS - 2):
217
+ nested = [nested]
218
+
219
+ arr = np.array(nested, dtype='c')
220
+ assert arr.shape == (1,) * (np.MAXDIMS - 1) + (6,)
221
+ with pytest.raises(ValueError):
222
+ np.array([nested], dtype="c")
223
+
224
+ def test_unknown_object(self):
225
+ arr = np.array(object())
226
+ assert arr.shape == ()
227
+ assert arr.dtype == np.dtype("O")
228
+
229
+ @pytest.mark.parametrize("scalar", scalar_instances())
230
+ def test_scalar(self, scalar):
231
+ arr = np.array(scalar)
232
+ assert arr.shape == ()
233
+ assert arr.dtype == scalar.dtype
234
+
235
+ arr = np.array([[scalar, scalar]])
236
+ assert arr.shape == (1, 2)
237
+ assert arr.dtype == scalar.dtype
238
+
239
+ # Additionally to string this test also runs into a corner case
240
+ # with datetime promotion (the difference is the promotion order).
241
+ @pytest.mark.filterwarnings("ignore:Promotion of numbers:FutureWarning")
242
+ def test_scalar_promotion(self):
243
+ for sc1, sc2 in product(scalar_instances(), scalar_instances()):
244
+ sc1, sc2 = sc1.values[0], sc2.values[0]
245
+ # test all combinations:
246
+ try:
247
+ arr = np.array([sc1, sc2])
248
+ except (TypeError, ValueError):
249
+ # The promotion between two times can fail
250
+ # XFAIL (ValueError): Some object casts are currently undefined
251
+ continue
252
+ assert arr.shape == (2,)
253
+ try:
254
+ dt1, dt2 = sc1.dtype, sc2.dtype
255
+ expected_dtype = np.promote_types(dt1, dt2)
256
+ assert arr.dtype == expected_dtype
257
+ except TypeError as e:
258
+ # Will currently always go to object dtype
259
+ assert arr.dtype == np.dtype("O")
260
+
261
+ @pytest.mark.parametrize("scalar", scalar_instances())
262
+ def test_scalar_coercion(self, scalar):
263
+ # This tests various scalar coercion paths, mainly for the numerical
264
+ # types. It includes some paths not directly related to `np.array`.
265
+ if isinstance(scalar, np.inexact):
266
+ # Ensure we have a full-precision number if available
267
+ scalar = type(scalar)((scalar * 2)**0.5)
268
+
269
+ if type(scalar) is rational:
270
+ # Rational generally fails due to a missing cast. In the future
271
+ # object casts should automatically be defined based on `setitem`.
272
+ pytest.xfail("Rational to object cast is undefined currently.")
273
+
274
+ # Use casting from object:
275
+ arr = np.array(scalar, dtype=object).astype(scalar.dtype)
276
+
277
+ # Test various ways to create an array containing this scalar:
278
+ arr1 = np.array(scalar).reshape(1)
279
+ arr2 = np.array([scalar])
280
+ arr3 = np.empty(1, dtype=scalar.dtype)
281
+ arr3[0] = scalar
282
+ arr4 = np.empty(1, dtype=scalar.dtype)
283
+ arr4[:] = [scalar]
284
+ # All of these methods should yield the same results
285
+ assert_array_equal(arr, arr1)
286
+ assert_array_equal(arr, arr2)
287
+ assert_array_equal(arr, arr3)
288
+ assert_array_equal(arr, arr4)
289
+
290
+ @pytest.mark.xfail(IS_PYPY, reason="`int(np.complex128(3))` fails on PyPy")
291
+ @pytest.mark.filterwarnings("ignore::numpy.ComplexWarning")
292
+ @pytest.mark.parametrize("cast_to", scalar_instances())
293
+ def test_scalar_coercion_same_as_cast_and_assignment(self, cast_to):
294
+ """
295
+ Test that in most cases:
296
+ * `np.array(scalar, dtype=dtype)`
297
+ * `np.empty((), dtype=dtype)[()] = scalar`
298
+ * `np.array(scalar).astype(dtype)`
299
+ should behave the same. The only exceptions are parametric dtypes
300
+ (mainly datetime/timedelta without unit) and void without fields.
301
+ """
302
+ dtype = cast_to.dtype # use to parametrize only the target dtype
303
+
304
+ for scalar in scalar_instances(times=False):
305
+ scalar = scalar.values[0]
306
+
307
+ if dtype.type == np.void:
308
+ if scalar.dtype.fields is not None and dtype.fields is None:
309
+ # Here, coercion to "V6" works, but the cast fails.
310
+ # Since the types are identical, SETITEM takes care of
311
+ # this, but has different rules than the cast.
312
+ with pytest.raises(TypeError):
313
+ np.array(scalar).astype(dtype)
314
+ np.array(scalar, dtype=dtype)
315
+ np.array([scalar], dtype=dtype)
316
+ continue
317
+
318
+ # The main test, we first try to use casting and if it succeeds
319
+ # continue below testing that things are the same, otherwise
320
+ # test that the alternative paths at least also fail.
321
+ try:
322
+ cast = np.array(scalar).astype(dtype)
323
+ except (TypeError, ValueError, RuntimeError):
324
+ # coercion should also raise (error type may change)
325
+ with pytest.raises(Exception):
326
+ np.array(scalar, dtype=dtype)
327
+
328
+ if (isinstance(scalar, rational) and
329
+ np.issubdtype(dtype, np.signedinteger)):
330
+ return
331
+
332
+ with pytest.raises(Exception):
333
+ np.array([scalar], dtype=dtype)
334
+ # assignment should also raise
335
+ res = np.zeros((), dtype=dtype)
336
+ with pytest.raises(Exception):
337
+ res[()] = scalar
338
+
339
+ return
340
+
341
+ # Non error path:
342
+ arr = np.array(scalar, dtype=dtype)
343
+ assert_array_equal(arr, cast)
344
+ # assignment behaves the same
345
+ ass = np.zeros((), dtype=dtype)
346
+ ass[()] = scalar
347
+ assert_array_equal(ass, cast)
348
+
349
+ @pytest.mark.parametrize("pyscalar", [10, 10.32, 10.14j, 10**100])
350
+ def test_pyscalar_subclasses(self, pyscalar):
351
+ """NumPy arrays are read/write which means that anything but invariant
352
+ behaviour is on thin ice. However, we currently are happy to discover
353
+ subclasses of Python float, int, complex the same as the base classes.
354
+ This should potentially be deprecated.
355
+ """
356
+ class MyScalar(type(pyscalar)):
357
+ pass
358
+
359
+ res = np.array(MyScalar(pyscalar))
360
+ expected = np.array(pyscalar)
361
+ assert_array_equal(res, expected)
362
+
363
+ @pytest.mark.parametrize("dtype_char", np.typecodes["All"])
364
+ def test_default_dtype_instance(self, dtype_char):
365
+ if dtype_char in "SU":
366
+ dtype = np.dtype(dtype_char + "1")
367
+ elif dtype_char == "V":
368
+ # Legacy behaviour was to use V8. The reason was float64 being the
369
+ # default dtype and that having 8 bytes.
370
+ dtype = np.dtype("V8")
371
+ else:
372
+ dtype = np.dtype(dtype_char)
373
+
374
+ discovered_dtype, _ = _discover_array_parameters([], type(dtype))
375
+
376
+ assert discovered_dtype == dtype
377
+ assert discovered_dtype.itemsize == dtype.itemsize
378
+
379
+ @pytest.mark.parametrize("dtype", np.typecodes["Integer"])
380
+ @pytest.mark.parametrize(["scalar", "error"],
381
+ [(np.float64(np.nan), ValueError),
382
+ (np.array(-1).astype(np.ulonglong)[()], OverflowError)])
383
+ def test_scalar_to_int_coerce_does_not_cast(self, dtype, scalar, error):
384
+ """
385
+ Signed integers are currently different in that they do not cast other
386
+ NumPy scalar, but instead use scalar.__int__(). The hardcoded
387
+ exception to this rule is `np.array(scalar, dtype=integer)`.
388
+ """
389
+ dtype = np.dtype(dtype)
390
+
391
+ # This is a special case using casting logic. It warns for the NaN
392
+ # but allows the cast (giving undefined behaviour).
393
+ with np.errstate(invalid="ignore"):
394
+ coerced = np.array(scalar, dtype=dtype)
395
+ cast = np.array(scalar).astype(dtype)
396
+ assert_array_equal(coerced, cast)
397
+
398
+ # However these fail:
399
+ with pytest.raises(error):
400
+ np.array([scalar], dtype=dtype)
401
+ with pytest.raises(error):
402
+ cast[()] = scalar
403
+
404
+
405
+ class TestTimeScalars:
406
+ @pytest.mark.parametrize("dtype", [np.int64, np.float32])
407
+ @pytest.mark.parametrize("scalar",
408
+ [param(np.timedelta64("NaT", "s"), id="timedelta64[s](NaT)"),
409
+ param(np.timedelta64(123, "s"), id="timedelta64[s]"),
410
+ param(np.datetime64("NaT", "generic"), id="datetime64[generic](NaT)"),
411
+ param(np.datetime64(1, "D"), id="datetime64[D]")],)
412
+ def test_coercion_basic(self, dtype, scalar):
413
+ # Note the `[scalar]` is there because np.array(scalar) uses stricter
414
+ # `scalar.__int__()` rules for backward compatibility right now.
415
+ arr = np.array(scalar, dtype=dtype)
416
+ cast = np.array(scalar).astype(dtype)
417
+ assert_array_equal(arr, cast)
418
+
419
+ ass = np.ones((), dtype=dtype)
420
+ if issubclass(dtype, np.integer):
421
+ with pytest.raises(TypeError):
422
+ # raises, as would np.array([scalar], dtype=dtype), this is
423
+ # conversion from times, but behaviour of integers.
424
+ ass[()] = scalar
425
+ else:
426
+ ass[()] = scalar
427
+ assert_array_equal(ass, cast)
428
+
429
+ @pytest.mark.parametrize("dtype", [np.int64, np.float32])
430
+ @pytest.mark.parametrize("scalar",
431
+ [param(np.timedelta64(123, "ns"), id="timedelta64[ns]"),
432
+ param(np.timedelta64(12, "generic"), id="timedelta64[generic]")])
433
+ def test_coercion_timedelta_convert_to_number(self, dtype, scalar):
434
+ # Only "ns" and "generic" timedeltas can be converted to numbers
435
+ # so these are slightly special.
436
+ arr = np.array(scalar, dtype=dtype)
437
+ cast = np.array(scalar).astype(dtype)
438
+ ass = np.ones((), dtype=dtype)
439
+ ass[()] = scalar # raises, as would np.array([scalar], dtype=dtype)
440
+
441
+ assert_array_equal(arr, cast)
442
+ assert_array_equal(cast, cast)
443
+
444
+ @pytest.mark.parametrize("dtype", ["S6", "U6"])
445
+ @pytest.mark.parametrize(["val", "unit"],
446
+ [param(123, "s", id="[s]"), param(123, "D", id="[D]")])
447
+ def test_coercion_assignment_datetime(self, val, unit, dtype):
448
+ # String from datetime64 assignment is currently special cased to
449
+ # never use casting. This is because casting will error in this
450
+ # case, and traditionally in most cases the behaviour is maintained
451
+ # like this. (`np.array(scalar, dtype="U6")` would have failed before)
452
+ # TODO: This discrepancy _should_ be resolved, either by relaxing the
453
+ # cast, or by deprecating the first part.
454
+ scalar = np.datetime64(val, unit)
455
+ dtype = np.dtype(dtype)
456
+ cut_string = dtype.type(str(scalar)[:6])
457
+
458
+ arr = np.array(scalar, dtype=dtype)
459
+ assert arr[()] == cut_string
460
+ ass = np.ones((), dtype=dtype)
461
+ ass[()] = scalar
462
+ assert ass[()] == cut_string
463
+
464
+ with pytest.raises(RuntimeError):
465
+ # However, unlike the above assignment using `str(scalar)[:6]`
466
+ # due to being handled by the string DType and not be casting
467
+ # the explicit cast fails:
468
+ np.array(scalar).astype(dtype)
469
+
470
+
471
+ @pytest.mark.parametrize(["val", "unit"],
472
+ [param(123, "s", id="[s]"), param(123, "D", id="[D]")])
473
+ def test_coercion_assignment_timedelta(self, val, unit):
474
+ scalar = np.timedelta64(val, unit)
475
+
476
+ # Unlike datetime64, timedelta allows the unsafe cast:
477
+ np.array(scalar, dtype="S6")
478
+ cast = np.array(scalar).astype("S6")
479
+ ass = np.ones((), dtype="S6")
480
+ ass[()] = scalar
481
+ expected = scalar.astype("S")[:6]
482
+ assert cast[()] == expected
483
+ assert ass[()] == expected
484
+
485
+ class TestNested:
486
+ def test_nested_simple(self):
487
+ initial = [1.2]
488
+ nested = initial
489
+ for i in range(np.MAXDIMS - 1):
490
+ nested = [nested]
491
+
492
+ arr = np.array(nested, dtype="float64")
493
+ assert arr.shape == (1,) * np.MAXDIMS
494
+ with pytest.raises(ValueError):
495
+ np.array([nested], dtype="float64")
496
+
497
+ with pytest.raises(ValueError, match=".*would exceed the maximum"):
498
+ np.array([nested]) # user must ask for `object` explicitly
499
+
500
+ arr = np.array([nested], dtype=object)
501
+ assert arr.dtype == np.dtype("O")
502
+ assert arr.shape == (1,) * np.MAXDIMS
503
+ assert arr.item() is initial
504
+
505
+ def test_pathological_self_containing(self):
506
+ # Test that this also works for two nested sequences
507
+ l = []
508
+ l.append(l)
509
+ arr = np.array([l, l, l], dtype=object)
510
+ assert arr.shape == (3,) + (1,) * (np.MAXDIMS - 1)
511
+
512
+ # Also check a ragged case:
513
+ arr = np.array([l, [None], l], dtype=object)
514
+ assert arr.shape == (3, 1)
515
+
516
+ @pytest.mark.parametrize("arraylike", arraylikes())
517
+ def test_nested_arraylikes(self, arraylike):
518
+ # We try storing an array like into an array, but the array-like
519
+ # will have too many dimensions. This means the shape discovery
520
+ # decides that the array-like must be treated as an object (a special
521
+ # case of ragged discovery). The result will be an array with one
522
+ # dimension less than the maximum dimensions, and the array being
523
+ # assigned to it (which does work for object or if `float(arraylike)`
524
+ # works).
525
+ initial = arraylike(np.ones((1, 1)))
526
+
527
+ nested = initial
528
+ for i in range(np.MAXDIMS - 1):
529
+ nested = [nested]
530
+
531
+ with pytest.raises(ValueError, match=".*would exceed the maximum"):
532
+ # It will refuse to assign the array into
533
+ np.array(nested, dtype="float64")
534
+
535
+ # If this is object, we end up assigning a (1, 1) array into (1,)
536
+ # (due to running out of dimensions), this is currently supported but
537
+ # a special case which is not ideal.
538
+ arr = np.array(nested, dtype=object)
539
+ assert arr.shape == (1,) * np.MAXDIMS
540
+ assert arr.item() == np.array(initial).item()
541
+
542
+ @pytest.mark.parametrize("arraylike", arraylikes())
543
+ def test_uneven_depth_ragged(self, arraylike):
544
+ arr = np.arange(4).reshape((2, 2))
545
+ arr = arraylike(arr)
546
+
547
+ # Array is ragged in the second dimension already:
548
+ out = np.array([arr, [arr]], dtype=object)
549
+ assert out.shape == (2,)
550
+ assert out[0] is arr
551
+ assert type(out[1]) is list
552
+
553
+ # Array is ragged in the third dimension:
554
+ with pytest.raises(ValueError):
555
+ # This is a broadcast error during assignment, because
556
+ # the array shape would be (2, 2, 2) but `arr[0, 0] = arr` fails.
557
+ np.array([arr, [arr, arr]], dtype=object)
558
+
559
+ def test_empty_sequence(self):
560
+ arr = np.array([[], [1], [[1]]], dtype=object)
561
+ assert arr.shape == (3,)
562
+
563
+ # The empty sequence stops further dimension discovery, so the
564
+ # result shape will be (0,) which leads to an error during:
565
+ with pytest.raises(ValueError):
566
+ np.array([[], np.empty((0, 1))], dtype=object)
567
+
568
+ def test_array_of_different_depths(self):
569
+ # When multiple arrays (or array-likes) are included in a
570
+ # sequences and have different depth, we currently discover
571
+ # as many dimensions as they share. (see also gh-17224)
572
+ arr = np.zeros((3, 2))
573
+ mismatch_first_dim = np.zeros((1, 2))
574
+ mismatch_second_dim = np.zeros((3, 3))
575
+
576
+ dtype, shape = _discover_array_parameters(
577
+ [arr, mismatch_second_dim], dtype=np.dtype("O"))
578
+ assert shape == (2, 3)
579
+
580
+ dtype, shape = _discover_array_parameters(
581
+ [arr, mismatch_first_dim], dtype=np.dtype("O"))
582
+ assert shape == (2,)
583
+ # The second case is currently supported because the arrays
584
+ # can be stored as objects:
585
+ res = np.asarray([arr, mismatch_first_dim], dtype=np.dtype("O"))
586
+ assert res[0] is arr
587
+ assert res[1] is mismatch_first_dim
588
+
589
+
590
+ class TestBadSequences:
591
+ # These are tests for bad objects passed into `np.array`, in general
592
+ # these have undefined behaviour. In the old code they partially worked
593
+ # when now they will fail. We could (and maybe should) create a copy
594
+ # of all sequences to be safe against bad-actors.
595
+
596
+ def test_growing_list(self):
597
+ # List to coerce, `mylist` will append to it during coercion
598
+ obj = []
599
+ class mylist(list):
600
+ def __len__(self):
601
+ obj.append([1, 2])
602
+ return super().__len__()
603
+
604
+ obj.append(mylist([1, 2]))
605
+
606
+ with pytest.raises(RuntimeError):
607
+ np.array(obj)
608
+
609
+ # Note: We do not test a shrinking list. These do very evil things
610
+ # and the only way to fix them would be to copy all sequences.
611
+ # (which may be a real option in the future).
612
+
613
+ def test_mutated_list(self):
614
+ # List to coerce, `mylist` will mutate the first element
615
+ obj = []
616
+ class mylist(list):
617
+ def __len__(self):
618
+ obj[0] = [2, 3] # replace with a different list.
619
+ return super().__len__()
620
+
621
+ obj.append([2, 3])
622
+ obj.append(mylist([1, 2]))
623
+ # Does not crash:
624
+ np.array(obj)
625
+
626
+ def test_replace_0d_array(self):
627
+ # List to coerce, `mylist` will mutate the first element
628
+ obj = []
629
+ class baditem:
630
+ def __len__(self):
631
+ obj[0][0] = 2 # replace with a different list.
632
+ raise ValueError("not actually a sequence!")
633
+
634
+ def __getitem__(self):
635
+ pass
636
+
637
+ # Runs into a corner case in the new code, the `array(2)` is cached
638
+ # so replacing it invalidates the cache.
639
+ obj.append([np.array(2), baditem()])
640
+ with pytest.raises(RuntimeError):
641
+ np.array(obj)
642
+
643
+
644
+ class TestArrayLikes:
645
+ @pytest.mark.parametrize("arraylike", arraylikes())
646
+ def test_0d_object_special_case(self, arraylike):
647
+ arr = np.array(0.)
648
+ obj = arraylike(arr)
649
+ # A single array-like is always converted:
650
+ res = np.array(obj, dtype=object)
651
+ assert_array_equal(arr, res)
652
+
653
+ # But a single 0-D nested array-like never:
654
+ res = np.array([obj], dtype=object)
655
+ assert res[0] is obj
656
+
657
+ @pytest.mark.parametrize("arraylike", arraylikes())
658
+ @pytest.mark.parametrize("arr", [np.array(0.), np.arange(4)])
659
+ def test_object_assignment_special_case(self, arraylike, arr):
660
+ obj = arraylike(arr)
661
+ empty = np.arange(1, dtype=object)
662
+ empty[:] = [obj]
663
+ assert empty[0] is obj
664
+
665
+ def test_0d_generic_special_case(self):
666
+ class ArraySubclass(np.ndarray):
667
+ def __float__(self):
668
+ raise TypeError("e.g. quantities raise on this")
669
+
670
+ arr = np.array(0.)
671
+ obj = arr.view(ArraySubclass)
672
+ res = np.array(obj)
673
+ # The subclass is simply cast:
674
+ assert_array_equal(arr, res)
675
+
676
+ # If the 0-D array-like is included, __float__ is currently
677
+ # guaranteed to be used. We may want to change that, quantities
678
+ # and masked arrays half make use of this.
679
+ with pytest.raises(TypeError):
680
+ np.array([obj])
681
+
682
+ # The same holds for memoryview:
683
+ obj = memoryview(arr)
684
+ res = np.array(obj)
685
+ assert_array_equal(arr, res)
686
+ with pytest.raises(ValueError):
687
+ # The error type does not matter much here.
688
+ np.array([obj])
689
+
690
+ def test_arraylike_classes(self):
691
+ # The classes of array-likes should generally be acceptable to be
692
+ # stored inside a numpy (object) array. This tests all of the
693
+ # special attributes (since all are checked during coercion).
694
+ arr = np.array(np.int64)
695
+ assert arr[()] is np.int64
696
+ arr = np.array([np.int64])
697
+ assert arr[0] is np.int64
698
+
699
+ # This also works for properties/unbound methods:
700
+ class ArrayLike:
701
+ @property
702
+ def __array_interface__(self):
703
+ pass
704
+
705
+ @property
706
+ def __array_struct__(self):
707
+ pass
708
+
709
+ def __array__(self):
710
+ pass
711
+
712
+ arr = np.array(ArrayLike)
713
+ assert arr[()] is ArrayLike
714
+ arr = np.array([ArrayLike])
715
+ assert arr[0] is ArrayLike
716
+
717
+ @pytest.mark.skipif(
718
+ np.dtype(np.intp).itemsize < 8, reason="Needs 64bit platform")
719
+ def test_too_large_array_error_paths(self):
720
+ """Test the error paths, including for memory leaks"""
721
+ arr = np.array(0, dtype="uint8")
722
+ # Guarantees that a contiguous copy won't work:
723
+ arr = np.broadcast_to(arr, 2**62)
724
+
725
+ for i in range(5):
726
+ # repeat, to ensure caching cannot have an effect:
727
+ with pytest.raises(MemoryError):
728
+ np.array(arr)
729
+ with pytest.raises(MemoryError):
730
+ np.array([arr])
731
+
732
+ @pytest.mark.parametrize("attribute",
733
+ ["__array_interface__", "__array__", "__array_struct__"])
734
+ @pytest.mark.parametrize("error", [RecursionError, MemoryError])
735
+ def test_bad_array_like_attributes(self, attribute, error):
736
+ # RecursionError and MemoryError are considered fatal. All errors
737
+ # (except AttributeError) should probably be raised in the future,
738
+ # but shapely made use of it, so it will require a deprecation.
739
+
740
+ class BadInterface:
741
+ def __getattr__(self, attr):
742
+ if attr == attribute:
743
+ raise error
744
+ super().__getattr__(attr)
745
+
746
+ with pytest.raises(error):
747
+ np.array(BadInterface())
748
+
749
+ @pytest.mark.parametrize("error", [RecursionError, MemoryError])
750
+ def test_bad_array_like_bad_length(self, error):
751
+ # RecursionError and MemoryError are considered "critical" in
752
+ # sequences. We could expand this more generally though. (NumPy 1.20)
753
+ class BadSequence:
754
+ def __len__(self):
755
+ raise error
756
+ def __getitem__(self):
757
+ # must have getitem to be a Sequence
758
+ return 1
759
+
760
+ with pytest.raises(error):
761
+ np.array(BadSequence())
762
+
763
+
764
+ class TestAsArray:
765
+ """Test expected behaviors of ``asarray``."""
766
+
767
+ def test_dtype_identity(self):
768
+ """Confirm the intended behavior for *dtype* kwarg.
769
+
770
+ The result of ``asarray()`` should have the dtype provided through the
771
+ keyword argument, when used. This forces unique array handles to be
772
+ produced for unique np.dtype objects, but (for equivalent dtypes), the
773
+ underlying data (the base object) is shared with the original array
774
+ object.
775
+
776
+ Ref https://github.com/numpy/numpy/issues/1468
777
+ """
778
+ int_array = np.array([1, 2, 3], dtype='i')
779
+ assert np.asarray(int_array) is int_array
780
+
781
+ # The character code resolves to the singleton dtype object provided
782
+ # by the numpy package.
783
+ assert np.asarray(int_array, dtype='i') is int_array
784
+
785
+ # Derive a dtype from n.dtype('i'), but add a metadata object to force
786
+ # the dtype to be distinct.
787
+ unequal_type = np.dtype('i', metadata={'spam': True})
788
+ annotated_int_array = np.asarray(int_array, dtype=unequal_type)
789
+ assert annotated_int_array is not int_array
790
+ assert annotated_int_array.base is int_array
791
+ # Create an equivalent descriptor with a new and distinct dtype
792
+ # instance.
793
+ equivalent_requirement = np.dtype('i', metadata={'spam': True})
794
+ annotated_int_array_alt = np.asarray(annotated_int_array,
795
+ dtype=equivalent_requirement)
796
+ assert unequal_type == equivalent_requirement
797
+ assert unequal_type is not equivalent_requirement
798
+ assert annotated_int_array_alt is not annotated_int_array
799
+ assert annotated_int_array_alt.dtype is equivalent_requirement
800
+
801
+ # Check the same logic for a pair of C types whose equivalence may vary
802
+ # between computing environments.
803
+ # Find an equivalent pair.
804
+ integer_type_codes = ('i', 'l', 'q')
805
+ integer_dtypes = [np.dtype(code) for code in integer_type_codes]
806
+ typeA = None
807
+ typeB = None
808
+ for typeA, typeB in permutations(integer_dtypes, r=2):
809
+ if typeA == typeB:
810
+ assert typeA is not typeB
811
+ break
812
+ assert isinstance(typeA, np.dtype) and isinstance(typeB, np.dtype)
813
+
814
+ # These ``asarray()`` calls may produce a new view or a copy,
815
+ # but never the same object.
816
+ long_int_array = np.asarray(int_array, dtype='l')
817
+ long_long_int_array = np.asarray(int_array, dtype='q')
818
+ assert long_int_array is not int_array
819
+ assert long_long_int_array is not int_array
820
+ assert np.asarray(long_int_array, dtype='q') is not long_int_array
821
+ array_a = np.asarray(int_array, dtype=typeA)
822
+ assert typeA == typeB
823
+ assert typeA is not typeB
824
+ assert array_a.dtype is typeA
825
+ assert array_a is not np.asarray(array_a, dtype=typeB)
826
+ assert np.asarray(array_a, dtype=typeB).dtype is typeB
827
+ assert array_a is np.asarray(array_a, dtype=typeB).base
828
+
829
+
830
+ class TestSpecialAttributeLookupFailure:
831
+ # An exception was raised while fetching the attribute
832
+
833
+ class WeirdArrayLike:
834
+ @property
835
+ def __array__(self):
836
+ raise RuntimeError("oops!")
837
+
838
+ class WeirdArrayInterface:
839
+ @property
840
+ def __array_interface__(self):
841
+ raise RuntimeError("oops!")
842
+
843
+ def test_deprecated(self):
844
+ with pytest.raises(RuntimeError):
845
+ np.array(self.WeirdArrayLike())
846
+ with pytest.raises(RuntimeError):
847
+ np.array(self.WeirdArrayInterface())
848
+
849
+
850
+ def test_subarray_from_array_construction():
851
+ # Arrays are more complex, since they "broadcast" on success:
852
+ arr = np.array([1, 2])
853
+
854
+ res = arr.astype("(2)i,")
855
+ assert_array_equal(res, [[1, 1], [2, 2]])
856
+
857
+ res = np.array(arr, dtype="(2)i,")
858
+
859
+ assert_array_equal(res, [[1, 1], [2, 2]])
860
+
861
+ res = np.array([[(1,), (2,)], arr], dtype="(2)i,")
862
+ assert_array_equal(res, [[[1, 1], [2, 2]], [[1, 1], [2, 2]]])
863
+
864
+ # Also try a multi-dimensional example:
865
+ arr = np.arange(5 * 2).reshape(5, 2)
866
+ expected = np.broadcast_to(arr[:, :, np.newaxis, np.newaxis], (5, 2, 2, 2))
867
+
868
+ res = arr.astype("(2,2)f")
869
+ assert_array_equal(res, expected)
870
+
871
+ res = np.array(arr, dtype="(2,2)f")
872
+ assert_array_equal(res, expected)
873
+
874
+
875
+ def test_empty_string():
876
+ # Empty strings are unfortunately often converted to S1 and we need to
877
+ # make sure we are filling the S1 and not the (possibly) detected S0
878
+ # result. This should likely just return S0 and if not maybe the decision
879
+ # to return S1 should be moved.
880
+ res = np.array([""] * 10, dtype="S")
881
+ assert_array_equal(res, np.array("\0", "S1"))
882
+ assert res.dtype == "S1"
883
+
884
+ arr = np.array([""] * 10, dtype=object)
885
+
886
+ res = arr.astype("S")
887
+ assert_array_equal(res, b"")
888
+ assert res.dtype == "S1"
889
+
890
+ res = np.array(arr, dtype="S")
891
+ assert_array_equal(res, b"")
892
+ # TODO: This is arguably weird/wrong, but seems old:
893
+ assert res.dtype == f"S{np.dtype('O').itemsize}"
894
+
895
+ res = np.array([[""] * 10, arr], dtype="S")
896
+ assert_array_equal(res, b"")
897
+ assert res.shape == (2, 10)
898
+ assert res.dtype == "S1"
.venv/lib/python3.11/site-packages/numpy/core/tests/test_array_interface.py ADDED
@@ -0,0 +1,219 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys
2
+ import pytest
3
+ import numpy as np
4
+ from numpy.testing import extbuild
5
+
6
+
7
+ @pytest.fixture
8
+ def get_module(tmp_path):
9
+ """ Some codes to generate data and manage temporary buffers use when
10
+ sharing with numpy via the array interface protocol.
11
+ """
12
+
13
+ if not sys.platform.startswith('linux'):
14
+ pytest.skip('link fails on cygwin')
15
+
16
+ prologue = '''
17
+ #include <Python.h>
18
+ #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
19
+ #include <numpy/arrayobject.h>
20
+ #include <stdio.h>
21
+ #include <math.h>
22
+
23
+ NPY_NO_EXPORT
24
+ void delete_array_struct(PyObject *cap) {
25
+
26
+ /* get the array interface structure */
27
+ PyArrayInterface *inter = (PyArrayInterface*)
28
+ PyCapsule_GetPointer(cap, NULL);
29
+
30
+ /* get the buffer by which data was shared */
31
+ double *ptr = (double*)PyCapsule_GetContext(cap);
32
+
33
+ /* for the purposes of the regression test set the elements
34
+ to nan */
35
+ for (npy_intp i = 0; i < inter->shape[0]; ++i)
36
+ ptr[i] = nan("");
37
+
38
+ /* free the shared buffer */
39
+ free(ptr);
40
+
41
+ /* free the array interface structure */
42
+ free(inter->shape);
43
+ free(inter);
44
+
45
+ fprintf(stderr, "delete_array_struct\\ncap = %ld inter = %ld"
46
+ " ptr = %ld\\n", (long)cap, (long)inter, (long)ptr);
47
+ }
48
+ '''
49
+
50
+ functions = [
51
+ ("new_array_struct", "METH_VARARGS", """
52
+
53
+ long long n_elem = 0;
54
+ double value = 0.0;
55
+
56
+ if (!PyArg_ParseTuple(args, "Ld", &n_elem, &value)) {
57
+ Py_RETURN_NONE;
58
+ }
59
+
60
+ /* allocate and initialize the data to share with numpy */
61
+ long long n_bytes = n_elem*sizeof(double);
62
+ double *data = (double*)malloc(n_bytes);
63
+
64
+ if (!data) {
65
+ PyErr_Format(PyExc_MemoryError,
66
+ "Failed to malloc %lld bytes", n_bytes);
67
+
68
+ Py_RETURN_NONE;
69
+ }
70
+
71
+ for (long long i = 0; i < n_elem; ++i) {
72
+ data[i] = value;
73
+ }
74
+
75
+ /* calculate the shape and stride */
76
+ int nd = 1;
77
+
78
+ npy_intp *ss = (npy_intp*)malloc(2*nd*sizeof(npy_intp));
79
+ npy_intp *shape = ss;
80
+ npy_intp *stride = ss + nd;
81
+
82
+ shape[0] = n_elem;
83
+ stride[0] = sizeof(double);
84
+
85
+ /* construct the array interface */
86
+ PyArrayInterface *inter = (PyArrayInterface*)
87
+ malloc(sizeof(PyArrayInterface));
88
+
89
+ memset(inter, 0, sizeof(PyArrayInterface));
90
+
91
+ inter->two = 2;
92
+ inter->nd = nd;
93
+ inter->typekind = 'f';
94
+ inter->itemsize = sizeof(double);
95
+ inter->shape = shape;
96
+ inter->strides = stride;
97
+ inter->data = data;
98
+ inter->flags = NPY_ARRAY_WRITEABLE | NPY_ARRAY_NOTSWAPPED |
99
+ NPY_ARRAY_ALIGNED | NPY_ARRAY_C_CONTIGUOUS;
100
+
101
+ /* package into a capsule */
102
+ PyObject *cap = PyCapsule_New(inter, NULL, delete_array_struct);
103
+
104
+ /* save the pointer to the data */
105
+ PyCapsule_SetContext(cap, data);
106
+
107
+ fprintf(stderr, "new_array_struct\\ncap = %ld inter = %ld"
108
+ " ptr = %ld\\n", (long)cap, (long)inter, (long)data);
109
+
110
+ return cap;
111
+ """)
112
+ ]
113
+
114
+ more_init = "import_array();"
115
+
116
+ try:
117
+ import array_interface_testing
118
+ return array_interface_testing
119
+ except ImportError:
120
+ pass
121
+
122
+ # if it does not exist, build and load it
123
+ return extbuild.build_and_import_extension('array_interface_testing',
124
+ functions,
125
+ prologue=prologue,
126
+ include_dirs=[np.get_include()],
127
+ build_dir=tmp_path,
128
+ more_init=more_init)
129
+
130
+
131
+ # FIXME: numpy.testing.extbuild uses `numpy.distutils`, so this won't work on
132
+ # Python 3.12 and up.
133
+ @pytest.mark.skipif(sys.version_info >= (3, 12), reason="no numpy.distutils")
134
+ @pytest.mark.slow
135
+ def test_cstruct(get_module):
136
+
137
+ class data_source:
138
+ """
139
+ This class is for testing the timing of the PyCapsule destructor
140
+ invoked when numpy release its reference to the shared data as part of
141
+ the numpy array interface protocol. If the PyCapsule destructor is
142
+ called early the shared data is freed and invalid memory accesses will
143
+ occur.
144
+ """
145
+
146
+ def __init__(self, size, value):
147
+ self.size = size
148
+ self.value = value
149
+
150
+ @property
151
+ def __array_struct__(self):
152
+ return get_module.new_array_struct(self.size, self.value)
153
+
154
+ # write to the same stream as the C code
155
+ stderr = sys.__stderr__
156
+
157
+ # used to validate the shared data.
158
+ expected_value = -3.1415
159
+ multiplier = -10000.0
160
+
161
+ # create some data to share with numpy via the array interface
162
+ # assign the data an expected value.
163
+ stderr.write(' ---- create an object to share data ---- \n')
164
+ buf = data_source(256, expected_value)
165
+ stderr.write(' ---- OK!\n\n')
166
+
167
+ # share the data
168
+ stderr.write(' ---- share data via the array interface protocol ---- \n')
169
+ arr = np.array(buf, copy=False)
170
+ stderr.write('arr.__array_interface___ = %s\n' % (
171
+ str(arr.__array_interface__)))
172
+ stderr.write('arr.base = %s\n' % (str(arr.base)))
173
+ stderr.write(' ---- OK!\n\n')
174
+
175
+ # release the source of the shared data. this will not release the data
176
+ # that was shared with numpy, that is done in the PyCapsule destructor.
177
+ stderr.write(' ---- destroy the object that shared data ---- \n')
178
+ buf = None
179
+ stderr.write(' ---- OK!\n\n')
180
+
181
+ # check that we got the expected data. If the PyCapsule destructor we
182
+ # defined was prematurely called then this test will fail because our
183
+ # destructor sets the elements of the array to NaN before free'ing the
184
+ # buffer. Reading the values here may also cause a SEGV
185
+ assert np.allclose(arr, expected_value)
186
+
187
+ # read the data. If the PyCapsule destructor we defined was prematurely
188
+ # called then reading the values here may cause a SEGV and will be reported
189
+ # as invalid reads by valgrind
190
+ stderr.write(' ---- read shared data ---- \n')
191
+ stderr.write('arr = %s\n' % (str(arr)))
192
+ stderr.write(' ---- OK!\n\n')
193
+
194
+ # write to the shared buffer. If the shared data was prematurely deleted
195
+ # this will may cause a SEGV and valgrind will report invalid writes
196
+ stderr.write(' ---- modify shared data ---- \n')
197
+ arr *= multiplier
198
+ expected_value *= multiplier
199
+ stderr.write('arr.__array_interface___ = %s\n' % (
200
+ str(arr.__array_interface__)))
201
+ stderr.write('arr.base = %s\n' % (str(arr.base)))
202
+ stderr.write(' ---- OK!\n\n')
203
+
204
+ # read the data. If the shared data was prematurely deleted this
205
+ # will may cause a SEGV and valgrind will report invalid reads
206
+ stderr.write(' ---- read modified shared data ---- \n')
207
+ stderr.write('arr = %s\n' % (str(arr)))
208
+ stderr.write(' ---- OK!\n\n')
209
+
210
+ # check that we got the expected data. If the PyCapsule destructor we
211
+ # defined was prematurely called then this test will fail because our
212
+ # destructor sets the elements of the array to NaN before free'ing the
213
+ # buffer. Reading the values here may also cause a SEGV
214
+ assert np.allclose(arr, expected_value)
215
+
216
+ # free the shared data, the PyCapsule destructor should run here
217
+ stderr.write(' ---- free shared data ---- \n')
218
+ arr = None
219
+ stderr.write(' ---- OK!\n\n')
.venv/lib/python3.11/site-packages/numpy/core/tests/test_arraymethod.py ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ This file tests the generic aspects of ArrayMethod. At the time of writing
3
+ this is private API, but when added, public API may be added here.
4
+ """
5
+
6
+ from __future__ import annotations
7
+
8
+ import sys
9
+ import types
10
+ from typing import Any
11
+
12
+ import pytest
13
+
14
+ import numpy as np
15
+ from numpy.core._multiarray_umath import _get_castingimpl as get_castingimpl
16
+
17
+
18
+ class TestResolveDescriptors:
19
+ # Test mainly error paths of the resolve_descriptors function,
20
+ # note that the `casting_unittests` tests exercise this non-error paths.
21
+
22
+ # Casting implementations are the main/only current user:
23
+ method = get_castingimpl(type(np.dtype("d")), type(np.dtype("f")))
24
+
25
+ @pytest.mark.parametrize("args", [
26
+ (True,), # Not a tuple.
27
+ ((None,)), # Too few elements
28
+ ((None, None, None),), # Too many
29
+ ((None, None),), # Input dtype is None, which is invalid.
30
+ ((np.dtype("d"), True),), # Output dtype is not a dtype
31
+ ((np.dtype("f"), None),), # Input dtype does not match method
32
+ ])
33
+ def test_invalid_arguments(self, args):
34
+ with pytest.raises(TypeError):
35
+ self.method._resolve_descriptors(*args)
36
+
37
+
38
+ class TestSimpleStridedCall:
39
+ # Test mainly error paths of the resolve_descriptors function,
40
+ # note that the `casting_unittests` tests exercise this non-error paths.
41
+
42
+ # Casting implementations are the main/only current user:
43
+ method = get_castingimpl(type(np.dtype("d")), type(np.dtype("f")))
44
+
45
+ @pytest.mark.parametrize(["args", "error"], [
46
+ ((True,), TypeError), # Not a tuple
47
+ (((None,),), TypeError), # Too few elements
48
+ ((None, None), TypeError), # Inputs are not arrays.
49
+ (((None, None, None),), TypeError), # Too many
50
+ (((np.arange(3), np.arange(3)),), TypeError), # Incorrect dtypes
51
+ (((np.ones(3, dtype=">d"), np.ones(3, dtype="<f")),),
52
+ TypeError), # Does not support byte-swapping
53
+ (((np.ones((2, 2), dtype="d"), np.ones((2, 2), dtype="f")),),
54
+ ValueError), # not 1-D
55
+ (((np.ones(3, dtype="d"), np.ones(4, dtype="f")),),
56
+ ValueError), # different length
57
+ (((np.frombuffer(b"\0x00"*3*2, dtype="d"),
58
+ np.frombuffer(b"\0x00"*3, dtype="f")),),
59
+ ValueError), # output not writeable
60
+ ])
61
+ def test_invalid_arguments(self, args, error):
62
+ # This is private API, which may be modified freely
63
+ with pytest.raises(error):
64
+ self.method._simple_strided_call(*args)
65
+
66
+
67
+ @pytest.mark.parametrize(
68
+ "cls", [np.ndarray, np.recarray, np.chararray, np.matrix, np.memmap]
69
+ )
70
+ class TestClassGetItem:
71
+ def test_class_getitem(self, cls: type[np.ndarray]) -> None:
72
+ """Test `ndarray.__class_getitem__`."""
73
+ alias = cls[Any, Any]
74
+ assert isinstance(alias, types.GenericAlias)
75
+ assert alias.__origin__ is cls
76
+
77
+ @pytest.mark.parametrize("arg_len", range(4))
78
+ def test_subscript_tup(self, cls: type[np.ndarray], arg_len: int) -> None:
79
+ arg_tup = (Any,) * arg_len
80
+ if arg_len in (1, 2):
81
+ assert cls[arg_tup]
82
+ else:
83
+ match = f"Too {'few' if arg_len == 0 else 'many'} arguments"
84
+ with pytest.raises(TypeError, match=match):
85
+ cls[arg_tup]
.venv/lib/python3.11/site-packages/numpy/core/tests/test_arrayprint.py ADDED
@@ -0,0 +1,1047 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys
2
+ import gc
3
+ from hypothesis import given
4
+ from hypothesis.extra import numpy as hynp
5
+ import pytest
6
+
7
+ import numpy as np
8
+ from numpy.testing import (
9
+ assert_, assert_equal, assert_raises, assert_warns, HAS_REFCOUNT,
10
+ assert_raises_regex,
11
+ )
12
+ from numpy.core.arrayprint import _typelessdata
13
+ import textwrap
14
+
15
+ class TestArrayRepr:
16
+ def test_nan_inf(self):
17
+ x = np.array([np.nan, np.inf])
18
+ assert_equal(repr(x), 'array([nan, inf])')
19
+
20
+ def test_subclass(self):
21
+ class sub(np.ndarray): pass
22
+
23
+ # one dimensional
24
+ x1d = np.array([1, 2]).view(sub)
25
+ assert_equal(repr(x1d), 'sub([1, 2])')
26
+
27
+ # two dimensional
28
+ x2d = np.array([[1, 2], [3, 4]]).view(sub)
29
+ assert_equal(repr(x2d),
30
+ 'sub([[1, 2],\n'
31
+ ' [3, 4]])')
32
+
33
+ # two dimensional with flexible dtype
34
+ xstruct = np.ones((2,2), dtype=[('a', '<i4')]).view(sub)
35
+ assert_equal(repr(xstruct),
36
+ "sub([[(1,), (1,)],\n"
37
+ " [(1,), (1,)]], dtype=[('a', '<i4')])"
38
+ )
39
+
40
+ @pytest.mark.xfail(reason="See gh-10544")
41
+ def test_object_subclass(self):
42
+ class sub(np.ndarray):
43
+ def __new__(cls, inp):
44
+ obj = np.asarray(inp).view(cls)
45
+ return obj
46
+
47
+ def __getitem__(self, ind):
48
+ ret = super().__getitem__(ind)
49
+ return sub(ret)
50
+
51
+ # test that object + subclass is OK:
52
+ x = sub([None, None])
53
+ assert_equal(repr(x), 'sub([None, None], dtype=object)')
54
+ assert_equal(str(x), '[None None]')
55
+
56
+ x = sub([None, sub([None, None])])
57
+ assert_equal(repr(x),
58
+ 'sub([None, sub([None, None], dtype=object)], dtype=object)')
59
+ assert_equal(str(x), '[None sub([None, None], dtype=object)]')
60
+
61
+ def test_0d_object_subclass(self):
62
+ # make sure that subclasses which return 0ds instead
63
+ # of scalars don't cause infinite recursion in str
64
+ class sub(np.ndarray):
65
+ def __new__(cls, inp):
66
+ obj = np.asarray(inp).view(cls)
67
+ return obj
68
+
69
+ def __getitem__(self, ind):
70
+ ret = super().__getitem__(ind)
71
+ return sub(ret)
72
+
73
+ x = sub(1)
74
+ assert_equal(repr(x), 'sub(1)')
75
+ assert_equal(str(x), '1')
76
+
77
+ x = sub([1, 1])
78
+ assert_equal(repr(x), 'sub([1, 1])')
79
+ assert_equal(str(x), '[1 1]')
80
+
81
+ # check it works properly with object arrays too
82
+ x = sub(None)
83
+ assert_equal(repr(x), 'sub(None, dtype=object)')
84
+ assert_equal(str(x), 'None')
85
+
86
+ # plus recursive object arrays (even depth > 1)
87
+ y = sub(None)
88
+ x[()] = y
89
+ y[()] = x
90
+ assert_equal(repr(x),
91
+ 'sub(sub(sub(..., dtype=object), dtype=object), dtype=object)')
92
+ assert_equal(str(x), '...')
93
+ x[()] = 0 # resolve circular references for garbage collector
94
+
95
+ # nested 0d-subclass-object
96
+ x = sub(None)
97
+ x[()] = sub(None)
98
+ assert_equal(repr(x), 'sub(sub(None, dtype=object), dtype=object)')
99
+ assert_equal(str(x), 'None')
100
+
101
+ # gh-10663
102
+ class DuckCounter(np.ndarray):
103
+ def __getitem__(self, item):
104
+ result = super().__getitem__(item)
105
+ if not isinstance(result, DuckCounter):
106
+ result = result[...].view(DuckCounter)
107
+ return result
108
+
109
+ def to_string(self):
110
+ return {0: 'zero', 1: 'one', 2: 'two'}.get(self.item(), 'many')
111
+
112
+ def __str__(self):
113
+ if self.shape == ():
114
+ return self.to_string()
115
+ else:
116
+ fmt = {'all': lambda x: x.to_string()}
117
+ return np.array2string(self, formatter=fmt)
118
+
119
+ dc = np.arange(5).view(DuckCounter)
120
+ assert_equal(str(dc), "[zero one two many many]")
121
+ assert_equal(str(dc[0]), "zero")
122
+
123
+ def test_self_containing(self):
124
+ arr0d = np.array(None)
125
+ arr0d[()] = arr0d
126
+ assert_equal(repr(arr0d),
127
+ 'array(array(..., dtype=object), dtype=object)')
128
+ arr0d[()] = 0 # resolve recursion for garbage collector
129
+
130
+ arr1d = np.array([None, None])
131
+ arr1d[1] = arr1d
132
+ assert_equal(repr(arr1d),
133
+ 'array([None, array(..., dtype=object)], dtype=object)')
134
+ arr1d[1] = 0 # resolve recursion for garbage collector
135
+
136
+ first = np.array(None)
137
+ second = np.array(None)
138
+ first[()] = second
139
+ second[()] = first
140
+ assert_equal(repr(first),
141
+ 'array(array(array(..., dtype=object), dtype=object), dtype=object)')
142
+ first[()] = 0 # resolve circular references for garbage collector
143
+
144
+ def test_containing_list(self):
145
+ # printing square brackets directly would be ambiguuous
146
+ arr1d = np.array([None, None])
147
+ arr1d[0] = [1, 2]
148
+ arr1d[1] = [3]
149
+ assert_equal(repr(arr1d),
150
+ 'array([list([1, 2]), list([3])], dtype=object)')
151
+
152
+ def test_void_scalar_recursion(self):
153
+ # gh-9345
154
+ repr(np.void(b'test')) # RecursionError ?
155
+
156
+ def test_fieldless_structured(self):
157
+ # gh-10366
158
+ no_fields = np.dtype([])
159
+ arr_no_fields = np.empty(4, dtype=no_fields)
160
+ assert_equal(repr(arr_no_fields), 'array([(), (), (), ()], dtype=[])')
161
+
162
+
163
+ class TestComplexArray:
164
+ def test_str(self):
165
+ rvals = [0, 1, -1, np.inf, -np.inf, np.nan]
166
+ cvals = [complex(rp, ip) for rp in rvals for ip in rvals]
167
+ dtypes = [np.complex64, np.cdouble, np.clongdouble]
168
+ actual = [str(np.array([c], dt)) for c in cvals for dt in dtypes]
169
+ wanted = [
170
+ '[0.+0.j]', '[0.+0.j]', '[0.+0.j]',
171
+ '[0.+1.j]', '[0.+1.j]', '[0.+1.j]',
172
+ '[0.-1.j]', '[0.-1.j]', '[0.-1.j]',
173
+ '[0.+infj]', '[0.+infj]', '[0.+infj]',
174
+ '[0.-infj]', '[0.-infj]', '[0.-infj]',
175
+ '[0.+nanj]', '[0.+nanj]', '[0.+nanj]',
176
+ '[1.+0.j]', '[1.+0.j]', '[1.+0.j]',
177
+ '[1.+1.j]', '[1.+1.j]', '[1.+1.j]',
178
+ '[1.-1.j]', '[1.-1.j]', '[1.-1.j]',
179
+ '[1.+infj]', '[1.+infj]', '[1.+infj]',
180
+ '[1.-infj]', '[1.-infj]', '[1.-infj]',
181
+ '[1.+nanj]', '[1.+nanj]', '[1.+nanj]',
182
+ '[-1.+0.j]', '[-1.+0.j]', '[-1.+0.j]',
183
+ '[-1.+1.j]', '[-1.+1.j]', '[-1.+1.j]',
184
+ '[-1.-1.j]', '[-1.-1.j]', '[-1.-1.j]',
185
+ '[-1.+infj]', '[-1.+infj]', '[-1.+infj]',
186
+ '[-1.-infj]', '[-1.-infj]', '[-1.-infj]',
187
+ '[-1.+nanj]', '[-1.+nanj]', '[-1.+nanj]',
188
+ '[inf+0.j]', '[inf+0.j]', '[inf+0.j]',
189
+ '[inf+1.j]', '[inf+1.j]', '[inf+1.j]',
190
+ '[inf-1.j]', '[inf-1.j]', '[inf-1.j]',
191
+ '[inf+infj]', '[inf+infj]', '[inf+infj]',
192
+ '[inf-infj]', '[inf-infj]', '[inf-infj]',
193
+ '[inf+nanj]', '[inf+nanj]', '[inf+nanj]',
194
+ '[-inf+0.j]', '[-inf+0.j]', '[-inf+0.j]',
195
+ '[-inf+1.j]', '[-inf+1.j]', '[-inf+1.j]',
196
+ '[-inf-1.j]', '[-inf-1.j]', '[-inf-1.j]',
197
+ '[-inf+infj]', '[-inf+infj]', '[-inf+infj]',
198
+ '[-inf-infj]', '[-inf-infj]', '[-inf-infj]',
199
+ '[-inf+nanj]', '[-inf+nanj]', '[-inf+nanj]',
200
+ '[nan+0.j]', '[nan+0.j]', '[nan+0.j]',
201
+ '[nan+1.j]', '[nan+1.j]', '[nan+1.j]',
202
+ '[nan-1.j]', '[nan-1.j]', '[nan-1.j]',
203
+ '[nan+infj]', '[nan+infj]', '[nan+infj]',
204
+ '[nan-infj]', '[nan-infj]', '[nan-infj]',
205
+ '[nan+nanj]', '[nan+nanj]', '[nan+nanj]']
206
+
207
+ for res, val in zip(actual, wanted):
208
+ assert_equal(res, val)
209
+
210
+ class TestArray2String:
211
+ def test_basic(self):
212
+ """Basic test of array2string."""
213
+ a = np.arange(3)
214
+ assert_(np.array2string(a) == '[0 1 2]')
215
+ assert_(np.array2string(a, max_line_width=4, legacy='1.13') == '[0 1\n 2]')
216
+ assert_(np.array2string(a, max_line_width=4) == '[0\n 1\n 2]')
217
+
218
+ def test_unexpected_kwarg(self):
219
+ # ensure than an appropriate TypeError
220
+ # is raised when array2string receives
221
+ # an unexpected kwarg
222
+
223
+ with assert_raises_regex(TypeError, 'nonsense'):
224
+ np.array2string(np.array([1, 2, 3]),
225
+ nonsense=None)
226
+
227
+ def test_format_function(self):
228
+ """Test custom format function for each element in array."""
229
+ def _format_function(x):
230
+ if np.abs(x) < 1:
231
+ return '.'
232
+ elif np.abs(x) < 2:
233
+ return 'o'
234
+ else:
235
+ return 'O'
236
+
237
+ x = np.arange(3)
238
+ x_hex = "[0x0 0x1 0x2]"
239
+ x_oct = "[0o0 0o1 0o2]"
240
+ assert_(np.array2string(x, formatter={'all':_format_function}) ==
241
+ "[. o O]")
242
+ assert_(np.array2string(x, formatter={'int_kind':_format_function}) ==
243
+ "[. o O]")
244
+ assert_(np.array2string(x, formatter={'all':lambda x: "%.4f" % x}) ==
245
+ "[0.0000 1.0000 2.0000]")
246
+ assert_equal(np.array2string(x, formatter={'int':lambda x: hex(x)}),
247
+ x_hex)
248
+ assert_equal(np.array2string(x, formatter={'int':lambda x: oct(x)}),
249
+ x_oct)
250
+
251
+ x = np.arange(3.)
252
+ assert_(np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x}) ==
253
+ "[0.00 1.00 2.00]")
254
+ assert_(np.array2string(x, formatter={'float':lambda x: "%.2f" % x}) ==
255
+ "[0.00 1.00 2.00]")
256
+
257
+ s = np.array(['abc', 'def'])
258
+ assert_(np.array2string(s, formatter={'numpystr':lambda s: s*2}) ==
259
+ '[abcabc defdef]')
260
+
261
+ def test_structure_format_mixed(self):
262
+ dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
263
+ x = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt)
264
+ assert_equal(np.array2string(x),
265
+ "[('Sarah', [8., 7.]) ('John', [6., 7.])]")
266
+
267
+ np.set_printoptions(legacy='1.13')
268
+ try:
269
+ # for issue #5692
270
+ A = np.zeros(shape=10, dtype=[("A", "M8[s]")])
271
+ A[5:].fill(np.datetime64('NaT'))
272
+ assert_equal(
273
+ np.array2string(A),
274
+ textwrap.dedent("""\
275
+ [('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',)
276
+ ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',) ('NaT',) ('NaT',)
277
+ ('NaT',) ('NaT',) ('NaT',)]""")
278
+ )
279
+ finally:
280
+ np.set_printoptions(legacy=False)
281
+
282
+ # same again, but with non-legacy behavior
283
+ assert_equal(
284
+ np.array2string(A),
285
+ textwrap.dedent("""\
286
+ [('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',)
287
+ ('1970-01-01T00:00:00',) ('1970-01-01T00:00:00',)
288
+ ('1970-01-01T00:00:00',) ( 'NaT',)
289
+ ( 'NaT',) ( 'NaT',)
290
+ ( 'NaT',) ( 'NaT',)]""")
291
+ )
292
+
293
+ # and again, with timedeltas
294
+ A = np.full(10, 123456, dtype=[("A", "m8[s]")])
295
+ A[5:].fill(np.datetime64('NaT'))
296
+ assert_equal(
297
+ np.array2string(A),
298
+ textwrap.dedent("""\
299
+ [(123456,) (123456,) (123456,) (123456,) (123456,) ( 'NaT',) ( 'NaT',)
300
+ ( 'NaT',) ( 'NaT',) ( 'NaT',)]""")
301
+ )
302
+
303
+ def test_structure_format_int(self):
304
+ # See #8160
305
+ struct_int = np.array([([1, -1],), ([123, 1],)], dtype=[('B', 'i4', 2)])
306
+ assert_equal(np.array2string(struct_int),
307
+ "[([ 1, -1],) ([123, 1],)]")
308
+ struct_2dint = np.array([([[0, 1], [2, 3]],), ([[12, 0], [0, 0]],)],
309
+ dtype=[('B', 'i4', (2, 2))])
310
+ assert_equal(np.array2string(struct_2dint),
311
+ "[([[ 0, 1], [ 2, 3]],) ([[12, 0], [ 0, 0]],)]")
312
+
313
+ def test_structure_format_float(self):
314
+ # See #8172
315
+ array_scalar = np.array(
316
+ (1., 2.1234567890123456789, 3.), dtype=('f8,f8,f8'))
317
+ assert_equal(np.array2string(array_scalar), "(1., 2.12345679, 3.)")
318
+
319
+ def test_unstructured_void_repr(self):
320
+ a = np.array([27, 91, 50, 75, 7, 65, 10, 8,
321
+ 27, 91, 51, 49,109, 82,101,100], dtype='u1').view('V8')
322
+ assert_equal(repr(a[0]), r"void(b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08')")
323
+ assert_equal(str(a[0]), r"b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'")
324
+ assert_equal(repr(a),
325
+ r"array([b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'," "\n"
326
+ r" b'\x1B\x5B\x33\x31\x6D\x52\x65\x64'], dtype='|V8')")
327
+
328
+ assert_equal(eval(repr(a), vars(np)), a)
329
+ assert_equal(eval(repr(a[0]), vars(np)), a[0])
330
+
331
+ def test_edgeitems_kwarg(self):
332
+ # previously the global print options would be taken over the kwarg
333
+ arr = np.zeros(3, int)
334
+ assert_equal(
335
+ np.array2string(arr, edgeitems=1, threshold=0),
336
+ "[0 ... 0]"
337
+ )
338
+
339
+ def test_summarize_1d(self):
340
+ A = np.arange(1001)
341
+ strA = '[ 0 1 2 ... 998 999 1000]'
342
+ assert_equal(str(A), strA)
343
+
344
+ reprA = 'array([ 0, 1, 2, ..., 998, 999, 1000])'
345
+ assert_equal(repr(A), reprA)
346
+
347
+ def test_summarize_2d(self):
348
+ A = np.arange(1002).reshape(2, 501)
349
+ strA = '[[ 0 1 2 ... 498 499 500]\n' \
350
+ ' [ 501 502 503 ... 999 1000 1001]]'
351
+ assert_equal(str(A), strA)
352
+
353
+ reprA = 'array([[ 0, 1, 2, ..., 498, 499, 500],\n' \
354
+ ' [ 501, 502, 503, ..., 999, 1000, 1001]])'
355
+ assert_equal(repr(A), reprA)
356
+
357
+ def test_summarize_structure(self):
358
+ A = (np.arange(2002, dtype="<i8").reshape(2, 1001)
359
+ .view([('i', "<i8", (1001,))]))
360
+ strA = ("[[([ 0, 1, 2, ..., 998, 999, 1000],)]\n"
361
+ " [([1001, 1002, 1003, ..., 1999, 2000, 2001],)]]")
362
+ assert_equal(str(A), strA)
363
+
364
+ reprA = ("array([[([ 0, 1, 2, ..., 998, 999, 1000],)],\n"
365
+ " [([1001, 1002, 1003, ..., 1999, 2000, 2001],)]],\n"
366
+ " dtype=[('i', '<i8', (1001,))])")
367
+ assert_equal(repr(A), reprA)
368
+
369
+ B = np.ones(2002, dtype=">i8").view([('i', ">i8", (2, 1001))])
370
+ strB = "[([[1, 1, 1, ..., 1, 1, 1], [1, 1, 1, ..., 1, 1, 1]],)]"
371
+ assert_equal(str(B), strB)
372
+
373
+ reprB = (
374
+ "array([([[1, 1, 1, ..., 1, 1, 1], [1, 1, 1, ..., 1, 1, 1]],)],\n"
375
+ " dtype=[('i', '>i8', (2, 1001))])"
376
+ )
377
+ assert_equal(repr(B), reprB)
378
+
379
+ C = (np.arange(22, dtype="<i8").reshape(2, 11)
380
+ .view([('i1', "<i8"), ('i10', "<i8", (10,))]))
381
+ strC = "[[( 0, [ 1, ..., 10])]\n [(11, [12, ..., 21])]]"
382
+ assert_equal(np.array2string(C, threshold=1, edgeitems=1), strC)
383
+
384
+ def test_linewidth(self):
385
+ a = np.full(6, 1)
386
+
387
+ def make_str(a, width, **kw):
388
+ return np.array2string(a, separator="", max_line_width=width, **kw)
389
+
390
+ assert_equal(make_str(a, 8, legacy='1.13'), '[111111]')
391
+ assert_equal(make_str(a, 7, legacy='1.13'), '[111111]')
392
+ assert_equal(make_str(a, 5, legacy='1.13'), '[1111\n'
393
+ ' 11]')
394
+
395
+ assert_equal(make_str(a, 8), '[111111]')
396
+ assert_equal(make_str(a, 7), '[11111\n'
397
+ ' 1]')
398
+ assert_equal(make_str(a, 5), '[111\n'
399
+ ' 111]')
400
+
401
+ b = a[None,None,:]
402
+
403
+ assert_equal(make_str(b, 12, legacy='1.13'), '[[[111111]]]')
404
+ assert_equal(make_str(b, 9, legacy='1.13'), '[[[111111]]]')
405
+ assert_equal(make_str(b, 8, legacy='1.13'), '[[[11111\n'
406
+ ' 1]]]')
407
+
408
+ assert_equal(make_str(b, 12), '[[[111111]]]')
409
+ assert_equal(make_str(b, 9), '[[[111\n'
410
+ ' 111]]]')
411
+ assert_equal(make_str(b, 8), '[[[11\n'
412
+ ' 11\n'
413
+ ' 11]]]')
414
+
415
+ def test_wide_element(self):
416
+ a = np.array(['xxxxx'])
417
+ assert_equal(
418
+ np.array2string(a, max_line_width=5),
419
+ "['xxxxx']"
420
+ )
421
+ assert_equal(
422
+ np.array2string(a, max_line_width=5, legacy='1.13'),
423
+ "[ 'xxxxx']"
424
+ )
425
+
426
+ def test_multiline_repr(self):
427
+ class MultiLine:
428
+ def __repr__(self):
429
+ return "Line 1\nLine 2"
430
+
431
+ a = np.array([[None, MultiLine()], [MultiLine(), None]])
432
+
433
+ assert_equal(
434
+ np.array2string(a),
435
+ '[[None Line 1\n'
436
+ ' Line 2]\n'
437
+ ' [Line 1\n'
438
+ ' Line 2 None]]'
439
+ )
440
+ assert_equal(
441
+ np.array2string(a, max_line_width=5),
442
+ '[[None\n'
443
+ ' Line 1\n'
444
+ ' Line 2]\n'
445
+ ' [Line 1\n'
446
+ ' Line 2\n'
447
+ ' None]]'
448
+ )
449
+ assert_equal(
450
+ repr(a),
451
+ 'array([[None, Line 1\n'
452
+ ' Line 2],\n'
453
+ ' [Line 1\n'
454
+ ' Line 2, None]], dtype=object)'
455
+ )
456
+
457
+ class MultiLineLong:
458
+ def __repr__(self):
459
+ return "Line 1\nLooooooooooongestLine2\nLongerLine 3"
460
+
461
+ a = np.array([[None, MultiLineLong()], [MultiLineLong(), None]])
462
+ assert_equal(
463
+ repr(a),
464
+ 'array([[None, Line 1\n'
465
+ ' LooooooooooongestLine2\n'
466
+ ' LongerLine 3 ],\n'
467
+ ' [Line 1\n'
468
+ ' LooooooooooongestLine2\n'
469
+ ' LongerLine 3 , None]], dtype=object)'
470
+ )
471
+ assert_equal(
472
+ np.array_repr(a, 20),
473
+ 'array([[None,\n'
474
+ ' Line 1\n'
475
+ ' LooooooooooongestLine2\n'
476
+ ' LongerLine 3 ],\n'
477
+ ' [Line 1\n'
478
+ ' LooooooooooongestLine2\n'
479
+ ' LongerLine 3 ,\n'
480
+ ' None]],\n'
481
+ ' dtype=object)'
482
+ )
483
+
484
+ def test_nested_array_repr(self):
485
+ a = np.empty((2, 2), dtype=object)
486
+ a[0, 0] = np.eye(2)
487
+ a[0, 1] = np.eye(3)
488
+ a[1, 0] = None
489
+ a[1, 1] = np.ones((3, 1))
490
+ assert_equal(
491
+ repr(a),
492
+ 'array([[array([[1., 0.],\n'
493
+ ' [0., 1.]]), array([[1., 0., 0.],\n'
494
+ ' [0., 1., 0.],\n'
495
+ ' [0., 0., 1.]])],\n'
496
+ ' [None, array([[1.],\n'
497
+ ' [1.],\n'
498
+ ' [1.]])]], dtype=object)'
499
+ )
500
+
501
+ @given(hynp.from_dtype(np.dtype("U")))
502
+ def test_any_text(self, text):
503
+ # This test checks that, given any value that can be represented in an
504
+ # array of dtype("U") (i.e. unicode string), ...
505
+ a = np.array([text, text, text])
506
+ # casting a list of them to an array does not e.g. truncate the value
507
+ assert_equal(a[0], text)
508
+ # and that np.array2string puts a newline in the expected location
509
+ expected_repr = "[{0!r} {0!r}\n {0!r}]".format(text)
510
+ result = np.array2string(a, max_line_width=len(repr(text)) * 2 + 3)
511
+ assert_equal(result, expected_repr)
512
+
513
+ @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
514
+ def test_refcount(self):
515
+ # make sure we do not hold references to the array due to a recursive
516
+ # closure (gh-10620)
517
+ gc.disable()
518
+ a = np.arange(2)
519
+ r1 = sys.getrefcount(a)
520
+ np.array2string(a)
521
+ np.array2string(a)
522
+ r2 = sys.getrefcount(a)
523
+ gc.collect()
524
+ gc.enable()
525
+ assert_(r1 == r2)
526
+
527
+ class TestPrintOptions:
528
+ """Test getting and setting global print options."""
529
+
530
+ def setup_method(self):
531
+ self.oldopts = np.get_printoptions()
532
+
533
+ def teardown_method(self):
534
+ np.set_printoptions(**self.oldopts)
535
+
536
+ def test_basic(self):
537
+ x = np.array([1.5, 0, 1.234567890])
538
+ assert_equal(repr(x), "array([1.5 , 0. , 1.23456789])")
539
+ np.set_printoptions(precision=4)
540
+ assert_equal(repr(x), "array([1.5 , 0. , 1.2346])")
541
+
542
+ def test_precision_zero(self):
543
+ np.set_printoptions(precision=0)
544
+ for values, string in (
545
+ ([0.], "0."), ([.3], "0."), ([-.3], "-0."), ([.7], "1."),
546
+ ([1.5], "2."), ([-1.5], "-2."), ([-15.34], "-15."),
547
+ ([100.], "100."), ([.2, -1, 122.51], " 0., -1., 123."),
548
+ ([0], "0"), ([-12], "-12"), ([complex(.3, -.7)], "0.-1.j")):
549
+ x = np.array(values)
550
+ assert_equal(repr(x), "array([%s])" % string)
551
+
552
+ def test_formatter(self):
553
+ x = np.arange(3)
554
+ np.set_printoptions(formatter={'all':lambda x: str(x-1)})
555
+ assert_equal(repr(x), "array([-1, 0, 1])")
556
+
557
+ def test_formatter_reset(self):
558
+ x = np.arange(3)
559
+ np.set_printoptions(formatter={'all':lambda x: str(x-1)})
560
+ assert_equal(repr(x), "array([-1, 0, 1])")
561
+ np.set_printoptions(formatter={'int':None})
562
+ assert_equal(repr(x), "array([0, 1, 2])")
563
+
564
+ np.set_printoptions(formatter={'all':lambda x: str(x-1)})
565
+ assert_equal(repr(x), "array([-1, 0, 1])")
566
+ np.set_printoptions(formatter={'all':None})
567
+ assert_equal(repr(x), "array([0, 1, 2])")
568
+
569
+ np.set_printoptions(formatter={'int':lambda x: str(x-1)})
570
+ assert_equal(repr(x), "array([-1, 0, 1])")
571
+ np.set_printoptions(formatter={'int_kind':None})
572
+ assert_equal(repr(x), "array([0, 1, 2])")
573
+
574
+ x = np.arange(3.)
575
+ np.set_printoptions(formatter={'float':lambda x: str(x-1)})
576
+ assert_equal(repr(x), "array([-1.0, 0.0, 1.0])")
577
+ np.set_printoptions(formatter={'float_kind':None})
578
+ assert_equal(repr(x), "array([0., 1., 2.])")
579
+
580
+ def test_0d_arrays(self):
581
+ assert_equal(str(np.array('café', '<U4')), 'café')
582
+
583
+ assert_equal(repr(np.array('café', '<U4')),
584
+ "array('café', dtype='<U4')")
585
+ assert_equal(str(np.array('test', np.str_)), 'test')
586
+
587
+ a = np.zeros(1, dtype=[('a', '<i4', (3,))])
588
+ assert_equal(str(a[0]), '([0, 0, 0],)')
589
+
590
+ assert_equal(repr(np.datetime64('2005-02-25')[...]),
591
+ "array('2005-02-25', dtype='datetime64[D]')")
592
+
593
+ assert_equal(repr(np.timedelta64('10', 'Y')[...]),
594
+ "array(10, dtype='timedelta64[Y]')")
595
+
596
+ # repr of 0d arrays is affected by printoptions
597
+ x = np.array(1)
598
+ np.set_printoptions(formatter={'all':lambda x: "test"})
599
+ assert_equal(repr(x), "array(test)")
600
+ # str is unaffected
601
+ assert_equal(str(x), "1")
602
+
603
+ # check `style` arg raises
604
+ assert_warns(DeprecationWarning, np.array2string,
605
+ np.array(1.), style=repr)
606
+ # but not in legacy mode
607
+ np.array2string(np.array(1.), style=repr, legacy='1.13')
608
+ # gh-10934 style was broken in legacy mode, check it works
609
+ np.array2string(np.array(1.), legacy='1.13')
610
+
611
+ def test_float_spacing(self):
612
+ x = np.array([1., 2., 3.])
613
+ y = np.array([1., 2., -10.])
614
+ z = np.array([100., 2., -1.])
615
+ w = np.array([-100., 2., 1.])
616
+
617
+ assert_equal(repr(x), 'array([1., 2., 3.])')
618
+ assert_equal(repr(y), 'array([ 1., 2., -10.])')
619
+ assert_equal(repr(np.array(y[0])), 'array(1.)')
620
+ assert_equal(repr(np.array(y[-1])), 'array(-10.)')
621
+ assert_equal(repr(z), 'array([100., 2., -1.])')
622
+ assert_equal(repr(w), 'array([-100., 2., 1.])')
623
+
624
+ assert_equal(repr(np.array([np.nan, np.inf])), 'array([nan, inf])')
625
+ assert_equal(repr(np.array([np.nan, -np.inf])), 'array([ nan, -inf])')
626
+
627
+ x = np.array([np.inf, 100000, 1.1234])
628
+ y = np.array([np.inf, 100000, -1.1234])
629
+ z = np.array([np.inf, 1.1234, -1e120])
630
+ np.set_printoptions(precision=2)
631
+ assert_equal(repr(x), 'array([ inf, 1.00e+05, 1.12e+00])')
632
+ assert_equal(repr(y), 'array([ inf, 1.00e+05, -1.12e+00])')
633
+ assert_equal(repr(z), 'array([ inf, 1.12e+000, -1.00e+120])')
634
+
635
+ def test_bool_spacing(self):
636
+ assert_equal(repr(np.array([True, True])),
637
+ 'array([ True, True])')
638
+ assert_equal(repr(np.array([True, False])),
639
+ 'array([ True, False])')
640
+ assert_equal(repr(np.array([True])),
641
+ 'array([ True])')
642
+ assert_equal(repr(np.array(True)),
643
+ 'array(True)')
644
+ assert_equal(repr(np.array(False)),
645
+ 'array(False)')
646
+
647
+ def test_sign_spacing(self):
648
+ a = np.arange(4.)
649
+ b = np.array([1.234e9])
650
+ c = np.array([1.0 + 1.0j, 1.123456789 + 1.123456789j], dtype='c16')
651
+
652
+ assert_equal(repr(a), 'array([0., 1., 2., 3.])')
653
+ assert_equal(repr(np.array(1.)), 'array(1.)')
654
+ assert_equal(repr(b), 'array([1.234e+09])')
655
+ assert_equal(repr(np.array([0.])), 'array([0.])')
656
+ assert_equal(repr(c),
657
+ "array([1. +1.j , 1.12345679+1.12345679j])")
658
+ assert_equal(repr(np.array([0., -0.])), 'array([ 0., -0.])')
659
+
660
+ np.set_printoptions(sign=' ')
661
+ assert_equal(repr(a), 'array([ 0., 1., 2., 3.])')
662
+ assert_equal(repr(np.array(1.)), 'array( 1.)')
663
+ assert_equal(repr(b), 'array([ 1.234e+09])')
664
+ assert_equal(repr(c),
665
+ "array([ 1. +1.j , 1.12345679+1.12345679j])")
666
+ assert_equal(repr(np.array([0., -0.])), 'array([ 0., -0.])')
667
+
668
+ np.set_printoptions(sign='+')
669
+ assert_equal(repr(a), 'array([+0., +1., +2., +3.])')
670
+ assert_equal(repr(np.array(1.)), 'array(+1.)')
671
+ assert_equal(repr(b), 'array([+1.234e+09])')
672
+ assert_equal(repr(c),
673
+ "array([+1. +1.j , +1.12345679+1.12345679j])")
674
+
675
+ np.set_printoptions(legacy='1.13')
676
+ assert_equal(repr(a), 'array([ 0., 1., 2., 3.])')
677
+ assert_equal(repr(b), 'array([ 1.23400000e+09])')
678
+ assert_equal(repr(-b), 'array([ -1.23400000e+09])')
679
+ assert_equal(repr(np.array(1.)), 'array(1.0)')
680
+ assert_equal(repr(np.array([0.])), 'array([ 0.])')
681
+ assert_equal(repr(c),
682
+ "array([ 1.00000000+1.j , 1.12345679+1.12345679j])")
683
+ # gh-10383
684
+ assert_equal(str(np.array([-1., 10])), "[ -1. 10.]")
685
+
686
+ assert_raises(TypeError, np.set_printoptions, wrongarg=True)
687
+
688
+ def test_float_overflow_nowarn(self):
689
+ # make sure internal computations in FloatingFormat don't
690
+ # warn about overflow
691
+ repr(np.array([1e4, 0.1], dtype='f2'))
692
+
693
+ def test_sign_spacing_structured(self):
694
+ a = np.ones(2, dtype='<f,<f')
695
+ assert_equal(repr(a),
696
+ "array([(1., 1.), (1., 1.)], dtype=[('f0', '<f4'), ('f1', '<f4')])")
697
+ assert_equal(repr(a[0]), "(1., 1.)")
698
+
699
+ def test_floatmode(self):
700
+ x = np.array([0.6104, 0.922, 0.457, 0.0906, 0.3733, 0.007244,
701
+ 0.5933, 0.947, 0.2383, 0.4226], dtype=np.float16)
702
+ y = np.array([0.2918820979355541, 0.5064172631089138,
703
+ 0.2848750619642916, 0.4342965294660567,
704
+ 0.7326538397312751, 0.3459503329096204,
705
+ 0.0862072768214508, 0.39112753029631175],
706
+ dtype=np.float64)
707
+ z = np.arange(6, dtype=np.float16)/10
708
+ c = np.array([1.0 + 1.0j, 1.123456789 + 1.123456789j], dtype='c16')
709
+
710
+ # also make sure 1e23 is right (is between two fp numbers)
711
+ w = np.array(['1e{}'.format(i) for i in range(25)], dtype=np.float64)
712
+ # note: we construct w from the strings `1eXX` instead of doing
713
+ # `10.**arange(24)` because it turns out the two are not equivalent in
714
+ # python. On some architectures `1e23 != 10.**23`.
715
+ wp = np.array([1.234e1, 1e2, 1e123])
716
+
717
+ # unique mode
718
+ np.set_printoptions(floatmode='unique')
719
+ assert_equal(repr(x),
720
+ "array([0.6104 , 0.922 , 0.457 , 0.0906 , 0.3733 , 0.007244,\n"
721
+ " 0.5933 , 0.947 , 0.2383 , 0.4226 ], dtype=float16)")
722
+ assert_equal(repr(y),
723
+ "array([0.2918820979355541 , 0.5064172631089138 , 0.2848750619642916 ,\n"
724
+ " 0.4342965294660567 , 0.7326538397312751 , 0.3459503329096204 ,\n"
725
+ " 0.0862072768214508 , 0.39112753029631175])")
726
+ assert_equal(repr(z),
727
+ "array([0. , 0.1, 0.2, 0.3, 0.4, 0.5], dtype=float16)")
728
+ assert_equal(repr(w),
729
+ "array([1.e+00, 1.e+01, 1.e+02, 1.e+03, 1.e+04, 1.e+05, 1.e+06, 1.e+07,\n"
730
+ " 1.e+08, 1.e+09, 1.e+10, 1.e+11, 1.e+12, 1.e+13, 1.e+14, 1.e+15,\n"
731
+ " 1.e+16, 1.e+17, 1.e+18, 1.e+19, 1.e+20, 1.e+21, 1.e+22, 1.e+23,\n"
732
+ " 1.e+24])")
733
+ assert_equal(repr(wp), "array([1.234e+001, 1.000e+002, 1.000e+123])")
734
+ assert_equal(repr(c),
735
+ "array([1. +1.j , 1.123456789+1.123456789j])")
736
+
737
+ # maxprec mode, precision=8
738
+ np.set_printoptions(floatmode='maxprec', precision=8)
739
+ assert_equal(repr(x),
740
+ "array([0.6104 , 0.922 , 0.457 , 0.0906 , 0.3733 , 0.007244,\n"
741
+ " 0.5933 , 0.947 , 0.2383 , 0.4226 ], dtype=float16)")
742
+ assert_equal(repr(y),
743
+ "array([0.2918821 , 0.50641726, 0.28487506, 0.43429653, 0.73265384,\n"
744
+ " 0.34595033, 0.08620728, 0.39112753])")
745
+ assert_equal(repr(z),
746
+ "array([0. , 0.1, 0.2, 0.3, 0.4, 0.5], dtype=float16)")
747
+ assert_equal(repr(w[::5]),
748
+ "array([1.e+00, 1.e+05, 1.e+10, 1.e+15, 1.e+20])")
749
+ assert_equal(repr(wp), "array([1.234e+001, 1.000e+002, 1.000e+123])")
750
+ assert_equal(repr(c),
751
+ "array([1. +1.j , 1.12345679+1.12345679j])")
752
+
753
+ # fixed mode, precision=4
754
+ np.set_printoptions(floatmode='fixed', precision=4)
755
+ assert_equal(repr(x),
756
+ "array([0.6104, 0.9219, 0.4570, 0.0906, 0.3733, 0.0072, 0.5933, 0.9468,\n"
757
+ " 0.2383, 0.4226], dtype=float16)")
758
+ assert_equal(repr(y),
759
+ "array([0.2919, 0.5064, 0.2849, 0.4343, 0.7327, 0.3460, 0.0862, 0.3911])")
760
+ assert_equal(repr(z),
761
+ "array([0.0000, 0.1000, 0.2000, 0.3000, 0.3999, 0.5000], dtype=float16)")
762
+ assert_equal(repr(w[::5]),
763
+ "array([1.0000e+00, 1.0000e+05, 1.0000e+10, 1.0000e+15, 1.0000e+20])")
764
+ assert_equal(repr(wp), "array([1.2340e+001, 1.0000e+002, 1.0000e+123])")
765
+ assert_equal(repr(np.zeros(3)), "array([0.0000, 0.0000, 0.0000])")
766
+ assert_equal(repr(c),
767
+ "array([1.0000+1.0000j, 1.1235+1.1235j])")
768
+ # for larger precision, representation error becomes more apparent:
769
+ np.set_printoptions(floatmode='fixed', precision=8)
770
+ assert_equal(repr(z),
771
+ "array([0.00000000, 0.09997559, 0.19995117, 0.30004883, 0.39990234,\n"
772
+ " 0.50000000], dtype=float16)")
773
+
774
+ # maxprec_equal mode, precision=8
775
+ np.set_printoptions(floatmode='maxprec_equal', precision=8)
776
+ assert_equal(repr(x),
777
+ "array([0.610352, 0.921875, 0.457031, 0.090576, 0.373291, 0.007244,\n"
778
+ " 0.593262, 0.946777, 0.238281, 0.422607], dtype=float16)")
779
+ assert_equal(repr(y),
780
+ "array([0.29188210, 0.50641726, 0.28487506, 0.43429653, 0.73265384,\n"
781
+ " 0.34595033, 0.08620728, 0.39112753])")
782
+ assert_equal(repr(z),
783
+ "array([0.0, 0.1, 0.2, 0.3, 0.4, 0.5], dtype=float16)")
784
+ assert_equal(repr(w[::5]),
785
+ "array([1.e+00, 1.e+05, 1.e+10, 1.e+15, 1.e+20])")
786
+ assert_equal(repr(wp), "array([1.234e+001, 1.000e+002, 1.000e+123])")
787
+ assert_equal(repr(c),
788
+ "array([1.00000000+1.00000000j, 1.12345679+1.12345679j])")
789
+
790
+ # test unique special case (gh-18609)
791
+ a = np.float64.fromhex('-1p-97')
792
+ assert_equal(np.float64(np.array2string(a, floatmode='unique')), a)
793
+
794
+ def test_legacy_mode_scalars(self):
795
+ # in legacy mode, str of floats get truncated, and complex scalars
796
+ # use * for non-finite imaginary part
797
+ np.set_printoptions(legacy='1.13')
798
+ assert_equal(str(np.float64(1.123456789123456789)), '1.12345678912')
799
+ assert_equal(str(np.complex128(complex(1, np.nan))), '(1+nan*j)')
800
+
801
+ np.set_printoptions(legacy=False)
802
+ assert_equal(str(np.float64(1.123456789123456789)),
803
+ '1.1234567891234568')
804
+ assert_equal(str(np.complex128(complex(1, np.nan))), '(1+nanj)')
805
+
806
+ def test_legacy_stray_comma(self):
807
+ np.set_printoptions(legacy='1.13')
808
+ assert_equal(str(np.arange(10000)), '[ 0 1 2 ..., 9997 9998 9999]')
809
+
810
+ np.set_printoptions(legacy=False)
811
+ assert_equal(str(np.arange(10000)), '[ 0 1 2 ... 9997 9998 9999]')
812
+
813
+ def test_dtype_linewidth_wrapping(self):
814
+ np.set_printoptions(linewidth=75)
815
+ assert_equal(repr(np.arange(10,20., dtype='f4')),
816
+ "array([10., 11., 12., 13., 14., 15., 16., 17., 18., 19.], dtype=float32)")
817
+ assert_equal(repr(np.arange(10,23., dtype='f4')), textwrap.dedent("""\
818
+ array([10., 11., 12., 13., 14., 15., 16., 17., 18., 19., 20., 21., 22.],
819
+ dtype=float32)"""))
820
+
821
+ styp = '<U4'
822
+ assert_equal(repr(np.ones(3, dtype=styp)),
823
+ "array(['1', '1', '1'], dtype='{}')".format(styp))
824
+ assert_equal(repr(np.ones(12, dtype=styp)), textwrap.dedent("""\
825
+ array(['1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1', '1'],
826
+ dtype='{}')""".format(styp)))
827
+
828
+ @pytest.mark.parametrize(
829
+ ['native'],
830
+ [
831
+ ('bool',),
832
+ ('uint8',),
833
+ ('uint16',),
834
+ ('uint32',),
835
+ ('uint64',),
836
+ ('int8',),
837
+ ('int16',),
838
+ ('int32',),
839
+ ('int64',),
840
+ ('float16',),
841
+ ('float32',),
842
+ ('float64',),
843
+ ('U1',), # 4-byte width string
844
+ ],
845
+ )
846
+ def test_dtype_endianness_repr(self, native):
847
+ '''
848
+ there was an issue where
849
+ repr(array([0], dtype='<u2')) and repr(array([0], dtype='>u2'))
850
+ both returned the same thing:
851
+ array([0], dtype=uint16)
852
+ even though their dtypes have different endianness.
853
+ '''
854
+ native_dtype = np.dtype(native)
855
+ non_native_dtype = native_dtype.newbyteorder()
856
+ non_native_repr = repr(np.array([1], non_native_dtype))
857
+ native_repr = repr(np.array([1], native_dtype))
858
+ # preserve the sensible default of only showing dtype if nonstandard
859
+ assert ('dtype' in native_repr) ^ (native_dtype in _typelessdata),\
860
+ ("an array's repr should show dtype if and only if the type "
861
+ 'of the array is NOT one of the standard types '
862
+ '(e.g., int32, bool, float64).')
863
+ if non_native_dtype.itemsize > 1:
864
+ # if the type is >1 byte, the non-native endian version
865
+ # must show endianness.
866
+ assert non_native_repr != native_repr
867
+ assert f"dtype='{non_native_dtype.byteorder}" in non_native_repr
868
+
869
+ def test_linewidth_repr(self):
870
+ a = np.full(7, fill_value=2)
871
+ np.set_printoptions(linewidth=17)
872
+ assert_equal(
873
+ repr(a),
874
+ textwrap.dedent("""\
875
+ array([2, 2, 2,
876
+ 2, 2, 2,
877
+ 2])""")
878
+ )
879
+ np.set_printoptions(linewidth=17, legacy='1.13')
880
+ assert_equal(
881
+ repr(a),
882
+ textwrap.dedent("""\
883
+ array([2, 2, 2,
884
+ 2, 2, 2, 2])""")
885
+ )
886
+
887
+ a = np.full(8, fill_value=2)
888
+
889
+ np.set_printoptions(linewidth=18, legacy=False)
890
+ assert_equal(
891
+ repr(a),
892
+ textwrap.dedent("""\
893
+ array([2, 2, 2,
894
+ 2, 2, 2,
895
+ 2, 2])""")
896
+ )
897
+
898
+ np.set_printoptions(linewidth=18, legacy='1.13')
899
+ assert_equal(
900
+ repr(a),
901
+ textwrap.dedent("""\
902
+ array([2, 2, 2, 2,
903
+ 2, 2, 2, 2])""")
904
+ )
905
+
906
+ def test_linewidth_str(self):
907
+ a = np.full(18, fill_value=2)
908
+ np.set_printoptions(linewidth=18)
909
+ assert_equal(
910
+ str(a),
911
+ textwrap.dedent("""\
912
+ [2 2 2 2 2 2 2 2
913
+ 2 2 2 2 2 2 2 2
914
+ 2 2]""")
915
+ )
916
+ np.set_printoptions(linewidth=18, legacy='1.13')
917
+ assert_equal(
918
+ str(a),
919
+ textwrap.dedent("""\
920
+ [2 2 2 2 2 2 2 2 2
921
+ 2 2 2 2 2 2 2 2 2]""")
922
+ )
923
+
924
+ def test_edgeitems(self):
925
+ np.set_printoptions(edgeitems=1, threshold=1)
926
+ a = np.arange(27).reshape((3, 3, 3))
927
+ assert_equal(
928
+ repr(a),
929
+ textwrap.dedent("""\
930
+ array([[[ 0, ..., 2],
931
+ ...,
932
+ [ 6, ..., 8]],
933
+
934
+ ...,
935
+
936
+ [[18, ..., 20],
937
+ ...,
938
+ [24, ..., 26]]])""")
939
+ )
940
+
941
+ b = np.zeros((3, 3, 1, 1))
942
+ assert_equal(
943
+ repr(b),
944
+ textwrap.dedent("""\
945
+ array([[[[0.]],
946
+
947
+ ...,
948
+
949
+ [[0.]]],
950
+
951
+
952
+ ...,
953
+
954
+
955
+ [[[0.]],
956
+
957
+ ...,
958
+
959
+ [[0.]]]])""")
960
+ )
961
+
962
+ # 1.13 had extra trailing spaces, and was missing newlines
963
+ np.set_printoptions(legacy='1.13')
964
+
965
+ assert_equal(
966
+ repr(a),
967
+ textwrap.dedent("""\
968
+ array([[[ 0, ..., 2],
969
+ ...,
970
+ [ 6, ..., 8]],
971
+
972
+ ...,
973
+ [[18, ..., 20],
974
+ ...,
975
+ [24, ..., 26]]])""")
976
+ )
977
+
978
+ assert_equal(
979
+ repr(b),
980
+ textwrap.dedent("""\
981
+ array([[[[ 0.]],
982
+
983
+ ...,
984
+ [[ 0.]]],
985
+
986
+
987
+ ...,
988
+ [[[ 0.]],
989
+
990
+ ...,
991
+ [[ 0.]]]])""")
992
+ )
993
+
994
+ def test_edgeitems_structured(self):
995
+ np.set_printoptions(edgeitems=1, threshold=1)
996
+ A = np.arange(5*2*3, dtype="<i8").view([('i', "<i8", (5, 2, 3))])
997
+ reprA = (
998
+ "array([([[[ 0, ..., 2], [ 3, ..., 5]], ..., "
999
+ "[[24, ..., 26], [27, ..., 29]]],)],\n"
1000
+ " dtype=[('i', '<i8', (5, 2, 3))])"
1001
+ )
1002
+ assert_equal(repr(A), reprA)
1003
+
1004
+ def test_bad_args(self):
1005
+ assert_raises(ValueError, np.set_printoptions, threshold=float('nan'))
1006
+ assert_raises(TypeError, np.set_printoptions, threshold='1')
1007
+ assert_raises(TypeError, np.set_printoptions, threshold=b'1')
1008
+
1009
+ assert_raises(TypeError, np.set_printoptions, precision='1')
1010
+ assert_raises(TypeError, np.set_printoptions, precision=1.5)
1011
+
1012
+ def test_unicode_object_array():
1013
+ expected = "array(['é'], dtype=object)"
1014
+ x = np.array(['\xe9'], dtype=object)
1015
+ assert_equal(repr(x), expected)
1016
+
1017
+
1018
+ class TestContextManager:
1019
+ def test_ctx_mgr(self):
1020
+ # test that context manager actually works
1021
+ with np.printoptions(precision=2):
1022
+ s = str(np.array([2.0]) / 3)
1023
+ assert_equal(s, '[0.67]')
1024
+
1025
+ def test_ctx_mgr_restores(self):
1026
+ # test that print options are actually restrored
1027
+ opts = np.get_printoptions()
1028
+ with np.printoptions(precision=opts['precision'] - 1,
1029
+ linewidth=opts['linewidth'] - 4):
1030
+ pass
1031
+ assert_equal(np.get_printoptions(), opts)
1032
+
1033
+ def test_ctx_mgr_exceptions(self):
1034
+ # test that print options are restored even if an exception is raised
1035
+ opts = np.get_printoptions()
1036
+ try:
1037
+ with np.printoptions(precision=2, linewidth=11):
1038
+ raise ValueError
1039
+ except ValueError:
1040
+ pass
1041
+ assert_equal(np.get_printoptions(), opts)
1042
+
1043
+ def test_ctx_mgr_as_smth(self):
1044
+ opts = {"precision": 2}
1045
+ with np.printoptions(**opts) as ctx:
1046
+ saved_opts = ctx.copy()
1047
+ assert_equal({k: saved_opts[k] for k in opts}, opts)
.venv/lib/python3.11/site-packages/numpy/core/tests/test_casting_floatingpoint_errors.py ADDED
@@ -0,0 +1,154 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+ from pytest import param
3
+ from numpy.testing import IS_WASM
4
+ import numpy as np
5
+
6
+
7
+ def values_and_dtypes():
8
+ """
9
+ Generate value+dtype pairs that generate floating point errors during
10
+ casts. The invalid casts to integers will generate "invalid" value
11
+ warnings, the float casts all generate "overflow".
12
+
13
+ (The Python int/float paths don't need to get tested in all the same
14
+ situations, but it does not hurt.)
15
+ """
16
+ # Casting to float16:
17
+ yield param(70000, "float16", id="int-to-f2")
18
+ yield param("70000", "float16", id="str-to-f2")
19
+ yield param(70000.0, "float16", id="float-to-f2")
20
+ yield param(np.longdouble(70000.), "float16", id="longdouble-to-f2")
21
+ yield param(np.float64(70000.), "float16", id="double-to-f2")
22
+ yield param(np.float32(70000.), "float16", id="float-to-f2")
23
+ # Casting to float32:
24
+ yield param(10**100, "float32", id="int-to-f4")
25
+ yield param(1e100, "float32", id="float-to-f2")
26
+ yield param(np.longdouble(1e300), "float32", id="longdouble-to-f2")
27
+ yield param(np.float64(1e300), "float32", id="double-to-f2")
28
+ # Casting to float64:
29
+ # If longdouble is double-double, its max can be rounded down to the double
30
+ # max. So we correct the double spacing (a bit weird, admittedly):
31
+ max_ld = np.finfo(np.longdouble).max
32
+ spacing = np.spacing(np.nextafter(np.finfo("f8").max, 0))
33
+ if max_ld - spacing > np.finfo("f8").max:
34
+ yield param(np.finfo(np.longdouble).max, "float64",
35
+ id="longdouble-to-f8")
36
+
37
+ # Cast to complex32:
38
+ yield param(2e300, "complex64", id="float-to-c8")
39
+ yield param(2e300+0j, "complex64", id="complex-to-c8")
40
+ yield param(2e300j, "complex64", id="complex-to-c8")
41
+ yield param(np.longdouble(2e300), "complex64", id="longdouble-to-c8")
42
+
43
+ # Invalid float to integer casts:
44
+ with np.errstate(over="ignore"):
45
+ for to_dt in np.typecodes["AllInteger"]:
46
+ for value in [np.inf, np.nan]:
47
+ for from_dt in np.typecodes["AllFloat"]:
48
+ from_dt = np.dtype(from_dt)
49
+ from_val = from_dt.type(value)
50
+
51
+ yield param(from_val, to_dt, id=f"{from_val}-to-{to_dt}")
52
+
53
+
54
+ def check_operations(dtype, value):
55
+ """
56
+ There are many dedicated paths in NumPy which cast and should check for
57
+ floating point errors which occurred during those casts.
58
+ """
59
+ if dtype.kind != 'i':
60
+ # These assignments use the stricter setitem logic:
61
+ def assignment():
62
+ arr = np.empty(3, dtype=dtype)
63
+ arr[0] = value
64
+
65
+ yield assignment
66
+
67
+ def fill():
68
+ arr = np.empty(3, dtype=dtype)
69
+ arr.fill(value)
70
+
71
+ yield fill
72
+
73
+ def copyto_scalar():
74
+ arr = np.empty(3, dtype=dtype)
75
+ np.copyto(arr, value, casting="unsafe")
76
+
77
+ yield copyto_scalar
78
+
79
+ def copyto():
80
+ arr = np.empty(3, dtype=dtype)
81
+ np.copyto(arr, np.array([value, value, value]), casting="unsafe")
82
+
83
+ yield copyto
84
+
85
+ def copyto_scalar_masked():
86
+ arr = np.empty(3, dtype=dtype)
87
+ np.copyto(arr, value, casting="unsafe",
88
+ where=[True, False, True])
89
+
90
+ yield copyto_scalar_masked
91
+
92
+ def copyto_masked():
93
+ arr = np.empty(3, dtype=dtype)
94
+ np.copyto(arr, np.array([value, value, value]), casting="unsafe",
95
+ where=[True, False, True])
96
+
97
+ yield copyto_masked
98
+
99
+ def direct_cast():
100
+ np.array([value, value, value]).astype(dtype)
101
+
102
+ yield direct_cast
103
+
104
+ def direct_cast_nd_strided():
105
+ arr = np.full((5, 5, 5), fill_value=value)[:, ::2, :]
106
+ arr.astype(dtype)
107
+
108
+ yield direct_cast_nd_strided
109
+
110
+ def boolean_array_assignment():
111
+ arr = np.empty(3, dtype=dtype)
112
+ arr[[True, False, True]] = np.array([value, value])
113
+
114
+ yield boolean_array_assignment
115
+
116
+ def integer_array_assignment():
117
+ arr = np.empty(3, dtype=dtype)
118
+ values = np.array([value, value])
119
+
120
+ arr[[0, 1]] = values
121
+
122
+ yield integer_array_assignment
123
+
124
+ def integer_array_assignment_with_subspace():
125
+ arr = np.empty((5, 3), dtype=dtype)
126
+ values = np.array([value, value, value])
127
+
128
+ arr[[0, 2]] = values
129
+
130
+ yield integer_array_assignment_with_subspace
131
+
132
+ def flat_assignment():
133
+ arr = np.empty((3,), dtype=dtype)
134
+ values = np.array([value, value, value])
135
+ arr.flat[:] = values
136
+
137
+ yield flat_assignment
138
+
139
+ @pytest.mark.skipif(IS_WASM, reason="no wasm fp exception support")
140
+ @pytest.mark.parametrize(["value", "dtype"], values_and_dtypes())
141
+ @pytest.mark.filterwarnings("ignore::numpy.ComplexWarning")
142
+ def test_floatingpoint_errors_casting(dtype, value):
143
+ dtype = np.dtype(dtype)
144
+ for operation in check_operations(dtype, value):
145
+ dtype = np.dtype(dtype)
146
+
147
+ match = "invalid" if dtype.kind in 'iu' else "overflow"
148
+ with pytest.warns(RuntimeWarning, match=match):
149
+ operation()
150
+
151
+ with np.errstate(all="raise"):
152
+ with pytest.raises(FloatingPointError, match=match):
153
+ operation()
154
+
.venv/lib/python3.11/site-packages/numpy/core/tests/test_cpu_features.py ADDED
@@ -0,0 +1,404 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys, platform, re, pytest
2
+ from numpy.core._multiarray_umath import (
3
+ __cpu_features__,
4
+ __cpu_baseline__,
5
+ __cpu_dispatch__,
6
+ )
7
+ import numpy as np
8
+ import subprocess
9
+ import pathlib
10
+ import os
11
+ import re
12
+
13
+ def assert_features_equal(actual, desired, fname):
14
+ __tracebackhide__ = True # Hide traceback for py.test
15
+ actual, desired = str(actual), str(desired)
16
+ if actual == desired:
17
+ return
18
+ detected = str(__cpu_features__).replace("'", "")
19
+ try:
20
+ with open("/proc/cpuinfo") as fd:
21
+ cpuinfo = fd.read(2048)
22
+ except Exception as err:
23
+ cpuinfo = str(err)
24
+
25
+ try:
26
+ import subprocess
27
+ auxv = subprocess.check_output(['/bin/true'], env=dict(LD_SHOW_AUXV="1"))
28
+ auxv = auxv.decode()
29
+ except Exception as err:
30
+ auxv = str(err)
31
+
32
+ import textwrap
33
+ error_report = textwrap.indent(
34
+ """
35
+ ###########################################
36
+ ### Extra debugging information
37
+ ###########################################
38
+ -------------------------------------------
39
+ --- NumPy Detections
40
+ -------------------------------------------
41
+ %s
42
+ -------------------------------------------
43
+ --- SYS / CPUINFO
44
+ -------------------------------------------
45
+ %s....
46
+ -------------------------------------------
47
+ --- SYS / AUXV
48
+ -------------------------------------------
49
+ %s
50
+ """ % (detected, cpuinfo, auxv), prefix='\r')
51
+
52
+ raise AssertionError((
53
+ "Failure Detection\n"
54
+ " NAME: '%s'\n"
55
+ " ACTUAL: %s\n"
56
+ " DESIRED: %s\n"
57
+ "%s"
58
+ ) % (fname, actual, desired, error_report))
59
+
60
+ def _text_to_list(txt):
61
+ out = txt.strip("][\n").replace("'", "").split(', ')
62
+ return None if out[0] == "" else out
63
+
64
+ class AbstractTest:
65
+ features = []
66
+ features_groups = {}
67
+ features_map = {}
68
+ features_flags = set()
69
+
70
+ def load_flags(self):
71
+ # a hook
72
+ pass
73
+ def test_features(self):
74
+ self.load_flags()
75
+ for gname, features in self.features_groups.items():
76
+ test_features = [self.cpu_have(f) for f in features]
77
+ assert_features_equal(__cpu_features__.get(gname), all(test_features), gname)
78
+
79
+ for feature_name in self.features:
80
+ cpu_have = self.cpu_have(feature_name)
81
+ npy_have = __cpu_features__.get(feature_name)
82
+ assert_features_equal(npy_have, cpu_have, feature_name)
83
+
84
+ def cpu_have(self, feature_name):
85
+ map_names = self.features_map.get(feature_name, feature_name)
86
+ if isinstance(map_names, str):
87
+ return map_names in self.features_flags
88
+ for f in map_names:
89
+ if f in self.features_flags:
90
+ return True
91
+ return False
92
+
93
+ def load_flags_cpuinfo(self, magic_key):
94
+ self.features_flags = self.get_cpuinfo_item(magic_key)
95
+
96
+ def get_cpuinfo_item(self, magic_key):
97
+ values = set()
98
+ with open('/proc/cpuinfo') as fd:
99
+ for line in fd:
100
+ if not line.startswith(magic_key):
101
+ continue
102
+ flags_value = [s.strip() for s in line.split(':', 1)]
103
+ if len(flags_value) == 2:
104
+ values = values.union(flags_value[1].upper().split())
105
+ return values
106
+
107
+ def load_flags_auxv(self):
108
+ auxv = subprocess.check_output(['/bin/true'], env=dict(LD_SHOW_AUXV="1"))
109
+ for at in auxv.split(b'\n'):
110
+ if not at.startswith(b"AT_HWCAP"):
111
+ continue
112
+ hwcap_value = [s.strip() for s in at.split(b':', 1)]
113
+ if len(hwcap_value) == 2:
114
+ self.features_flags = self.features_flags.union(
115
+ hwcap_value[1].upper().decode().split()
116
+ )
117
+
118
+ @pytest.mark.skipif(
119
+ sys.platform == 'emscripten',
120
+ reason= (
121
+ "The subprocess module is not available on WASM platforms and"
122
+ " therefore this test class cannot be properly executed."
123
+ ),
124
+ )
125
+ class TestEnvPrivation:
126
+ cwd = pathlib.Path(__file__).parent.resolve()
127
+ env = os.environ.copy()
128
+ _enable = os.environ.pop('NPY_ENABLE_CPU_FEATURES', None)
129
+ _disable = os.environ.pop('NPY_DISABLE_CPU_FEATURES', None)
130
+ SUBPROCESS_ARGS = dict(cwd=cwd, capture_output=True, text=True, check=True)
131
+ unavailable_feats = [
132
+ feat for feat in __cpu_dispatch__ if not __cpu_features__[feat]
133
+ ]
134
+ UNAVAILABLE_FEAT = (
135
+ None if len(unavailable_feats) == 0
136
+ else unavailable_feats[0]
137
+ )
138
+ BASELINE_FEAT = None if len(__cpu_baseline__) == 0 else __cpu_baseline__[0]
139
+ SCRIPT = """
140
+ def main():
141
+ from numpy.core._multiarray_umath import __cpu_features__, __cpu_dispatch__
142
+
143
+ detected = [feat for feat in __cpu_dispatch__ if __cpu_features__[feat]]
144
+ print(detected)
145
+
146
+ if __name__ == "__main__":
147
+ main()
148
+ """
149
+
150
+ @pytest.fixture(autouse=True)
151
+ def setup_class(self, tmp_path_factory):
152
+ file = tmp_path_factory.mktemp("runtime_test_script")
153
+ file /= "_runtime_detect.py"
154
+ file.write_text(self.SCRIPT)
155
+ self.file = file
156
+ return
157
+
158
+ def _run(self):
159
+ return subprocess.run(
160
+ [sys.executable, self.file],
161
+ env=self.env,
162
+ **self.SUBPROCESS_ARGS,
163
+ )
164
+
165
+ # Helper function mimicing pytest.raises for subprocess call
166
+ def _expect_error(
167
+ self,
168
+ msg,
169
+ err_type,
170
+ no_error_msg="Failed to generate error"
171
+ ):
172
+ try:
173
+ self._run()
174
+ except subprocess.CalledProcessError as e:
175
+ assertion_message = f"Expected: {msg}\nGot: {e.stderr}"
176
+ assert re.search(msg, e.stderr), assertion_message
177
+
178
+ assertion_message = (
179
+ f"Expected error of type: {err_type}; see full "
180
+ f"error:\n{e.stderr}"
181
+ )
182
+ assert re.search(err_type, e.stderr), assertion_message
183
+ else:
184
+ assert False, no_error_msg
185
+
186
+ def setup_method(self):
187
+ """Ensure that the environment is reset"""
188
+ self.env = os.environ.copy()
189
+ return
190
+
191
+ def test_runtime_feature_selection(self):
192
+ """
193
+ Ensure that when selecting `NPY_ENABLE_CPU_FEATURES`, only the
194
+ features exactly specified are dispatched.
195
+ """
196
+
197
+ # Capture runtime-enabled features
198
+ out = self._run()
199
+ non_baseline_features = _text_to_list(out.stdout)
200
+
201
+ if non_baseline_features is None:
202
+ pytest.skip(
203
+ "No dispatchable features outside of baseline detected."
204
+ )
205
+ feature = non_baseline_features[0]
206
+
207
+ # Capture runtime-enabled features when `NPY_ENABLE_CPU_FEATURES` is
208
+ # specified
209
+ self.env['NPY_ENABLE_CPU_FEATURES'] = feature
210
+ out = self._run()
211
+ enabled_features = _text_to_list(out.stdout)
212
+
213
+ # Ensure that only one feature is enabled, and it is exactly the one
214
+ # specified by `NPY_ENABLE_CPU_FEATURES`
215
+ assert set(enabled_features) == {feature}
216
+
217
+ if len(non_baseline_features) < 2:
218
+ pytest.skip("Only one non-baseline feature detected.")
219
+ # Capture runtime-enabled features when `NPY_ENABLE_CPU_FEATURES` is
220
+ # specified
221
+ self.env['NPY_ENABLE_CPU_FEATURES'] = ",".join(non_baseline_features)
222
+ out = self._run()
223
+ enabled_features = _text_to_list(out.stdout)
224
+
225
+ # Ensure that both features are enabled, and they are exactly the ones
226
+ # specified by `NPY_ENABLE_CPU_FEATURES`
227
+ assert set(enabled_features) == set(non_baseline_features)
228
+ return
229
+
230
+ @pytest.mark.parametrize("enabled, disabled",
231
+ [
232
+ ("feature", "feature"),
233
+ ("feature", "same"),
234
+ ])
235
+ def test_both_enable_disable_set(self, enabled, disabled):
236
+ """
237
+ Ensure that when both environment variables are set then an
238
+ ImportError is thrown
239
+ """
240
+ self.env['NPY_ENABLE_CPU_FEATURES'] = enabled
241
+ self.env['NPY_DISABLE_CPU_FEATURES'] = disabled
242
+ msg = "Both NPY_DISABLE_CPU_FEATURES and NPY_ENABLE_CPU_FEATURES"
243
+ err_type = "ImportError"
244
+ self._expect_error(msg, err_type)
245
+
246
+ @pytest.mark.skipif(
247
+ not __cpu_dispatch__,
248
+ reason=(
249
+ "NPY_*_CPU_FEATURES only parsed if "
250
+ "`__cpu_dispatch__` is non-empty"
251
+ )
252
+ )
253
+ @pytest.mark.parametrize("action", ["ENABLE", "DISABLE"])
254
+ def test_variable_too_long(self, action):
255
+ """
256
+ Test that an error is thrown if the environment variables are too long
257
+ to be processed. Current limit is 1024, but this may change later.
258
+ """
259
+ MAX_VAR_LENGTH = 1024
260
+ # Actual length is MAX_VAR_LENGTH + 1 due to null-termination
261
+ self.env[f'NPY_{action}_CPU_FEATURES'] = "t" * MAX_VAR_LENGTH
262
+ msg = (
263
+ f"Length of environment variable 'NPY_{action}_CPU_FEATURES' is "
264
+ f"{MAX_VAR_LENGTH + 1}, only {MAX_VAR_LENGTH} accepted"
265
+ )
266
+ err_type = "RuntimeError"
267
+ self._expect_error(msg, err_type)
268
+
269
+ @pytest.mark.skipif(
270
+ not __cpu_dispatch__,
271
+ reason=(
272
+ "NPY_*_CPU_FEATURES only parsed if "
273
+ "`__cpu_dispatch__` is non-empty"
274
+ )
275
+ )
276
+ def test_impossible_feature_disable(self):
277
+ """
278
+ Test that a RuntimeError is thrown if an impossible feature-disabling
279
+ request is made. This includes disabling a baseline feature.
280
+ """
281
+
282
+ if self.BASELINE_FEAT is None:
283
+ pytest.skip("There are no unavailable features to test with")
284
+ bad_feature = self.BASELINE_FEAT
285
+ self.env['NPY_DISABLE_CPU_FEATURES'] = bad_feature
286
+ msg = (
287
+ f"You cannot disable CPU feature '{bad_feature}', since it is "
288
+ "part of the baseline optimizations"
289
+ )
290
+ err_type = "RuntimeError"
291
+ self._expect_error(msg, err_type)
292
+
293
+ def test_impossible_feature_enable(self):
294
+ """
295
+ Test that a RuntimeError is thrown if an impossible feature-enabling
296
+ request is made. This includes enabling a feature not supported by the
297
+ machine, or disabling a baseline optimization.
298
+ """
299
+
300
+ if self.UNAVAILABLE_FEAT is None:
301
+ pytest.skip("There are no unavailable features to test with")
302
+ bad_feature = self.UNAVAILABLE_FEAT
303
+ self.env['NPY_ENABLE_CPU_FEATURES'] = bad_feature
304
+ msg = (
305
+ f"You cannot enable CPU features \\({bad_feature}\\), since "
306
+ "they are not supported by your machine."
307
+ )
308
+ err_type = "RuntimeError"
309
+ self._expect_error(msg, err_type)
310
+
311
+ # Ensure that only the bad feature gets reported
312
+ feats = f"{bad_feature}, {self.BASELINE_FEAT}"
313
+ self.env['NPY_ENABLE_CPU_FEATURES'] = feats
314
+ msg = (
315
+ f"You cannot enable CPU features \\({bad_feature}\\), since they "
316
+ "are not supported by your machine."
317
+ )
318
+ self._expect_error(msg, err_type)
319
+
320
+ is_linux = sys.platform.startswith('linux')
321
+ is_cygwin = sys.platform.startswith('cygwin')
322
+ machine = platform.machine()
323
+ is_x86 = re.match("^(amd64|x86|i386|i686)", machine, re.IGNORECASE)
324
+ @pytest.mark.skipif(
325
+ not (is_linux or is_cygwin) or not is_x86, reason="Only for Linux and x86"
326
+ )
327
+ class Test_X86_Features(AbstractTest):
328
+ features = [
329
+ "MMX", "SSE", "SSE2", "SSE3", "SSSE3", "SSE41", "POPCNT", "SSE42",
330
+ "AVX", "F16C", "XOP", "FMA4", "FMA3", "AVX2", "AVX512F", "AVX512CD",
331
+ "AVX512ER", "AVX512PF", "AVX5124FMAPS", "AVX5124VNNIW", "AVX512VPOPCNTDQ",
332
+ "AVX512VL", "AVX512BW", "AVX512DQ", "AVX512VNNI", "AVX512IFMA",
333
+ "AVX512VBMI", "AVX512VBMI2", "AVX512BITALG", "AVX512FP16",
334
+ ]
335
+ features_groups = dict(
336
+ AVX512_KNL = ["AVX512F", "AVX512CD", "AVX512ER", "AVX512PF"],
337
+ AVX512_KNM = ["AVX512F", "AVX512CD", "AVX512ER", "AVX512PF", "AVX5124FMAPS",
338
+ "AVX5124VNNIW", "AVX512VPOPCNTDQ"],
339
+ AVX512_SKX = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL"],
340
+ AVX512_CLX = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512VNNI"],
341
+ AVX512_CNL = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512IFMA",
342
+ "AVX512VBMI"],
343
+ AVX512_ICL = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ", "AVX512VL", "AVX512IFMA",
344
+ "AVX512VBMI", "AVX512VNNI", "AVX512VBMI2", "AVX512BITALG", "AVX512VPOPCNTDQ"],
345
+ AVX512_SPR = ["AVX512F", "AVX512CD", "AVX512BW", "AVX512DQ",
346
+ "AVX512VL", "AVX512IFMA", "AVX512VBMI", "AVX512VNNI",
347
+ "AVX512VBMI2", "AVX512BITALG", "AVX512VPOPCNTDQ",
348
+ "AVX512FP16"],
349
+ )
350
+ features_map = dict(
351
+ SSE3="PNI", SSE41="SSE4_1", SSE42="SSE4_2", FMA3="FMA",
352
+ AVX512VNNI="AVX512_VNNI", AVX512BITALG="AVX512_BITALG", AVX512VBMI2="AVX512_VBMI2",
353
+ AVX5124FMAPS="AVX512_4FMAPS", AVX5124VNNIW="AVX512_4VNNIW", AVX512VPOPCNTDQ="AVX512_VPOPCNTDQ",
354
+ AVX512FP16="AVX512_FP16",
355
+ )
356
+ def load_flags(self):
357
+ self.load_flags_cpuinfo("flags")
358
+
359
+ is_power = re.match("^(powerpc|ppc)64", machine, re.IGNORECASE)
360
+ @pytest.mark.skipif(not is_linux or not is_power, reason="Only for Linux and Power")
361
+ class Test_POWER_Features(AbstractTest):
362
+ features = ["VSX", "VSX2", "VSX3", "VSX4"]
363
+ features_map = dict(VSX2="ARCH_2_07", VSX3="ARCH_3_00", VSX4="ARCH_3_1")
364
+
365
+ def load_flags(self):
366
+ self.load_flags_auxv()
367
+
368
+
369
+ is_zarch = re.match("^(s390x)", machine, re.IGNORECASE)
370
+ @pytest.mark.skipif(not is_linux or not is_zarch,
371
+ reason="Only for Linux and IBM Z")
372
+ class Test_ZARCH_Features(AbstractTest):
373
+ features = ["VX", "VXE", "VXE2"]
374
+
375
+ def load_flags(self):
376
+ self.load_flags_auxv()
377
+
378
+
379
+ is_arm = re.match("^(arm|aarch64)", machine, re.IGNORECASE)
380
+ @pytest.mark.skipif(not is_linux or not is_arm, reason="Only for Linux and ARM")
381
+ class Test_ARM_Features(AbstractTest):
382
+ features = [
383
+ "NEON", "ASIMD", "FPHP", "ASIMDHP", "ASIMDDP", "ASIMDFHM"
384
+ ]
385
+ features_groups = dict(
386
+ NEON_FP16 = ["NEON", "HALF"],
387
+ NEON_VFPV4 = ["NEON", "VFPV4"],
388
+ )
389
+ def load_flags(self):
390
+ self.load_flags_cpuinfo("Features")
391
+ arch = self.get_cpuinfo_item("CPU architecture")
392
+ # in case of mounting virtual filesystem of aarch64 kernel
393
+ is_rootfs_v8 = int('0'+next(iter(arch))) > 7 if arch else 0
394
+ if re.match("^(aarch64|AARCH64)", machine) or is_rootfs_v8:
395
+ self.features_map = dict(
396
+ NEON="ASIMD", HALF="ASIMD", VFPV4="ASIMD"
397
+ )
398
+ else:
399
+ self.features_map = dict(
400
+ # ELF auxiliary vector and /proc/cpuinfo on Linux kernel(armv8 aarch32)
401
+ # doesn't provide information about ASIMD, so we assume that ASIMD is supported
402
+ # if the kernel reports any one of the following ARM8 features.
403
+ ASIMD=("AES", "SHA1", "SHA2", "PMULL", "CRC32")
404
+ )
.venv/lib/python3.11/site-packages/numpy/core/tests/test_custom_dtypes.py ADDED
@@ -0,0 +1,253 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+
3
+ import numpy as np
4
+ from numpy.testing import assert_array_equal
5
+ from numpy.core._multiarray_umath import (
6
+ _discover_array_parameters as discover_array_params, _get_sfloat_dtype)
7
+
8
+
9
+ SF = _get_sfloat_dtype()
10
+
11
+
12
+ class TestSFloat:
13
+ def _get_array(self, scaling, aligned=True):
14
+ if not aligned:
15
+ a = np.empty(3*8 + 1, dtype=np.uint8)[1:]
16
+ a = a.view(np.float64)
17
+ a[:] = [1., 2., 3.]
18
+ else:
19
+ a = np.array([1., 2., 3.])
20
+
21
+ a *= 1./scaling # the casting code also uses the reciprocal.
22
+ return a.view(SF(scaling))
23
+
24
+ def test_sfloat_rescaled(self):
25
+ sf = SF(1.)
26
+ sf2 = sf.scaled_by(2.)
27
+ assert sf2.get_scaling() == 2.
28
+ sf6 = sf2.scaled_by(3.)
29
+ assert sf6.get_scaling() == 6.
30
+
31
+ def test_class_discovery(self):
32
+ # This does not test much, since we always discover the scaling as 1.
33
+ # But most of NumPy (when writing) does not understand DType classes
34
+ dt, _ = discover_array_params([1., 2., 3.], dtype=SF)
35
+ assert dt == SF(1.)
36
+
37
+ @pytest.mark.parametrize("scaling", [1., -1., 2.])
38
+ def test_scaled_float_from_floats(self, scaling):
39
+ a = np.array([1., 2., 3.], dtype=SF(scaling))
40
+
41
+ assert a.dtype.get_scaling() == scaling
42
+ assert_array_equal(scaling * a.view(np.float64), [1., 2., 3.])
43
+
44
+ def test_repr(self):
45
+ # Check the repr, mainly to cover the code paths:
46
+ assert repr(SF(scaling=1.)) == "_ScaledFloatTestDType(scaling=1.0)"
47
+
48
+ def test_dtype_name(self):
49
+ assert SF(1.).name == "_ScaledFloatTestDType64"
50
+
51
+ @pytest.mark.parametrize("scaling", [1., -1., 2.])
52
+ def test_sfloat_from_float(self, scaling):
53
+ a = np.array([1., 2., 3.]).astype(dtype=SF(scaling))
54
+
55
+ assert a.dtype.get_scaling() == scaling
56
+ assert_array_equal(scaling * a.view(np.float64), [1., 2., 3.])
57
+
58
+ @pytest.mark.parametrize("aligned", [True, False])
59
+ @pytest.mark.parametrize("scaling", [1., -1., 2.])
60
+ def test_sfloat_getitem(self, aligned, scaling):
61
+ a = self._get_array(1., aligned)
62
+ assert a.tolist() == [1., 2., 3.]
63
+
64
+ @pytest.mark.parametrize("aligned", [True, False])
65
+ def test_sfloat_casts(self, aligned):
66
+ a = self._get_array(1., aligned)
67
+
68
+ assert np.can_cast(a, SF(-1.), casting="equiv")
69
+ assert not np.can_cast(a, SF(-1.), casting="no")
70
+ na = a.astype(SF(-1.))
71
+ assert_array_equal(-1 * na.view(np.float64), a.view(np.float64))
72
+
73
+ assert np.can_cast(a, SF(2.), casting="same_kind")
74
+ assert not np.can_cast(a, SF(2.), casting="safe")
75
+ a2 = a.astype(SF(2.))
76
+ assert_array_equal(2 * a2.view(np.float64), a.view(np.float64))
77
+
78
+ @pytest.mark.parametrize("aligned", [True, False])
79
+ def test_sfloat_cast_internal_errors(self, aligned):
80
+ a = self._get_array(2e300, aligned)
81
+
82
+ with pytest.raises(TypeError,
83
+ match="error raised inside the core-loop: non-finite factor!"):
84
+ a.astype(SF(2e-300))
85
+
86
+ def test_sfloat_promotion(self):
87
+ assert np.result_type(SF(2.), SF(3.)) == SF(3.)
88
+ assert np.result_type(SF(3.), SF(2.)) == SF(3.)
89
+ # Float64 -> SF(1.) and then promotes normally, so both of this work:
90
+ assert np.result_type(SF(3.), np.float64) == SF(3.)
91
+ assert np.result_type(np.float64, SF(0.5)) == SF(1.)
92
+
93
+ # Test an undefined promotion:
94
+ with pytest.raises(TypeError):
95
+ np.result_type(SF(1.), np.int64)
96
+
97
+ def test_basic_multiply(self):
98
+ a = self._get_array(2.)
99
+ b = self._get_array(4.)
100
+
101
+ res = a * b
102
+ # multiplies dtype scaling and content separately:
103
+ assert res.dtype.get_scaling() == 8.
104
+ expected_view = a.view(np.float64) * b.view(np.float64)
105
+ assert_array_equal(res.view(np.float64), expected_view)
106
+
107
+ def test_possible_and_impossible_reduce(self):
108
+ # For reductions to work, the first and last operand must have the
109
+ # same dtype. For this parametric DType that is not necessarily true.
110
+ a = self._get_array(2.)
111
+ # Addition reductin works (as of writing requires to pass initial
112
+ # because setting a scaled-float from the default `0` fails).
113
+ res = np.add.reduce(a, initial=0.)
114
+ assert res == a.astype(np.float64).sum()
115
+
116
+ # But each multiplication changes the factor, so a reduction is not
117
+ # possible (the relaxed version of the old refusal to handle any
118
+ # flexible dtype).
119
+ with pytest.raises(TypeError,
120
+ match="the resolved dtypes are not compatible"):
121
+ np.multiply.reduce(a)
122
+
123
+ def test_basic_ufunc_at(self):
124
+ float_a = np.array([1., 2., 3.])
125
+ b = self._get_array(2.)
126
+
127
+ float_b = b.view(np.float64).copy()
128
+ np.multiply.at(float_b, [1, 1, 1], float_a)
129
+ np.multiply.at(b, [1, 1, 1], float_a)
130
+
131
+ assert_array_equal(b.view(np.float64), float_b)
132
+
133
+ def test_basic_multiply_promotion(self):
134
+ float_a = np.array([1., 2., 3.])
135
+ b = self._get_array(2.)
136
+
137
+ res1 = float_a * b
138
+ res2 = b * float_a
139
+
140
+ # one factor is one, so we get the factor of b:
141
+ assert res1.dtype == res2.dtype == b.dtype
142
+ expected_view = float_a * b.view(np.float64)
143
+ assert_array_equal(res1.view(np.float64), expected_view)
144
+ assert_array_equal(res2.view(np.float64), expected_view)
145
+
146
+ # Check that promotion works when `out` is used:
147
+ np.multiply(b, float_a, out=res2)
148
+ with pytest.raises(TypeError):
149
+ # The promoter accepts this (maybe it should not), but the SFloat
150
+ # result cannot be cast to integer:
151
+ np.multiply(b, float_a, out=np.arange(3))
152
+
153
+ def test_basic_addition(self):
154
+ a = self._get_array(2.)
155
+ b = self._get_array(4.)
156
+
157
+ res = a + b
158
+ # addition uses the type promotion rules for the result:
159
+ assert res.dtype == np.result_type(a.dtype, b.dtype)
160
+ expected_view = (a.astype(res.dtype).view(np.float64) +
161
+ b.astype(res.dtype).view(np.float64))
162
+ assert_array_equal(res.view(np.float64), expected_view)
163
+
164
+ def test_addition_cast_safety(self):
165
+ """The addition method is special for the scaled float, because it
166
+ includes the "cast" between different factors, thus cast-safety
167
+ is influenced by the implementation.
168
+ """
169
+ a = self._get_array(2.)
170
+ b = self._get_array(-2.)
171
+ c = self._get_array(3.)
172
+
173
+ # sign change is "equiv":
174
+ np.add(a, b, casting="equiv")
175
+ with pytest.raises(TypeError):
176
+ np.add(a, b, casting="no")
177
+
178
+ # Different factor is "same_kind" (default) so check that "safe" fails
179
+ with pytest.raises(TypeError):
180
+ np.add(a, c, casting="safe")
181
+
182
+ # Check that casting the output fails also (done by the ufunc here)
183
+ with pytest.raises(TypeError):
184
+ np.add(a, a, out=c, casting="safe")
185
+
186
+ @pytest.mark.parametrize("ufunc",
187
+ [np.logical_and, np.logical_or, np.logical_xor])
188
+ def test_logical_ufuncs_casts_to_bool(self, ufunc):
189
+ a = self._get_array(2.)
190
+ a[0] = 0. # make sure first element is considered False.
191
+
192
+ float_equiv = a.astype(float)
193
+ expected = ufunc(float_equiv, float_equiv)
194
+ res = ufunc(a, a)
195
+ assert_array_equal(res, expected)
196
+
197
+ # also check that the same works for reductions:
198
+ expected = ufunc.reduce(float_equiv)
199
+ res = ufunc.reduce(a)
200
+ assert_array_equal(res, expected)
201
+
202
+ # The output casting does not match the bool, bool -> bool loop:
203
+ with pytest.raises(TypeError):
204
+ ufunc(a, a, out=np.empty(a.shape, dtype=int), casting="equiv")
205
+
206
+ def test_wrapped_and_wrapped_reductions(self):
207
+ a = self._get_array(2.)
208
+ float_equiv = a.astype(float)
209
+
210
+ expected = np.hypot(float_equiv, float_equiv)
211
+ res = np.hypot(a, a)
212
+ assert res.dtype == a.dtype
213
+ res_float = res.view(np.float64) * 2
214
+ assert_array_equal(res_float, expected)
215
+
216
+ # Also check reduction (keepdims, due to incorrect getitem)
217
+ res = np.hypot.reduce(a, keepdims=True)
218
+ assert res.dtype == a.dtype
219
+ expected = np.hypot.reduce(float_equiv, keepdims=True)
220
+ assert res.view(np.float64) * 2 == expected
221
+
222
+ def test_astype_class(self):
223
+ # Very simple test that we accept `.astype()` also on the class.
224
+ # ScaledFloat always returns the default descriptor, but it does
225
+ # check the relevant code paths.
226
+ arr = np.array([1., 2., 3.], dtype=object)
227
+
228
+ res = arr.astype(SF) # passing the class class
229
+ expected = arr.astype(SF(1.)) # above will have discovered 1. scaling
230
+ assert_array_equal(res.view(np.float64), expected.view(np.float64))
231
+
232
+ def test_creation_class(self):
233
+ arr1 = np.array([1., 2., 3.], dtype=SF)
234
+ assert arr1.dtype == SF(1.)
235
+ arr2 = np.array([1., 2., 3.], dtype=SF(1.))
236
+ assert_array_equal(arr1.view(np.float64), arr2.view(np.float64))
237
+
238
+
239
+ def test_type_pickle():
240
+ # can't actually unpickle, but we can pickle (if in namespace)
241
+ import pickle
242
+
243
+ np._ScaledFloatTestDType = SF
244
+
245
+ s = pickle.dumps(SF)
246
+ res = pickle.loads(s)
247
+ assert res is SF
248
+
249
+ del np._ScaledFloatTestDType
250
+
251
+
252
+ def test_is_numeric():
253
+ assert SF._is_numeric
.venv/lib/python3.11/site-packages/numpy/core/tests/test_datetime.py ADDED
The diff for this file is too large to render. See raw diff
 
.venv/lib/python3.11/site-packages/numpy/core/tests/test_defchararray.py ADDED
@@ -0,0 +1,686 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+
3
+ import numpy as np
4
+ from numpy.core.multiarray import _vec_string
5
+ from numpy.testing import (
6
+ assert_, assert_equal, assert_array_equal, assert_raises,
7
+ assert_raises_regex
8
+ )
9
+
10
+ kw_unicode_true = {'unicode': True} # make 2to3 work properly
11
+ kw_unicode_false = {'unicode': False}
12
+
13
+ class TestBasic:
14
+ def test_from_object_array(self):
15
+ A = np.array([['abc', 2],
16
+ ['long ', '0123456789']], dtype='O')
17
+ B = np.char.array(A)
18
+ assert_equal(B.dtype.itemsize, 10)
19
+ assert_array_equal(B, [[b'abc', b'2'],
20
+ [b'long', b'0123456789']])
21
+
22
+ def test_from_object_array_unicode(self):
23
+ A = np.array([['abc', 'Sigma \u03a3'],
24
+ ['long ', '0123456789']], dtype='O')
25
+ assert_raises(ValueError, np.char.array, (A,))
26
+ B = np.char.array(A, **kw_unicode_true)
27
+ assert_equal(B.dtype.itemsize, 10 * np.array('a', 'U').dtype.itemsize)
28
+ assert_array_equal(B, [['abc', 'Sigma \u03a3'],
29
+ ['long', '0123456789']])
30
+
31
+ def test_from_string_array(self):
32
+ A = np.array([[b'abc', b'foo'],
33
+ [b'long ', b'0123456789']])
34
+ assert_equal(A.dtype.type, np.bytes_)
35
+ B = np.char.array(A)
36
+ assert_array_equal(B, A)
37
+ assert_equal(B.dtype, A.dtype)
38
+ assert_equal(B.shape, A.shape)
39
+ B[0, 0] = 'changed'
40
+ assert_(B[0, 0] != A[0, 0])
41
+ C = np.char.asarray(A)
42
+ assert_array_equal(C, A)
43
+ assert_equal(C.dtype, A.dtype)
44
+ C[0, 0] = 'changed again'
45
+ assert_(C[0, 0] != B[0, 0])
46
+ assert_(C[0, 0] == A[0, 0])
47
+
48
+ def test_from_unicode_array(self):
49
+ A = np.array([['abc', 'Sigma \u03a3'],
50
+ ['long ', '0123456789']])
51
+ assert_equal(A.dtype.type, np.str_)
52
+ B = np.char.array(A)
53
+ assert_array_equal(B, A)
54
+ assert_equal(B.dtype, A.dtype)
55
+ assert_equal(B.shape, A.shape)
56
+ B = np.char.array(A, **kw_unicode_true)
57
+ assert_array_equal(B, A)
58
+ assert_equal(B.dtype, A.dtype)
59
+ assert_equal(B.shape, A.shape)
60
+
61
+ def fail():
62
+ np.char.array(A, **kw_unicode_false)
63
+
64
+ assert_raises(UnicodeEncodeError, fail)
65
+
66
+ def test_unicode_upconvert(self):
67
+ A = np.char.array(['abc'])
68
+ B = np.char.array(['\u03a3'])
69
+ assert_(issubclass((A + B).dtype.type, np.str_))
70
+
71
+ def test_from_string(self):
72
+ A = np.char.array(b'abc')
73
+ assert_equal(len(A), 1)
74
+ assert_equal(len(A[0]), 3)
75
+ assert_(issubclass(A.dtype.type, np.bytes_))
76
+
77
+ def test_from_unicode(self):
78
+ A = np.char.array('\u03a3')
79
+ assert_equal(len(A), 1)
80
+ assert_equal(len(A[0]), 1)
81
+ assert_equal(A.itemsize, 4)
82
+ assert_(issubclass(A.dtype.type, np.str_))
83
+
84
+ class TestVecString:
85
+ def test_non_existent_method(self):
86
+
87
+ def fail():
88
+ _vec_string('a', np.bytes_, 'bogus')
89
+
90
+ assert_raises(AttributeError, fail)
91
+
92
+ def test_non_string_array(self):
93
+
94
+ def fail():
95
+ _vec_string(1, np.bytes_, 'strip')
96
+
97
+ assert_raises(TypeError, fail)
98
+
99
+ def test_invalid_args_tuple(self):
100
+
101
+ def fail():
102
+ _vec_string(['a'], np.bytes_, 'strip', 1)
103
+
104
+ assert_raises(TypeError, fail)
105
+
106
+ def test_invalid_type_descr(self):
107
+
108
+ def fail():
109
+ _vec_string(['a'], 'BOGUS', 'strip')
110
+
111
+ assert_raises(TypeError, fail)
112
+
113
+ def test_invalid_function_args(self):
114
+
115
+ def fail():
116
+ _vec_string(['a'], np.bytes_, 'strip', (1,))
117
+
118
+ assert_raises(TypeError, fail)
119
+
120
+ def test_invalid_result_type(self):
121
+
122
+ def fail():
123
+ _vec_string(['a'], np.int_, 'strip')
124
+
125
+ assert_raises(TypeError, fail)
126
+
127
+ def test_broadcast_error(self):
128
+
129
+ def fail():
130
+ _vec_string([['abc', 'def']], np.int_, 'find', (['a', 'd', 'j'],))
131
+
132
+ assert_raises(ValueError, fail)
133
+
134
+
135
+ class TestWhitespace:
136
+ def setup_method(self):
137
+ self.A = np.array([['abc ', '123 '],
138
+ ['789 ', 'xyz ']]).view(np.chararray)
139
+ self.B = np.array([['abc', '123'],
140
+ ['789', 'xyz']]).view(np.chararray)
141
+
142
+ def test1(self):
143
+ assert_(np.all(self.A == self.B))
144
+ assert_(np.all(self.A >= self.B))
145
+ assert_(np.all(self.A <= self.B))
146
+ assert_(not np.any(self.A > self.B))
147
+ assert_(not np.any(self.A < self.B))
148
+ assert_(not np.any(self.A != self.B))
149
+
150
+ class TestChar:
151
+ def setup_method(self):
152
+ self.A = np.array('abc1', dtype='c').view(np.chararray)
153
+
154
+ def test_it(self):
155
+ assert_equal(self.A.shape, (4,))
156
+ assert_equal(self.A.upper()[:2].tobytes(), b'AB')
157
+
158
+ class TestComparisons:
159
+ def setup_method(self):
160
+ self.A = np.array([['abc', '123'],
161
+ ['789', 'xyz']]).view(np.chararray)
162
+ self.B = np.array([['efg', '123 '],
163
+ ['051', 'tuv']]).view(np.chararray)
164
+
165
+ def test_not_equal(self):
166
+ assert_array_equal((self.A != self.B), [[True, False], [True, True]])
167
+
168
+ def test_equal(self):
169
+ assert_array_equal((self.A == self.B), [[False, True], [False, False]])
170
+
171
+ def test_greater_equal(self):
172
+ assert_array_equal((self.A >= self.B), [[False, True], [True, True]])
173
+
174
+ def test_less_equal(self):
175
+ assert_array_equal((self.A <= self.B), [[True, True], [False, False]])
176
+
177
+ def test_greater(self):
178
+ assert_array_equal((self.A > self.B), [[False, False], [True, True]])
179
+
180
+ def test_less(self):
181
+ assert_array_equal((self.A < self.B), [[True, False], [False, False]])
182
+
183
+ def test_type(self):
184
+ out1 = np.char.equal(self.A, self.B)
185
+ out2 = np.char.equal('a', 'a')
186
+ assert_(isinstance(out1, np.ndarray))
187
+ assert_(isinstance(out2, np.ndarray))
188
+
189
+ class TestComparisonsMixed1(TestComparisons):
190
+ """Ticket #1276"""
191
+
192
+ def setup_method(self):
193
+ TestComparisons.setup_method(self)
194
+ self.B = np.array([['efg', '123 '],
195
+ ['051', 'tuv']], np.str_).view(np.chararray)
196
+
197
+ class TestComparisonsMixed2(TestComparisons):
198
+ """Ticket #1276"""
199
+
200
+ def setup_method(self):
201
+ TestComparisons.setup_method(self)
202
+ self.A = np.array([['abc', '123'],
203
+ ['789', 'xyz']], np.str_).view(np.chararray)
204
+
205
+ class TestInformation:
206
+ def setup_method(self):
207
+ self.A = np.array([[' abc ', ''],
208
+ ['12345', 'MixedCase'],
209
+ ['123 \t 345 \0 ', 'UPPER']]).view(np.chararray)
210
+ self.B = np.array([[' \u03a3 ', ''],
211
+ ['12345', 'MixedCase'],
212
+ ['123 \t 345 \0 ', 'UPPER']]).view(np.chararray)
213
+
214
+ def test_len(self):
215
+ assert_(issubclass(np.char.str_len(self.A).dtype.type, np.integer))
216
+ assert_array_equal(np.char.str_len(self.A), [[5, 0], [5, 9], [12, 5]])
217
+ assert_array_equal(np.char.str_len(self.B), [[3, 0], [5, 9], [12, 5]])
218
+
219
+ def test_count(self):
220
+ assert_(issubclass(self.A.count('').dtype.type, np.integer))
221
+ assert_array_equal(self.A.count('a'), [[1, 0], [0, 1], [0, 0]])
222
+ assert_array_equal(self.A.count('123'), [[0, 0], [1, 0], [1, 0]])
223
+ # Python doesn't seem to like counting NULL characters
224
+ # assert_array_equal(self.A.count('\0'), [[0, 0], [0, 0], [1, 0]])
225
+ assert_array_equal(self.A.count('a', 0, 2), [[1, 0], [0, 0], [0, 0]])
226
+ assert_array_equal(self.B.count('a'), [[0, 0], [0, 1], [0, 0]])
227
+ assert_array_equal(self.B.count('123'), [[0, 0], [1, 0], [1, 0]])
228
+ # assert_array_equal(self.B.count('\0'), [[0, 0], [0, 0], [1, 0]])
229
+
230
+ def test_endswith(self):
231
+ assert_(issubclass(self.A.endswith('').dtype.type, np.bool_))
232
+ assert_array_equal(self.A.endswith(' '), [[1, 0], [0, 0], [1, 0]])
233
+ assert_array_equal(self.A.endswith('3', 0, 3), [[0, 0], [1, 0], [1, 0]])
234
+
235
+ def fail():
236
+ self.A.endswith('3', 'fdjk')
237
+
238
+ assert_raises(TypeError, fail)
239
+
240
+ def test_find(self):
241
+ assert_(issubclass(self.A.find('a').dtype.type, np.integer))
242
+ assert_array_equal(self.A.find('a'), [[1, -1], [-1, 6], [-1, -1]])
243
+ assert_array_equal(self.A.find('3'), [[-1, -1], [2, -1], [2, -1]])
244
+ assert_array_equal(self.A.find('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]])
245
+ assert_array_equal(self.A.find(['1', 'P']), [[-1, -1], [0, -1], [0, 1]])
246
+
247
+ def test_index(self):
248
+
249
+ def fail():
250
+ self.A.index('a')
251
+
252
+ assert_raises(ValueError, fail)
253
+ assert_(np.char.index('abcba', 'b') == 1)
254
+ assert_(issubclass(np.char.index('abcba', 'b').dtype.type, np.integer))
255
+
256
+ def test_isalnum(self):
257
+ assert_(issubclass(self.A.isalnum().dtype.type, np.bool_))
258
+ assert_array_equal(self.A.isalnum(), [[False, False], [True, True], [False, True]])
259
+
260
+ def test_isalpha(self):
261
+ assert_(issubclass(self.A.isalpha().dtype.type, np.bool_))
262
+ assert_array_equal(self.A.isalpha(), [[False, False], [False, True], [False, True]])
263
+
264
+ def test_isdigit(self):
265
+ assert_(issubclass(self.A.isdigit().dtype.type, np.bool_))
266
+ assert_array_equal(self.A.isdigit(), [[False, False], [True, False], [False, False]])
267
+
268
+ def test_islower(self):
269
+ assert_(issubclass(self.A.islower().dtype.type, np.bool_))
270
+ assert_array_equal(self.A.islower(), [[True, False], [False, False], [False, False]])
271
+
272
+ def test_isspace(self):
273
+ assert_(issubclass(self.A.isspace().dtype.type, np.bool_))
274
+ assert_array_equal(self.A.isspace(), [[False, False], [False, False], [False, False]])
275
+
276
+ def test_istitle(self):
277
+ assert_(issubclass(self.A.istitle().dtype.type, np.bool_))
278
+ assert_array_equal(self.A.istitle(), [[False, False], [False, False], [False, False]])
279
+
280
+ def test_isupper(self):
281
+ assert_(issubclass(self.A.isupper().dtype.type, np.bool_))
282
+ assert_array_equal(self.A.isupper(), [[False, False], [False, False], [False, True]])
283
+
284
+ def test_rfind(self):
285
+ assert_(issubclass(self.A.rfind('a').dtype.type, np.integer))
286
+ assert_array_equal(self.A.rfind('a'), [[1, -1], [-1, 6], [-1, -1]])
287
+ assert_array_equal(self.A.rfind('3'), [[-1, -1], [2, -1], [6, -1]])
288
+ assert_array_equal(self.A.rfind('a', 0, 2), [[1, -1], [-1, -1], [-1, -1]])
289
+ assert_array_equal(self.A.rfind(['1', 'P']), [[-1, -1], [0, -1], [0, 2]])
290
+
291
+ def test_rindex(self):
292
+
293
+ def fail():
294
+ self.A.rindex('a')
295
+
296
+ assert_raises(ValueError, fail)
297
+ assert_(np.char.rindex('abcba', 'b') == 3)
298
+ assert_(issubclass(np.char.rindex('abcba', 'b').dtype.type, np.integer))
299
+
300
+ def test_startswith(self):
301
+ assert_(issubclass(self.A.startswith('').dtype.type, np.bool_))
302
+ assert_array_equal(self.A.startswith(' '), [[1, 0], [0, 0], [0, 0]])
303
+ assert_array_equal(self.A.startswith('1', 0, 3), [[0, 0], [1, 0], [1, 0]])
304
+
305
+ def fail():
306
+ self.A.startswith('3', 'fdjk')
307
+
308
+ assert_raises(TypeError, fail)
309
+
310
+
311
+ class TestMethods:
312
+ def setup_method(self):
313
+ self.A = np.array([[' abc ', ''],
314
+ ['12345', 'MixedCase'],
315
+ ['123 \t 345 \0 ', 'UPPER']],
316
+ dtype='S').view(np.chararray)
317
+ self.B = np.array([[' \u03a3 ', ''],
318
+ ['12345', 'MixedCase'],
319
+ ['123 \t 345 \0 ', 'UPPER']]).view(np.chararray)
320
+
321
+ def test_capitalize(self):
322
+ tgt = [[b' abc ', b''],
323
+ [b'12345', b'Mixedcase'],
324
+ [b'123 \t 345 \0 ', b'Upper']]
325
+ assert_(issubclass(self.A.capitalize().dtype.type, np.bytes_))
326
+ assert_array_equal(self.A.capitalize(), tgt)
327
+
328
+ tgt = [[' \u03c3 ', ''],
329
+ ['12345', 'Mixedcase'],
330
+ ['123 \t 345 \0 ', 'Upper']]
331
+ assert_(issubclass(self.B.capitalize().dtype.type, np.str_))
332
+ assert_array_equal(self.B.capitalize(), tgt)
333
+
334
+ def test_center(self):
335
+ assert_(issubclass(self.A.center(10).dtype.type, np.bytes_))
336
+ C = self.A.center([10, 20])
337
+ assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]])
338
+
339
+ C = self.A.center(20, b'#')
340
+ assert_(np.all(C.startswith(b'#')))
341
+ assert_(np.all(C.endswith(b'#')))
342
+
343
+ C = np.char.center(b'FOO', [[10, 20], [15, 8]])
344
+ tgt = [[b' FOO ', b' FOO '],
345
+ [b' FOO ', b' FOO ']]
346
+ assert_(issubclass(C.dtype.type, np.bytes_))
347
+ assert_array_equal(C, tgt)
348
+
349
+ def test_decode(self):
350
+ A = np.char.array([b'\\u03a3'])
351
+ assert_(A.decode('unicode-escape')[0] == '\u03a3')
352
+
353
+ def test_encode(self):
354
+ B = self.B.encode('unicode_escape')
355
+ assert_(B[0][0] == str(' \\u03a3 ').encode('latin1'))
356
+
357
+ def test_expandtabs(self):
358
+ T = self.A.expandtabs()
359
+ assert_(T[2, 0] == b'123 345 \0')
360
+
361
+ def test_join(self):
362
+ # NOTE: list(b'123') == [49, 50, 51]
363
+ # so that b','.join(b'123') results to an error on Py3
364
+ A0 = self.A.decode('ascii')
365
+
366
+ A = np.char.join([',', '#'], A0)
367
+ assert_(issubclass(A.dtype.type, np.str_))
368
+ tgt = np.array([[' ,a,b,c, ', ''],
369
+ ['1,2,3,4,5', 'M#i#x#e#d#C#a#s#e'],
370
+ ['1,2,3, ,\t, ,3,4,5, ,\x00, ', 'U#P#P#E#R']])
371
+ assert_array_equal(np.char.join([',', '#'], A0), tgt)
372
+
373
+ def test_ljust(self):
374
+ assert_(issubclass(self.A.ljust(10).dtype.type, np.bytes_))
375
+
376
+ C = self.A.ljust([10, 20])
377
+ assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]])
378
+
379
+ C = self.A.ljust(20, b'#')
380
+ assert_array_equal(C.startswith(b'#'), [
381
+ [False, True], [False, False], [False, False]])
382
+ assert_(np.all(C.endswith(b'#')))
383
+
384
+ C = np.char.ljust(b'FOO', [[10, 20], [15, 8]])
385
+ tgt = [[b'FOO ', b'FOO '],
386
+ [b'FOO ', b'FOO ']]
387
+ assert_(issubclass(C.dtype.type, np.bytes_))
388
+ assert_array_equal(C, tgt)
389
+
390
+ def test_lower(self):
391
+ tgt = [[b' abc ', b''],
392
+ [b'12345', b'mixedcase'],
393
+ [b'123 \t 345 \0 ', b'upper']]
394
+ assert_(issubclass(self.A.lower().dtype.type, np.bytes_))
395
+ assert_array_equal(self.A.lower(), tgt)
396
+
397
+ tgt = [[' \u03c3 ', ''],
398
+ ['12345', 'mixedcase'],
399
+ ['123 \t 345 \0 ', 'upper']]
400
+ assert_(issubclass(self.B.lower().dtype.type, np.str_))
401
+ assert_array_equal(self.B.lower(), tgt)
402
+
403
+ def test_lstrip(self):
404
+ tgt = [[b'abc ', b''],
405
+ [b'12345', b'MixedCase'],
406
+ [b'123 \t 345 \0 ', b'UPPER']]
407
+ assert_(issubclass(self.A.lstrip().dtype.type, np.bytes_))
408
+ assert_array_equal(self.A.lstrip(), tgt)
409
+
410
+ tgt = [[b' abc', b''],
411
+ [b'2345', b'ixedCase'],
412
+ [b'23 \t 345 \x00', b'UPPER']]
413
+ assert_array_equal(self.A.lstrip([b'1', b'M']), tgt)
414
+
415
+ tgt = [['\u03a3 ', ''],
416
+ ['12345', 'MixedCase'],
417
+ ['123 \t 345 \0 ', 'UPPER']]
418
+ assert_(issubclass(self.B.lstrip().dtype.type, np.str_))
419
+ assert_array_equal(self.B.lstrip(), tgt)
420
+
421
+ def test_partition(self):
422
+ P = self.A.partition([b'3', b'M'])
423
+ tgt = [[(b' abc ', b'', b''), (b'', b'', b'')],
424
+ [(b'12', b'3', b'45'), (b'', b'M', b'ixedCase')],
425
+ [(b'12', b'3', b' \t 345 \0 '), (b'UPPER', b'', b'')]]
426
+ assert_(issubclass(P.dtype.type, np.bytes_))
427
+ assert_array_equal(P, tgt)
428
+
429
+ def test_replace(self):
430
+ R = self.A.replace([b'3', b'a'],
431
+ [b'##########', b'@'])
432
+ tgt = [[b' abc ', b''],
433
+ [b'12##########45', b'MixedC@se'],
434
+ [b'12########## \t ##########45 \x00', b'UPPER']]
435
+ assert_(issubclass(R.dtype.type, np.bytes_))
436
+ assert_array_equal(R, tgt)
437
+
438
+ def test_rjust(self):
439
+ assert_(issubclass(self.A.rjust(10).dtype.type, np.bytes_))
440
+
441
+ C = self.A.rjust([10, 20])
442
+ assert_array_equal(np.char.str_len(C), [[10, 20], [10, 20], [12, 20]])
443
+
444
+ C = self.A.rjust(20, b'#')
445
+ assert_(np.all(C.startswith(b'#')))
446
+ assert_array_equal(C.endswith(b'#'),
447
+ [[False, True], [False, False], [False, False]])
448
+
449
+ C = np.char.rjust(b'FOO', [[10, 20], [15, 8]])
450
+ tgt = [[b' FOO', b' FOO'],
451
+ [b' FOO', b' FOO']]
452
+ assert_(issubclass(C.dtype.type, np.bytes_))
453
+ assert_array_equal(C, tgt)
454
+
455
+ def test_rpartition(self):
456
+ P = self.A.rpartition([b'3', b'M'])
457
+ tgt = [[(b'', b'', b' abc '), (b'', b'', b'')],
458
+ [(b'12', b'3', b'45'), (b'', b'M', b'ixedCase')],
459
+ [(b'123 \t ', b'3', b'45 \0 '), (b'', b'', b'UPPER')]]
460
+ assert_(issubclass(P.dtype.type, np.bytes_))
461
+ assert_array_equal(P, tgt)
462
+
463
+ def test_rsplit(self):
464
+ A = self.A.rsplit(b'3')
465
+ tgt = [[[b' abc '], [b'']],
466
+ [[b'12', b'45'], [b'MixedCase']],
467
+ [[b'12', b' \t ', b'45 \x00 '], [b'UPPER']]]
468
+ assert_(issubclass(A.dtype.type, np.object_))
469
+ assert_equal(A.tolist(), tgt)
470
+
471
+ def test_rstrip(self):
472
+ assert_(issubclass(self.A.rstrip().dtype.type, np.bytes_))
473
+
474
+ tgt = [[b' abc', b''],
475
+ [b'12345', b'MixedCase'],
476
+ [b'123 \t 345', b'UPPER']]
477
+ assert_array_equal(self.A.rstrip(), tgt)
478
+
479
+ tgt = [[b' abc ', b''],
480
+ [b'1234', b'MixedCase'],
481
+ [b'123 \t 345 \x00', b'UPP']
482
+ ]
483
+ assert_array_equal(self.A.rstrip([b'5', b'ER']), tgt)
484
+
485
+ tgt = [[' \u03a3', ''],
486
+ ['12345', 'MixedCase'],
487
+ ['123 \t 345', 'UPPER']]
488
+ assert_(issubclass(self.B.rstrip().dtype.type, np.str_))
489
+ assert_array_equal(self.B.rstrip(), tgt)
490
+
491
+ def test_strip(self):
492
+ tgt = [[b'abc', b''],
493
+ [b'12345', b'MixedCase'],
494
+ [b'123 \t 345', b'UPPER']]
495
+ assert_(issubclass(self.A.strip().dtype.type, np.bytes_))
496
+ assert_array_equal(self.A.strip(), tgt)
497
+
498
+ tgt = [[b' abc ', b''],
499
+ [b'234', b'ixedCas'],
500
+ [b'23 \t 345 \x00', b'UPP']]
501
+ assert_array_equal(self.A.strip([b'15', b'EReM']), tgt)
502
+
503
+ tgt = [['\u03a3', ''],
504
+ ['12345', 'MixedCase'],
505
+ ['123 \t 345', 'UPPER']]
506
+ assert_(issubclass(self.B.strip().dtype.type, np.str_))
507
+ assert_array_equal(self.B.strip(), tgt)
508
+
509
+ def test_split(self):
510
+ A = self.A.split(b'3')
511
+ tgt = [
512
+ [[b' abc '], [b'']],
513
+ [[b'12', b'45'], [b'MixedCase']],
514
+ [[b'12', b' \t ', b'45 \x00 '], [b'UPPER']]]
515
+ assert_(issubclass(A.dtype.type, np.object_))
516
+ assert_equal(A.tolist(), tgt)
517
+
518
+ def test_splitlines(self):
519
+ A = np.char.array(['abc\nfds\nwer']).splitlines()
520
+ assert_(issubclass(A.dtype.type, np.object_))
521
+ assert_(A.shape == (1,))
522
+ assert_(len(A[0]) == 3)
523
+
524
+ def test_swapcase(self):
525
+ tgt = [[b' ABC ', b''],
526
+ [b'12345', b'mIXEDcASE'],
527
+ [b'123 \t 345 \0 ', b'upper']]
528
+ assert_(issubclass(self.A.swapcase().dtype.type, np.bytes_))
529
+ assert_array_equal(self.A.swapcase(), tgt)
530
+
531
+ tgt = [[' \u03c3 ', ''],
532
+ ['12345', 'mIXEDcASE'],
533
+ ['123 \t 345 \0 ', 'upper']]
534
+ assert_(issubclass(self.B.swapcase().dtype.type, np.str_))
535
+ assert_array_equal(self.B.swapcase(), tgt)
536
+
537
+ def test_title(self):
538
+ tgt = [[b' Abc ', b''],
539
+ [b'12345', b'Mixedcase'],
540
+ [b'123 \t 345 \0 ', b'Upper']]
541
+ assert_(issubclass(self.A.title().dtype.type, np.bytes_))
542
+ assert_array_equal(self.A.title(), tgt)
543
+
544
+ tgt = [[' \u03a3 ', ''],
545
+ ['12345', 'Mixedcase'],
546
+ ['123 \t 345 \0 ', 'Upper']]
547
+ assert_(issubclass(self.B.title().dtype.type, np.str_))
548
+ assert_array_equal(self.B.title(), tgt)
549
+
550
+ def test_upper(self):
551
+ tgt = [[b' ABC ', b''],
552
+ [b'12345', b'MIXEDCASE'],
553
+ [b'123 \t 345 \0 ', b'UPPER']]
554
+ assert_(issubclass(self.A.upper().dtype.type, np.bytes_))
555
+ assert_array_equal(self.A.upper(), tgt)
556
+
557
+ tgt = [[' \u03a3 ', ''],
558
+ ['12345', 'MIXEDCASE'],
559
+ ['123 \t 345 \0 ', 'UPPER']]
560
+ assert_(issubclass(self.B.upper().dtype.type, np.str_))
561
+ assert_array_equal(self.B.upper(), tgt)
562
+
563
+ def test_isnumeric(self):
564
+
565
+ def fail():
566
+ self.A.isnumeric()
567
+
568
+ assert_raises(TypeError, fail)
569
+ assert_(issubclass(self.B.isnumeric().dtype.type, np.bool_))
570
+ assert_array_equal(self.B.isnumeric(), [
571
+ [False, False], [True, False], [False, False]])
572
+
573
+ def test_isdecimal(self):
574
+
575
+ def fail():
576
+ self.A.isdecimal()
577
+
578
+ assert_raises(TypeError, fail)
579
+ assert_(issubclass(self.B.isdecimal().dtype.type, np.bool_))
580
+ assert_array_equal(self.B.isdecimal(), [
581
+ [False, False], [True, False], [False, False]])
582
+
583
+
584
+ class TestOperations:
585
+ def setup_method(self):
586
+ self.A = np.array([['abc', '123'],
587
+ ['789', 'xyz']]).view(np.chararray)
588
+ self.B = np.array([['efg', '456'],
589
+ ['051', 'tuv']]).view(np.chararray)
590
+
591
+ def test_add(self):
592
+ AB = np.array([['abcefg', '123456'],
593
+ ['789051', 'xyztuv']]).view(np.chararray)
594
+ assert_array_equal(AB, (self.A + self.B))
595
+ assert_(len((self.A + self.B)[0][0]) == 6)
596
+
597
+ def test_radd(self):
598
+ QA = np.array([['qabc', 'q123'],
599
+ ['q789', 'qxyz']]).view(np.chararray)
600
+ assert_array_equal(QA, ('q' + self.A))
601
+
602
+ def test_mul(self):
603
+ A = self.A
604
+ for r in (2, 3, 5, 7, 197):
605
+ Ar = np.array([[A[0, 0]*r, A[0, 1]*r],
606
+ [A[1, 0]*r, A[1, 1]*r]]).view(np.chararray)
607
+
608
+ assert_array_equal(Ar, (self.A * r))
609
+
610
+ for ob in [object(), 'qrs']:
611
+ with assert_raises_regex(ValueError,
612
+ 'Can only multiply by integers'):
613
+ A*ob
614
+
615
+ def test_rmul(self):
616
+ A = self.A
617
+ for r in (2, 3, 5, 7, 197):
618
+ Ar = np.array([[A[0, 0]*r, A[0, 1]*r],
619
+ [A[1, 0]*r, A[1, 1]*r]]).view(np.chararray)
620
+ assert_array_equal(Ar, (r * self.A))
621
+
622
+ for ob in [object(), 'qrs']:
623
+ with assert_raises_regex(ValueError,
624
+ 'Can only multiply by integers'):
625
+ ob * A
626
+
627
+ def test_mod(self):
628
+ """Ticket #856"""
629
+ F = np.array([['%d', '%f'], ['%s', '%r']]).view(np.chararray)
630
+ C = np.array([[3, 7], [19, 1]])
631
+ FC = np.array([['3', '7.000000'],
632
+ ['19', '1']]).view(np.chararray)
633
+ assert_array_equal(FC, F % C)
634
+
635
+ A = np.array([['%.3f', '%d'], ['%s', '%r']]).view(np.chararray)
636
+ A1 = np.array([['1.000', '1'], ['1', '1']]).view(np.chararray)
637
+ assert_array_equal(A1, (A % 1))
638
+
639
+ A2 = np.array([['1.000', '2'], ['3', '4']]).view(np.chararray)
640
+ assert_array_equal(A2, (A % [[1, 2], [3, 4]]))
641
+
642
+ def test_rmod(self):
643
+ assert_(("%s" % self.A) == str(self.A))
644
+ assert_(("%r" % self.A) == repr(self.A))
645
+
646
+ for ob in [42, object()]:
647
+ with assert_raises_regex(
648
+ TypeError, "unsupported operand type.* and 'chararray'"):
649
+ ob % self.A
650
+
651
+ def test_slice(self):
652
+ """Regression test for https://github.com/numpy/numpy/issues/5982"""
653
+
654
+ arr = np.array([['abc ', 'def '], ['geh ', 'ijk ']],
655
+ dtype='S4').view(np.chararray)
656
+ sl1 = arr[:]
657
+ assert_array_equal(sl1, arr)
658
+ assert_(sl1.base is arr)
659
+ assert_(sl1.base.base is arr.base)
660
+
661
+ sl2 = arr[:, :]
662
+ assert_array_equal(sl2, arr)
663
+ assert_(sl2.base is arr)
664
+ assert_(sl2.base.base is arr.base)
665
+
666
+ assert_(arr[0, 0] == b'abc')
667
+
668
+
669
+ def test_empty_indexing():
670
+ """Regression test for ticket 1948."""
671
+ # Check that indexing a chararray with an empty list/array returns an
672
+ # empty chararray instead of a chararray with a single empty string in it.
673
+ s = np.chararray((4,))
674
+ assert_(s[[]].size == 0)
675
+
676
+
677
+ @pytest.mark.parametrize(["dt1", "dt2"],
678
+ [("S", "U"), ("U", "S"), ("S", "O"), ("U", "O"),
679
+ ("S", "d"), ("S", "V")])
680
+ def test_add_types(dt1, dt2):
681
+ arr1 = np.array([1234234], dtype=dt1)
682
+ # If the following fails, e.g. use a number and test "V" explicitly
683
+ arr2 = np.array([b"423"], dtype=dt2)
684
+ with pytest.raises(TypeError,
685
+ match=f".*same dtype kind.*{arr1.dtype}.*{arr2.dtype}"):
686
+ np.char.add(arr1, arr2)
.venv/lib/python3.11/site-packages/numpy/core/tests/test_deprecations.py ADDED
@@ -0,0 +1,817 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Tests related to deprecation warnings. Also a convenient place
3
+ to document how deprecations should eventually be turned into errors.
4
+
5
+ """
6
+ import datetime
7
+ import operator
8
+ import warnings
9
+ import pytest
10
+ import tempfile
11
+ import re
12
+ import sys
13
+
14
+ import numpy as np
15
+ from numpy.testing import (
16
+ assert_raises, assert_warns, assert_, assert_array_equal, SkipTest,
17
+ KnownFailureException, break_cycles,
18
+ )
19
+
20
+ from numpy.core._multiarray_tests import fromstring_null_term_c_api
21
+
22
+ try:
23
+ import pytz
24
+ _has_pytz = True
25
+ except ImportError:
26
+ _has_pytz = False
27
+
28
+
29
+ class _DeprecationTestCase:
30
+ # Just as warning: warnings uses re.match, so the start of this message
31
+ # must match.
32
+ message = ''
33
+ warning_cls = DeprecationWarning
34
+
35
+ def setup_method(self):
36
+ self.warn_ctx = warnings.catch_warnings(record=True)
37
+ self.log = self.warn_ctx.__enter__()
38
+
39
+ # Do *not* ignore other DeprecationWarnings. Ignoring warnings
40
+ # can give very confusing results because of
41
+ # https://bugs.python.org/issue4180 and it is probably simplest to
42
+ # try to keep the tests cleanly giving only the right warning type.
43
+ # (While checking them set to "error" those are ignored anyway)
44
+ # We still have them show up, because otherwise they would be raised
45
+ warnings.filterwarnings("always", category=self.warning_cls)
46
+ warnings.filterwarnings("always", message=self.message,
47
+ category=self.warning_cls)
48
+
49
+ def teardown_method(self):
50
+ self.warn_ctx.__exit__()
51
+
52
+ def assert_deprecated(self, function, num=1, ignore_others=False,
53
+ function_fails=False,
54
+ exceptions=np._NoValue,
55
+ args=(), kwargs={}):
56
+ """Test if DeprecationWarnings are given and raised.
57
+
58
+ This first checks if the function when called gives `num`
59
+ DeprecationWarnings, after that it tries to raise these
60
+ DeprecationWarnings and compares them with `exceptions`.
61
+ The exceptions can be different for cases where this code path
62
+ is simply not anticipated and the exception is replaced.
63
+
64
+ Parameters
65
+ ----------
66
+ function : callable
67
+ The function to test
68
+ num : int
69
+ Number of DeprecationWarnings to expect. This should normally be 1.
70
+ ignore_others : bool
71
+ Whether warnings of the wrong type should be ignored (note that
72
+ the message is not checked)
73
+ function_fails : bool
74
+ If the function would normally fail, setting this will check for
75
+ warnings inside a try/except block.
76
+ exceptions : Exception or tuple of Exceptions
77
+ Exception to expect when turning the warnings into an error.
78
+ The default checks for DeprecationWarnings. If exceptions is
79
+ empty the function is expected to run successfully.
80
+ args : tuple
81
+ Arguments for `function`
82
+ kwargs : dict
83
+ Keyword arguments for `function`
84
+ """
85
+ __tracebackhide__ = True # Hide traceback for py.test
86
+
87
+ # reset the log
88
+ self.log[:] = []
89
+
90
+ if exceptions is np._NoValue:
91
+ exceptions = (self.warning_cls,)
92
+
93
+ try:
94
+ function(*args, **kwargs)
95
+ except (Exception if function_fails else tuple()):
96
+ pass
97
+
98
+ # just in case, clear the registry
99
+ num_found = 0
100
+ for warning in self.log:
101
+ if warning.category is self.warning_cls:
102
+ num_found += 1
103
+ elif not ignore_others:
104
+ raise AssertionError(
105
+ "expected %s but got: %s" %
106
+ (self.warning_cls.__name__, warning.category))
107
+ if num is not None and num_found != num:
108
+ msg = "%i warnings found but %i expected." % (len(self.log), num)
109
+ lst = [str(w) for w in self.log]
110
+ raise AssertionError("\n".join([msg] + lst))
111
+
112
+ with warnings.catch_warnings():
113
+ warnings.filterwarnings("error", message=self.message,
114
+ category=self.warning_cls)
115
+ try:
116
+ function(*args, **kwargs)
117
+ if exceptions != tuple():
118
+ raise AssertionError(
119
+ "No error raised during function call")
120
+ except exceptions:
121
+ if exceptions == tuple():
122
+ raise AssertionError(
123
+ "Error raised during function call")
124
+
125
+ def assert_not_deprecated(self, function, args=(), kwargs={}):
126
+ """Test that warnings are not raised.
127
+
128
+ This is just a shorthand for:
129
+
130
+ self.assert_deprecated(function, num=0, ignore_others=True,
131
+ exceptions=tuple(), args=args, kwargs=kwargs)
132
+ """
133
+ self.assert_deprecated(function, num=0, ignore_others=True,
134
+ exceptions=tuple(), args=args, kwargs=kwargs)
135
+
136
+
137
+ class _VisibleDeprecationTestCase(_DeprecationTestCase):
138
+ warning_cls = np.VisibleDeprecationWarning
139
+
140
+
141
+ class TestDatetime64Timezone(_DeprecationTestCase):
142
+ """Parsing of datetime64 with timezones deprecated in 1.11.0, because
143
+ datetime64 is now timezone naive rather than UTC only.
144
+
145
+ It will be quite a while before we can remove this, because, at the very
146
+ least, a lot of existing code uses the 'Z' modifier to avoid conversion
147
+ from local time to UTC, even if otherwise it handles time in a timezone
148
+ naive fashion.
149
+ """
150
+ def test_string(self):
151
+ self.assert_deprecated(np.datetime64, args=('2000-01-01T00+01',))
152
+ self.assert_deprecated(np.datetime64, args=('2000-01-01T00Z',))
153
+
154
+ @pytest.mark.skipif(not _has_pytz,
155
+ reason="The pytz module is not available.")
156
+ def test_datetime(self):
157
+ tz = pytz.timezone('US/Eastern')
158
+ dt = datetime.datetime(2000, 1, 1, 0, 0, tzinfo=tz)
159
+ self.assert_deprecated(np.datetime64, args=(dt,))
160
+
161
+
162
+ class TestArrayDataAttributeAssignmentDeprecation(_DeprecationTestCase):
163
+ """Assigning the 'data' attribute of an ndarray is unsafe as pointed
164
+ out in gh-7093. Eventually, such assignment should NOT be allowed, but
165
+ in the interests of maintaining backwards compatibility, only a Deprecation-
166
+ Warning will be raised instead for the time being to give developers time to
167
+ refactor relevant code.
168
+ """
169
+
170
+ def test_data_attr_assignment(self):
171
+ a = np.arange(10)
172
+ b = np.linspace(0, 1, 10)
173
+
174
+ self.message = ("Assigning the 'data' attribute is an "
175
+ "inherently unsafe operation and will "
176
+ "be removed in the future.")
177
+ self.assert_deprecated(a.__setattr__, args=('data', b.data))
178
+
179
+
180
+ class TestBinaryReprInsufficientWidthParameterForRepresentation(_DeprecationTestCase):
181
+ """
182
+ If a 'width' parameter is passed into ``binary_repr`` that is insufficient to
183
+ represent the number in base 2 (positive) or 2's complement (negative) form,
184
+ the function used to silently ignore the parameter and return a representation
185
+ using the minimal number of bits needed for the form in question. Such behavior
186
+ is now considered unsafe from a user perspective and will raise an error in the future.
187
+ """
188
+
189
+ def test_insufficient_width_positive(self):
190
+ args = (10,)
191
+ kwargs = {'width': 2}
192
+
193
+ self.message = ("Insufficient bit width provided. This behavior "
194
+ "will raise an error in the future.")
195
+ self.assert_deprecated(np.binary_repr, args=args, kwargs=kwargs)
196
+
197
+ def test_insufficient_width_negative(self):
198
+ args = (-5,)
199
+ kwargs = {'width': 2}
200
+
201
+ self.message = ("Insufficient bit width provided. This behavior "
202
+ "will raise an error in the future.")
203
+ self.assert_deprecated(np.binary_repr, args=args, kwargs=kwargs)
204
+
205
+
206
+ class TestDTypeAttributeIsDTypeDeprecation(_DeprecationTestCase):
207
+ # Deprecated 2021-01-05, NumPy 1.21
208
+ message = r".*`.dtype` attribute"
209
+
210
+ def test_deprecation_dtype_attribute_is_dtype(self):
211
+ class dt:
212
+ dtype = "f8"
213
+
214
+ class vdt(np.void):
215
+ dtype = "f,f"
216
+
217
+ self.assert_deprecated(lambda: np.dtype(dt))
218
+ self.assert_deprecated(lambda: np.dtype(dt()))
219
+ self.assert_deprecated(lambda: np.dtype(vdt))
220
+ self.assert_deprecated(lambda: np.dtype(vdt(1)))
221
+
222
+
223
+ class TestTestDeprecated:
224
+ def test_assert_deprecated(self):
225
+ test_case_instance = _DeprecationTestCase()
226
+ test_case_instance.setup_method()
227
+ assert_raises(AssertionError,
228
+ test_case_instance.assert_deprecated,
229
+ lambda: None)
230
+
231
+ def foo():
232
+ warnings.warn("foo", category=DeprecationWarning, stacklevel=2)
233
+
234
+ test_case_instance.assert_deprecated(foo)
235
+ test_case_instance.teardown_method()
236
+
237
+
238
+ class TestNonNumericConjugate(_DeprecationTestCase):
239
+ """
240
+ Deprecate no-op behavior of ndarray.conjugate on non-numeric dtypes,
241
+ which conflicts with the error behavior of np.conjugate.
242
+ """
243
+ def test_conjugate(self):
244
+ for a in np.array(5), np.array(5j):
245
+ self.assert_not_deprecated(a.conjugate)
246
+ for a in (np.array('s'), np.array('2016', 'M'),
247
+ np.array((1, 2), [('a', int), ('b', int)])):
248
+ self.assert_deprecated(a.conjugate)
249
+
250
+
251
+ class TestNPY_CHAR(_DeprecationTestCase):
252
+ # 2017-05-03, 1.13.0
253
+ def test_npy_char_deprecation(self):
254
+ from numpy.core._multiarray_tests import npy_char_deprecation
255
+ self.assert_deprecated(npy_char_deprecation)
256
+ assert_(npy_char_deprecation() == 'S1')
257
+
258
+
259
+ class TestPyArray_AS1D(_DeprecationTestCase):
260
+ def test_npy_pyarrayas1d_deprecation(self):
261
+ from numpy.core._multiarray_tests import npy_pyarrayas1d_deprecation
262
+ assert_raises(NotImplementedError, npy_pyarrayas1d_deprecation)
263
+
264
+
265
+ class TestPyArray_AS2D(_DeprecationTestCase):
266
+ def test_npy_pyarrayas2d_deprecation(self):
267
+ from numpy.core._multiarray_tests import npy_pyarrayas2d_deprecation
268
+ assert_raises(NotImplementedError, npy_pyarrayas2d_deprecation)
269
+
270
+
271
+ class TestDatetimeEvent(_DeprecationTestCase):
272
+ # 2017-08-11, 1.14.0
273
+ def test_3_tuple(self):
274
+ for cls in (np.datetime64, np.timedelta64):
275
+ # two valid uses - (unit, num) and (unit, num, den, None)
276
+ self.assert_not_deprecated(cls, args=(1, ('ms', 2)))
277
+ self.assert_not_deprecated(cls, args=(1, ('ms', 2, 1, None)))
278
+
279
+ # trying to use the event argument, removed in 1.7.0, is deprecated
280
+ # it used to be a uint8
281
+ self.assert_deprecated(cls, args=(1, ('ms', 2, 'event')))
282
+ self.assert_deprecated(cls, args=(1, ('ms', 2, 63)))
283
+ self.assert_deprecated(cls, args=(1, ('ms', 2, 1, 'event')))
284
+ self.assert_deprecated(cls, args=(1, ('ms', 2, 1, 63)))
285
+
286
+
287
+ class TestTruthTestingEmptyArrays(_DeprecationTestCase):
288
+ # 2017-09-25, 1.14.0
289
+ message = '.*truth value of an empty array is ambiguous.*'
290
+
291
+ def test_1d(self):
292
+ self.assert_deprecated(bool, args=(np.array([]),))
293
+
294
+ def test_2d(self):
295
+ self.assert_deprecated(bool, args=(np.zeros((1, 0)),))
296
+ self.assert_deprecated(bool, args=(np.zeros((0, 1)),))
297
+ self.assert_deprecated(bool, args=(np.zeros((0, 0)),))
298
+
299
+
300
+ class TestBincount(_DeprecationTestCase):
301
+ # 2017-06-01, 1.14.0
302
+ def test_bincount_minlength(self):
303
+ self.assert_deprecated(lambda: np.bincount([1, 2, 3], minlength=None))
304
+
305
+
306
+
307
+ class TestGeneratorSum(_DeprecationTestCase):
308
+ # 2018-02-25, 1.15.0
309
+ def test_generator_sum(self):
310
+ self.assert_deprecated(np.sum, args=((i for i in range(5)),))
311
+
312
+
313
+ class TestFromstring(_DeprecationTestCase):
314
+ # 2017-10-19, 1.14
315
+ def test_fromstring(self):
316
+ self.assert_deprecated(np.fromstring, args=('\x00'*80,))
317
+
318
+
319
+ class TestFromStringAndFileInvalidData(_DeprecationTestCase):
320
+ # 2019-06-08, 1.17.0
321
+ # Tests should be moved to real tests when deprecation is done.
322
+ message = "string or file could not be read to its end"
323
+
324
+ @pytest.mark.parametrize("invalid_str", [",invalid_data", "invalid_sep"])
325
+ def test_deprecate_unparsable_data_file(self, invalid_str):
326
+ x = np.array([1.51, 2, 3.51, 4], dtype=float)
327
+
328
+ with tempfile.TemporaryFile(mode="w") as f:
329
+ x.tofile(f, sep=',', format='%.2f')
330
+ f.write(invalid_str)
331
+
332
+ f.seek(0)
333
+ self.assert_deprecated(lambda: np.fromfile(f, sep=","))
334
+ f.seek(0)
335
+ self.assert_deprecated(lambda: np.fromfile(f, sep=",", count=5))
336
+ # Should not raise:
337
+ with warnings.catch_warnings():
338
+ warnings.simplefilter("error", DeprecationWarning)
339
+ f.seek(0)
340
+ res = np.fromfile(f, sep=",", count=4)
341
+ assert_array_equal(res, x)
342
+
343
+ @pytest.mark.parametrize("invalid_str", [",invalid_data", "invalid_sep"])
344
+ def test_deprecate_unparsable_string(self, invalid_str):
345
+ x = np.array([1.51, 2, 3.51, 4], dtype=float)
346
+ x_str = "1.51,2,3.51,4{}".format(invalid_str)
347
+
348
+ self.assert_deprecated(lambda: np.fromstring(x_str, sep=","))
349
+ self.assert_deprecated(lambda: np.fromstring(x_str, sep=",", count=5))
350
+
351
+ # The C-level API can use not fixed size, but 0 terminated strings,
352
+ # so test that as well:
353
+ bytestr = x_str.encode("ascii")
354
+ self.assert_deprecated(lambda: fromstring_null_term_c_api(bytestr))
355
+
356
+ with assert_warns(DeprecationWarning):
357
+ # this is slightly strange, in that fromstring leaves data
358
+ # potentially uninitialized (would be good to error when all is
359
+ # read, but count is larger then actual data maybe).
360
+ res = np.fromstring(x_str, sep=",", count=5)
361
+ assert_array_equal(res[:-1], x)
362
+
363
+ with warnings.catch_warnings():
364
+ warnings.simplefilter("error", DeprecationWarning)
365
+
366
+ # Should not raise:
367
+ res = np.fromstring(x_str, sep=",", count=4)
368
+ assert_array_equal(res, x)
369
+
370
+
371
+ class Test_GetSet_NumericOps(_DeprecationTestCase):
372
+ # 2018-09-20, 1.16.0
373
+ def test_get_numeric_ops(self):
374
+ from numpy.core._multiarray_tests import getset_numericops
375
+ self.assert_deprecated(getset_numericops, num=2)
376
+
377
+ # empty kwargs prevents any state actually changing which would break
378
+ # other tests.
379
+ self.assert_deprecated(np.set_numeric_ops, kwargs={})
380
+ assert_raises(ValueError, np.set_numeric_ops, add='abc')
381
+
382
+
383
+ class TestShape1Fields(_DeprecationTestCase):
384
+ warning_cls = FutureWarning
385
+
386
+ # 2019-05-20, 1.17.0
387
+ def test_shape_1_fields(self):
388
+ self.assert_deprecated(np.dtype, args=([('a', int, 1)],))
389
+
390
+
391
+ class TestNonZero(_DeprecationTestCase):
392
+ # 2019-05-26, 1.17.0
393
+ def test_zerod(self):
394
+ self.assert_deprecated(lambda: np.nonzero(np.array(0)))
395
+ self.assert_deprecated(lambda: np.nonzero(np.array(1)))
396
+
397
+
398
+ class TestToString(_DeprecationTestCase):
399
+ # 2020-03-06 1.19.0
400
+ message = re.escape("tostring() is deprecated. Use tobytes() instead.")
401
+
402
+ def test_tostring(self):
403
+ arr = np.array(list(b"test\xFF"), dtype=np.uint8)
404
+ self.assert_deprecated(arr.tostring)
405
+
406
+ def test_tostring_matches_tobytes(self):
407
+ arr = np.array(list(b"test\xFF"), dtype=np.uint8)
408
+ b = arr.tobytes()
409
+ with assert_warns(DeprecationWarning):
410
+ s = arr.tostring()
411
+ assert s == b
412
+
413
+
414
+ class TestDTypeCoercion(_DeprecationTestCase):
415
+ # 2020-02-06 1.19.0
416
+ message = "Converting .* to a dtype .*is deprecated"
417
+ deprecated_types = [
418
+ # The builtin scalar super types:
419
+ np.generic, np.flexible, np.number,
420
+ np.inexact, np.floating, np.complexfloating,
421
+ np.integer, np.unsignedinteger, np.signedinteger,
422
+ # character is a deprecated S1 special case:
423
+ np.character,
424
+ ]
425
+
426
+ def test_dtype_coercion(self):
427
+ for scalar_type in self.deprecated_types:
428
+ self.assert_deprecated(np.dtype, args=(scalar_type,))
429
+
430
+ def test_array_construction(self):
431
+ for scalar_type in self.deprecated_types:
432
+ self.assert_deprecated(np.array, args=([], scalar_type,))
433
+
434
+ def test_not_deprecated(self):
435
+ # All specific types are not deprecated:
436
+ for group in np.sctypes.values():
437
+ for scalar_type in group:
438
+ self.assert_not_deprecated(np.dtype, args=(scalar_type,))
439
+
440
+ for scalar_type in [type, dict, list, tuple]:
441
+ # Typical python types are coerced to object currently:
442
+ self.assert_not_deprecated(np.dtype, args=(scalar_type,))
443
+
444
+
445
+ class BuiltInRoundComplexDType(_DeprecationTestCase):
446
+ # 2020-03-31 1.19.0
447
+ deprecated_types = [np.csingle, np.cdouble, np.clongdouble]
448
+ not_deprecated_types = [
449
+ np.int8, np.int16, np.int32, np.int64,
450
+ np.uint8, np.uint16, np.uint32, np.uint64,
451
+ np.float16, np.float32, np.float64,
452
+ ]
453
+
454
+ def test_deprecated(self):
455
+ for scalar_type in self.deprecated_types:
456
+ scalar = scalar_type(0)
457
+ self.assert_deprecated(round, args=(scalar,))
458
+ self.assert_deprecated(round, args=(scalar, 0))
459
+ self.assert_deprecated(round, args=(scalar,), kwargs={'ndigits': 0})
460
+
461
+ def test_not_deprecated(self):
462
+ for scalar_type in self.not_deprecated_types:
463
+ scalar = scalar_type(0)
464
+ self.assert_not_deprecated(round, args=(scalar,))
465
+ self.assert_not_deprecated(round, args=(scalar, 0))
466
+ self.assert_not_deprecated(round, args=(scalar,), kwargs={'ndigits': 0})
467
+
468
+
469
+ class TestIncorrectAdvancedIndexWithEmptyResult(_DeprecationTestCase):
470
+ # 2020-05-27, NumPy 1.20.0
471
+ message = "Out of bound index found. This was previously ignored.*"
472
+
473
+ @pytest.mark.parametrize("index", [([3, 0],), ([0, 0], [3, 0])])
474
+ def test_empty_subspace(self, index):
475
+ # Test for both a single and two/multiple advanced indices. These
476
+ # This will raise an IndexError in the future.
477
+ arr = np.ones((2, 2, 0))
478
+ self.assert_deprecated(arr.__getitem__, args=(index,))
479
+ self.assert_deprecated(arr.__setitem__, args=(index, 0.))
480
+
481
+ # for this array, the subspace is only empty after applying the slice
482
+ arr2 = np.ones((2, 2, 1))
483
+ index2 = (slice(0, 0),) + index
484
+ self.assert_deprecated(arr2.__getitem__, args=(index2,))
485
+ self.assert_deprecated(arr2.__setitem__, args=(index2, 0.))
486
+
487
+ def test_empty_index_broadcast_not_deprecated(self):
488
+ arr = np.ones((2, 2, 2))
489
+
490
+ index = ([[3], [2]], []) # broadcast to an empty result.
491
+ self.assert_not_deprecated(arr.__getitem__, args=(index,))
492
+ self.assert_not_deprecated(arr.__setitem__,
493
+ args=(index, np.empty((2, 0, 2))))
494
+
495
+
496
+ class TestNonExactMatchDeprecation(_DeprecationTestCase):
497
+ # 2020-04-22
498
+ def test_non_exact_match(self):
499
+ arr = np.array([[3, 6, 6], [4, 5, 1]])
500
+ # misspelt mode check
501
+ self.assert_deprecated(lambda: np.ravel_multi_index(arr, (7, 6), mode='Cilp'))
502
+ # using completely different word with first character as R
503
+ self.assert_deprecated(lambda: np.searchsorted(arr[0], 4, side='Random'))
504
+
505
+
506
+ class TestMatrixInOuter(_DeprecationTestCase):
507
+ # 2020-05-13 NumPy 1.20.0
508
+ message = (r"add.outer\(\) was passed a numpy matrix as "
509
+ r"(first|second) argument.")
510
+
511
+ def test_deprecated(self):
512
+ arr = np.array([1, 2, 3])
513
+ m = np.array([1, 2, 3]).view(np.matrix)
514
+ self.assert_deprecated(np.add.outer, args=(m, m), num=2)
515
+ self.assert_deprecated(np.add.outer, args=(arr, m))
516
+ self.assert_deprecated(np.add.outer, args=(m, arr))
517
+ self.assert_not_deprecated(np.add.outer, args=(arr, arr))
518
+
519
+
520
+ class FlatteningConcatenateUnsafeCast(_DeprecationTestCase):
521
+ # NumPy 1.20, 2020-09-03
522
+ message = "concatenate with `axis=None` will use same-kind casting"
523
+
524
+ def test_deprecated(self):
525
+ self.assert_deprecated(np.concatenate,
526
+ args=(([0.], [1.]),),
527
+ kwargs=dict(axis=None, out=np.empty(2, dtype=np.int64)))
528
+
529
+ def test_not_deprecated(self):
530
+ self.assert_not_deprecated(np.concatenate,
531
+ args=(([0.], [1.]),),
532
+ kwargs={'axis': None, 'out': np.empty(2, dtype=np.int64),
533
+ 'casting': "unsafe"})
534
+
535
+ with assert_raises(TypeError):
536
+ # Tests should notice if the deprecation warning is given first...
537
+ np.concatenate(([0.], [1.]), out=np.empty(2, dtype=np.int64),
538
+ casting="same_kind")
539
+
540
+
541
+ class TestDeprecatedUnpickleObjectScalar(_DeprecationTestCase):
542
+ # Deprecated 2020-11-24, NumPy 1.20
543
+ """
544
+ Technically, it should be impossible to create numpy object scalars,
545
+ but there was an unpickle path that would in theory allow it. That
546
+ path is invalid and must lead to the warning.
547
+ """
548
+ message = "Unpickling a scalar with object dtype is deprecated."
549
+
550
+ def test_deprecated(self):
551
+ ctor = np.core.multiarray.scalar
552
+ self.assert_deprecated(lambda: ctor(np.dtype("O"), 1))
553
+
554
+
555
+ class TestSingleElementSignature(_DeprecationTestCase):
556
+ # Deprecated 2021-04-01, NumPy 1.21
557
+ message = r"The use of a length 1"
558
+
559
+ def test_deprecated(self):
560
+ self.assert_deprecated(lambda: np.add(1, 2, signature="d"))
561
+ self.assert_deprecated(lambda: np.add(1, 2, sig=(np.dtype("l"),)))
562
+
563
+
564
+ class TestCtypesGetter(_DeprecationTestCase):
565
+ # Deprecated 2021-05-18, Numpy 1.21.0
566
+ warning_cls = DeprecationWarning
567
+ ctypes = np.array([1]).ctypes
568
+
569
+ @pytest.mark.parametrize(
570
+ "name", ["get_data", "get_shape", "get_strides", "get_as_parameter"]
571
+ )
572
+ def test_deprecated(self, name: str) -> None:
573
+ func = getattr(self.ctypes, name)
574
+ self.assert_deprecated(lambda: func())
575
+
576
+ @pytest.mark.parametrize(
577
+ "name", ["data", "shape", "strides", "_as_parameter_"]
578
+ )
579
+ def test_not_deprecated(self, name: str) -> None:
580
+ self.assert_not_deprecated(lambda: getattr(self.ctypes, name))
581
+
582
+
583
+ PARTITION_DICT = {
584
+ "partition method": np.arange(10).partition,
585
+ "argpartition method": np.arange(10).argpartition,
586
+ "partition function": lambda kth: np.partition(np.arange(10), kth),
587
+ "argpartition function": lambda kth: np.argpartition(np.arange(10), kth),
588
+ }
589
+
590
+
591
+ @pytest.mark.parametrize("func", PARTITION_DICT.values(), ids=PARTITION_DICT)
592
+ class TestPartitionBoolIndex(_DeprecationTestCase):
593
+ # Deprecated 2021-09-29, NumPy 1.22
594
+ warning_cls = DeprecationWarning
595
+ message = "Passing booleans as partition index is deprecated"
596
+
597
+ def test_deprecated(self, func):
598
+ self.assert_deprecated(lambda: func(True))
599
+ self.assert_deprecated(lambda: func([False, True]))
600
+
601
+ def test_not_deprecated(self, func):
602
+ self.assert_not_deprecated(lambda: func(1))
603
+ self.assert_not_deprecated(lambda: func([0, 1]))
604
+
605
+
606
+ class TestMachAr(_DeprecationTestCase):
607
+ # Deprecated 2022-11-22, NumPy 1.25
608
+ warning_cls = DeprecationWarning
609
+
610
+ def test_deprecated_module(self):
611
+ self.assert_deprecated(lambda: getattr(np.core, "MachAr"))
612
+
613
+
614
+ class TestQuantileInterpolationDeprecation(_DeprecationTestCase):
615
+ # Deprecated 2021-11-08, NumPy 1.22
616
+ @pytest.mark.parametrize("func",
617
+ [np.percentile, np.quantile, np.nanpercentile, np.nanquantile])
618
+ def test_deprecated(self, func):
619
+ self.assert_deprecated(
620
+ lambda: func([0., 1.], 0., interpolation="linear"))
621
+ self.assert_deprecated(
622
+ lambda: func([0., 1.], 0., interpolation="nearest"))
623
+
624
+ @pytest.mark.parametrize("func",
625
+ [np.percentile, np.quantile, np.nanpercentile, np.nanquantile])
626
+ def test_both_passed(self, func):
627
+ with warnings.catch_warnings():
628
+ # catch the DeprecationWarning so that it does not raise:
629
+ warnings.simplefilter("always", DeprecationWarning)
630
+ with pytest.raises(TypeError):
631
+ func([0., 1.], 0., interpolation="nearest", method="nearest")
632
+
633
+
634
+ class TestMemEventHook(_DeprecationTestCase):
635
+ # Deprecated 2021-11-18, NumPy 1.23
636
+ def test_mem_seteventhook(self):
637
+ # The actual tests are within the C code in
638
+ # multiarray/_multiarray_tests.c.src
639
+ import numpy.core._multiarray_tests as ma_tests
640
+ with pytest.warns(DeprecationWarning,
641
+ match='PyDataMem_SetEventHook is deprecated'):
642
+ ma_tests.test_pydatamem_seteventhook_start()
643
+ # force an allocation and free of a numpy array
644
+ # needs to be larger then limit of small memory cacher in ctors.c
645
+ a = np.zeros(1000)
646
+ del a
647
+ break_cycles()
648
+ with pytest.warns(DeprecationWarning,
649
+ match='PyDataMem_SetEventHook is deprecated'):
650
+ ma_tests.test_pydatamem_seteventhook_end()
651
+
652
+
653
+ class TestArrayFinalizeNone(_DeprecationTestCase):
654
+ message = "Setting __array_finalize__ = None"
655
+
656
+ def test_use_none_is_deprecated(self):
657
+ # Deprecated way that ndarray itself showed nothing needs finalizing.
658
+ class NoFinalize(np.ndarray):
659
+ __array_finalize__ = None
660
+
661
+ self.assert_deprecated(lambda: np.array(1).view(NoFinalize))
662
+
663
+ class TestAxisNotMAXDIMS(_DeprecationTestCase):
664
+ # Deprecated 2022-01-08, NumPy 1.23
665
+ message = r"Using `axis=32` \(MAXDIMS\) is deprecated"
666
+
667
+ def test_deprecated(self):
668
+ a = np.zeros((1,)*32)
669
+ self.assert_deprecated(lambda: np.repeat(a, 1, axis=np.MAXDIMS))
670
+
671
+
672
+ class TestLoadtxtParseIntsViaFloat(_DeprecationTestCase):
673
+ # Deprecated 2022-07-03, NumPy 1.23
674
+ # This test can be removed without replacement after the deprecation.
675
+ # The tests:
676
+ # * numpy/lib/tests/test_loadtxt.py::test_integer_signs
677
+ # * lib/tests/test_loadtxt.py::test_implicit_cast_float_to_int_fails
678
+ # Have a warning filter that needs to be removed.
679
+ message = r"loadtxt\(\): Parsing an integer via a float is deprecated.*"
680
+
681
+ @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])
682
+ def test_deprecated_warning(self, dtype):
683
+ with pytest.warns(DeprecationWarning, match=self.message):
684
+ np.loadtxt(["10.5"], dtype=dtype)
685
+
686
+ @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])
687
+ def test_deprecated_raised(self, dtype):
688
+ # The DeprecationWarning is chained when raised, so test manually:
689
+ with warnings.catch_warnings():
690
+ warnings.simplefilter("error", DeprecationWarning)
691
+ try:
692
+ np.loadtxt(["10.5"], dtype=dtype)
693
+ except ValueError as e:
694
+ assert isinstance(e.__cause__, DeprecationWarning)
695
+
696
+
697
+ class TestScalarConversion(_DeprecationTestCase):
698
+ # 2023-01-02, 1.25.0
699
+ def test_float_conversion(self):
700
+ self.assert_deprecated(float, args=(np.array([3.14]),))
701
+
702
+ def test_behaviour(self):
703
+ b = np.array([[3.14]])
704
+ c = np.zeros(5)
705
+ with pytest.warns(DeprecationWarning):
706
+ c[0] = b
707
+
708
+
709
+ class TestPyIntConversion(_DeprecationTestCase):
710
+ message = r".*stop allowing conversion of out-of-bound.*"
711
+
712
+ @pytest.mark.parametrize("dtype", np.typecodes["AllInteger"])
713
+ def test_deprecated_scalar(self, dtype):
714
+ dtype = np.dtype(dtype)
715
+ info = np.iinfo(dtype)
716
+
717
+ # Cover the most common creation paths (all end up in the
718
+ # same place):
719
+ def scalar(value, dtype):
720
+ dtype.type(value)
721
+
722
+ def assign(value, dtype):
723
+ arr = np.array([0, 0, 0], dtype=dtype)
724
+ arr[2] = value
725
+
726
+ def create(value, dtype):
727
+ np.array([value], dtype=dtype)
728
+
729
+ for creation_func in [scalar, assign, create]:
730
+ try:
731
+ self.assert_deprecated(
732
+ lambda: creation_func(info.min - 1, dtype))
733
+ except OverflowError:
734
+ pass # OverflowErrors always happened also before and are OK.
735
+
736
+ try:
737
+ self.assert_deprecated(
738
+ lambda: creation_func(info.max + 1, dtype))
739
+ except OverflowError:
740
+ pass # OverflowErrors always happened also before and are OK.
741
+
742
+
743
+ class TestDeprecatedGlobals(_DeprecationTestCase):
744
+ # Deprecated 2022-11-17, NumPy 1.24
745
+ def test_type_aliases(self):
746
+ # from builtins
747
+ self.assert_deprecated(lambda: np.bool8)
748
+ self.assert_deprecated(lambda: np.int0)
749
+ self.assert_deprecated(lambda: np.uint0)
750
+ self.assert_deprecated(lambda: np.bytes0)
751
+ self.assert_deprecated(lambda: np.str0)
752
+ self.assert_deprecated(lambda: np.object0)
753
+
754
+
755
+ @pytest.mark.parametrize("name",
756
+ ["bool", "long", "ulong", "str", "bytes", "object"])
757
+ def test_future_scalar_attributes(name):
758
+ # FutureWarning added 2022-11-17, NumPy 1.24,
759
+ assert name not in dir(np) # we may want to not add them
760
+ with pytest.warns(FutureWarning,
761
+ match=f"In the future .*{name}"):
762
+ assert not hasattr(np, name)
763
+
764
+ # Unfortunately, they are currently still valid via `np.dtype()`
765
+ np.dtype(name)
766
+ name in np.sctypeDict
767
+
768
+
769
+ # Ignore the above future attribute warning for this test.
770
+ @pytest.mark.filterwarnings("ignore:In the future:FutureWarning")
771
+ class TestRemovedGlobals:
772
+ # Removed 2023-01-12, NumPy 1.24.0
773
+ # Not a deprecation, but the large error was added to aid those who missed
774
+ # the previous deprecation, and should be removed similarly to one
775
+ # (or faster).
776
+ @pytest.mark.parametrize("name",
777
+ ["object", "bool", "float", "complex", "str", "int"])
778
+ def test_attributeerror_includes_info(self, name):
779
+ msg = f".*\n`np.{name}` was a deprecated alias for the builtin"
780
+ with pytest.raises(AttributeError, match=msg):
781
+ getattr(np, name)
782
+
783
+
784
+ class TestDeprecatedFinfo(_DeprecationTestCase):
785
+ # Deprecated in NumPy 1.25, 2023-01-16
786
+ def test_deprecated_none(self):
787
+ self.assert_deprecated(np.finfo, args=(None,))
788
+
789
+ class TestFromnumeric(_DeprecationTestCase):
790
+ # 2023-02-28, 1.25.0
791
+ def test_round_(self):
792
+ self.assert_deprecated(lambda: np.round_(np.array([1.5, 2.5, 3.5])))
793
+
794
+ # 2023-03-02, 1.25.0
795
+ def test_cumproduct(self):
796
+ self.assert_deprecated(lambda: np.cumproduct(np.array([1, 2, 3])))
797
+
798
+ # 2023-03-02, 1.25.0
799
+ def test_product(self):
800
+ self.assert_deprecated(lambda: np.product(np.array([1, 2, 3])))
801
+
802
+ # 2023-03-02, 1.25.0
803
+ def test_sometrue(self):
804
+ self.assert_deprecated(lambda: np.sometrue(np.array([True, False])))
805
+
806
+ # 2023-03-02, 1.25.0
807
+ def test_alltrue(self):
808
+ self.assert_deprecated(lambda: np.alltrue(np.array([True, False])))
809
+
810
+
811
+ class TestMathAlias(_DeprecationTestCase):
812
+ # Deprecated in Numpy 1.25, 2023-04-06
813
+ def test_deprecated_np_math(self):
814
+ self.assert_deprecated(lambda: np.math)
815
+
816
+ def test_deprecated_np_lib_math(self):
817
+ self.assert_deprecated(lambda: np.lib.math)
.venv/lib/python3.11/site-packages/numpy/core/tests/test_dlpack.py ADDED
@@ -0,0 +1,124 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys
2
+ import pytest
3
+
4
+ import numpy as np
5
+ from numpy.testing import assert_array_equal, IS_PYPY
6
+
7
+
8
class TestDLPack:
    """Tests for the DLPack protocol: ``ndarray.__dlpack__``,
    ``ndarray.__dlpack_device__`` and ``np.from_dlpack``."""

    @pytest.mark.skipif(IS_PYPY, reason="PyPy can't get refcounts.")
    def test_dunder_dlpack_refcount(self):
        # The capsule returned by __dlpack__ must hold a reference to the
        # exporting array and release it when the capsule is destroyed.
        x = np.arange(5)
        y = x.__dlpack__()
        assert sys.getrefcount(x) == 3
        del y
        assert sys.getrefcount(x) == 2

    def test_dunder_dlpack_stream(self):
        x = np.arange(5)
        x.__dlpack__(stream=None)

        # Any stream other than None is rejected for CPU arrays.
        with pytest.raises(RuntimeError):
            x.__dlpack__(stream=1)

    def test_strides_not_multiple_of_itemsize(self):
        # A field view whose strides are not a multiple of the itemsize
        # cannot be exported and must raise BufferError.
        dt = np.dtype([('int', np.int32), ('char', np.int8)])
        y = np.zeros((5,), dtype=dt)
        z = y['int']

        with pytest.raises(BufferError):
            np.from_dlpack(z)

    @pytest.mark.skipif(IS_PYPY, reason="PyPy can't get refcounts.")
    def test_from_dlpack_refcount(self):
        # Importing via from_dlpack keeps the source alive exactly as long
        # as the imported array exists.
        x = np.arange(5)
        y = np.from_dlpack(x)
        assert sys.getrefcount(x) == 3
        del y
        assert sys.getrefcount(x) == 2

    @pytest.mark.parametrize("dtype", [
        np.bool_,
        np.int8, np.int16, np.int32, np.int64,
        np.uint8, np.uint16, np.uint32, np.uint64,
        np.float16, np.float32, np.float64,
        np.complex64, np.complex128
    ])
    def test_dtype_passthrough(self, dtype):
        # Round-tripping through DLPack preserves dtype and values.
        x = np.arange(5).astype(dtype)
        y = np.from_dlpack(x)

        assert y.dtype == x.dtype
        assert_array_equal(x, y)

    def test_invalid_dtype(self):
        # datetime64 has no DLPack type code; export must fail.
        x = np.asarray(np.datetime64('2021-05-27'))

        with pytest.raises(BufferError):
            np.from_dlpack(x)

    def test_invalid_byte_swapping(self):
        # Non-native byte order is not representable in DLPack.
        dt = np.dtype('=i8').newbyteorder()
        x = np.arange(5, dtype=dt)

        with pytest.raises(BufferError):
            np.from_dlpack(x)

    def test_non_contiguous(self):
        # Strided (non-contiguous) views must still round-trip correctly.
        x = np.arange(25).reshape((5, 5))

        y1 = x[0]
        assert_array_equal(y1, np.from_dlpack(y1))

        y2 = x[:, 0]
        assert_array_equal(y2, np.from_dlpack(y2))

        y3 = x[1, :]
        assert_array_equal(y3, np.from_dlpack(y3))

        y4 = x[1]
        assert_array_equal(y4, np.from_dlpack(y4))

        y5 = np.diagonal(x).copy()
        assert_array_equal(y5, np.from_dlpack(y5))

    @pytest.mark.parametrize("ndim", range(33))
    def test_higher_dims(self, ndim):
        # Shapes up to 32 dimensions (and 0-d) must survive the round trip.
        shape = (1,) * ndim
        x = np.zeros(shape, dtype=np.float64)

        assert shape == np.from_dlpack(x).shape

    def test_dlpack_device(self):
        # (1, 0) is the device reported for these CPU arrays; views report
        # the same device as their base.
        x = np.arange(5)
        assert x.__dlpack_device__() == (1, 0)
        y = np.from_dlpack(x)
        assert y.__dlpack_device__() == (1, 0)
        z = y[::2]
        assert z.__dlpack_device__() == (1, 0)

    def dlpack_deleter_exception(self):
        # Helper (no ``test_`` prefix, not collected): creates a capsule
        # that is never consumed, then raises.
        x = np.arange(5)
        _ = x.__dlpack__()
        raise RuntimeError

    def test_dlpack_destructor_exception(self):
        # The capsule destructor must not suppress or corrupt the
        # in-flight RuntimeError.
        with pytest.raises(RuntimeError):
            self.dlpack_deleter_exception()

    def test_readonly(self):
        # Read-only arrays cannot be exported via DLPack.
        x = np.arange(5)
        x.flags.writeable = False
        with pytest.raises(BufferError):
            x.__dlpack__()

    def test_ndim0(self):
        x = np.array(1.0)
        y = np.from_dlpack(x)
        assert_array_equal(x, y)

    def test_size1dims_arrays(self):
        # Size-1 dimensions may carry odd strides (4 here); the exchange
        # must still reproduce the data.
        x = np.ndarray(dtype='f8', shape=(10, 5, 1), strides=(8, 80, 4),
                       buffer=np.ones(1000, dtype=np.uint8), order='F')
        y = np.from_dlpack(x)
        assert_array_equal(x, y)
.venv/lib/python3.11/site-packages/numpy/core/tests/test_dtype.py ADDED
@@ -0,0 +1,1906 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import sys
2
+ import operator
3
+ import pytest
4
+ import ctypes
5
+ import gc
6
+ import types
7
+ from typing import Any
8
+
9
+ import numpy as np
10
+ import numpy.dtypes
11
+ from numpy.core._rational_tests import rational
12
+ from numpy.core._multiarray_tests import create_custom_field_dtype
13
+ from numpy.testing import (
14
+ assert_, assert_equal, assert_array_equal, assert_raises, HAS_REFCOUNT,
15
+ IS_PYSTON, _OLD_PROMOTION)
16
+ from numpy.compat import pickle
17
+ from itertools import permutations
18
+ import random
19
+
20
+ import hypothesis
21
+ from hypothesis.extra import numpy as hynp
22
+
23
+
24
+
25
def assert_dtype_equal(a, b):
    """Check that *a* and *b* compare equal and hash to the same value."""
    assert_equal(a, b)
    hash_msg = "two equivalent types do not hash to the same value !"
    assert_equal(hash(a), hash(b), hash_msg)
29
+
30
def assert_dtype_not_equal(a, b):
    """Check that *a* and *b* are unequal and hash to different values."""
    hash_msg = "two different types hash to the same value !"
    assert_(a != b)
    assert_(hash(a) != hash(b), hash_msg)
34
+
35
class TestBuiltin:
    """Construction, hashing, and comparison behavior of builtin
    (non-structured) dtypes."""

    @pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object,
                                   np.compat.unicode])
    def test_run(self, t):
        """Only test hash runs at all."""
        dt = np.dtype(t)
        hash(dt)

    @pytest.mark.parametrize('t', [int, float])
    def test_dtype(self, t):
        # Make sure equivalent byte order char hash the same (e.g. < and = on
        # little endian)
        dt = np.dtype(t)
        dt2 = dt.newbyteorder("<")
        dt3 = dt.newbyteorder(">")
        # Exactly one of dt2/dt3 equals dt depending on platform endianness;
        # the equal one must also hash equal despite the different char.
        if dt == dt2:
            assert_(dt.byteorder != dt2.byteorder, "bogus test")
            assert_dtype_equal(dt, dt2)
        else:
            assert_(dt.byteorder != dt3.byteorder, "bogus test")
            assert_dtype_equal(dt, dt3)

    def test_equivalent_dtype_hashing(self):
        # Make sure equivalent dtypes with different type num hash equal
        uintp = np.dtype(np.uintp)
        if uintp.itemsize == 4:
            left = uintp
            right = np.dtype(np.uint32)
        else:
            left = uintp
            right = np.dtype(np.ulonglong)
        assert_(left == right)
        assert_(hash(left) == hash(right))

    def test_invalid_types(self):
        # Make sure invalid type strings raise an error

        assert_raises(TypeError, np.dtype, 'O3')
        assert_raises(TypeError, np.dtype, 'O5')
        assert_raises(TypeError, np.dtype, 'O7')
        assert_raises(TypeError, np.dtype, 'b3')
        assert_raises(TypeError, np.dtype, 'h4')
        assert_raises(TypeError, np.dtype, 'I5')
        assert_raises(TypeError, np.dtype, 'e3')
        assert_raises(TypeError, np.dtype, 'f5')

        # long double size is platform-dependent; only reject sizes that
        # cannot be the real one on this platform.
        if np.dtype('g').itemsize == 8 or np.dtype('g').itemsize == 16:
            assert_raises(TypeError, np.dtype, 'g12')
        elif np.dtype('g').itemsize == 12:
            assert_raises(TypeError, np.dtype, 'g16')

        if np.dtype('l').itemsize == 8:
            assert_raises(TypeError, np.dtype, 'l4')
            assert_raises(TypeError, np.dtype, 'L4')
        else:
            assert_raises(TypeError, np.dtype, 'l8')
            assert_raises(TypeError, np.dtype, 'L8')

        if np.dtype('q').itemsize == 8:
            assert_raises(TypeError, np.dtype, 'q4')
            assert_raises(TypeError, np.dtype, 'Q4')
        else:
            assert_raises(TypeError, np.dtype, 'q8')
            assert_raises(TypeError, np.dtype, 'Q8')

    def test_richcompare_invalid_dtype_equality(self):
        # Make sure objects that cannot be converted to valid
        # dtypes results in False/True when compared to valid dtypes.
        # Here 7 cannot be converted to dtype. No exceptions should be raised

        assert not np.dtype(np.int32) == 7, "dtype richcompare failed for =="
        assert np.dtype(np.int32) != 7, "dtype richcompare failed for !="

    @pytest.mark.parametrize(
        'operation',
        [operator.le, operator.lt, operator.ge, operator.gt])
    def test_richcompare_invalid_dtype_comparison(self, operation):
        # Make sure TypeError is raised for comparison operators
        # for invalid dtypes. Here 7 is an invalid dtype.

        with pytest.raises(TypeError):
            operation(np.dtype(np.int32), 7)

    @pytest.mark.parametrize("dtype",
             ['Bool', 'Bytes0', 'Complex32', 'Complex64',
              'Datetime64', 'Float16', 'Float32', 'Float64',
              'Int8', 'Int16', 'Int32', 'Int64',
              'Object0', 'Str0', 'Timedelta64',
              'UInt8', 'UInt16', 'Uint32', 'UInt32',
              'Uint64', 'UInt64', 'Void0',
              "Float128", "Complex128"])
    def test_numeric_style_types_are_invalid(self, dtype):
        # Capitalized "numeric style" names were removed aliases.
        with assert_raises(TypeError):
            np.dtype(dtype)

    def test_remaining_dtypes_with_bad_bytesize(self):
        # The np.<name> aliases were deprecated, these probably should be too
        assert np.dtype("int0") is np.dtype("intp")
        assert np.dtype("uint0") is np.dtype("uintp")
        assert np.dtype("bool8") is np.dtype("bool")
        assert np.dtype("bytes0") is np.dtype("bytes")
        assert np.dtype("str0") is np.dtype("str")
        assert np.dtype("object0") is np.dtype("object")

    @pytest.mark.parametrize(
        'value',
        ['m8', 'M8', 'datetime64', 'timedelta64',
         'i4, (2,3)f8, f4', 'a3, 3u8, (3,4)a10',
         '>f', '<f', '=f', '|f',
        ])
    def test_dtype_bytes_str_equivalence(self, value):
        # ASCII bytes and str dtype specs must produce the same dtype.
        bytes_value = value.encode('ascii')
        from_bytes = np.dtype(bytes_value)
        from_str = np.dtype(value)
        assert_dtype_equal(from_bytes, from_str)

    def test_dtype_from_bytes(self):
        # Empty bytes object
        assert_raises(TypeError, np.dtype, b'')
        # Byte order indicator, but no type
        assert_raises(TypeError, np.dtype, b'|')

        # Single character with ordinal < NPY_NTYPES returns
        # type by index into _builtin_descrs
        assert_dtype_equal(np.dtype(bytes([0])), np.dtype('bool'))
        assert_dtype_equal(np.dtype(bytes([17])), np.dtype(object))

        # Single character where value is a valid type code
        assert_dtype_equal(np.dtype(b'f'), np.dtype('float32'))

        # Bytes with non-ascii values raise errors
        assert_raises(TypeError, np.dtype, b'\xff')
        assert_raises(TypeError, np.dtype, b's\xff')

    def test_bad_param(self):
        # Can't give a size that's too small
        assert_raises(ValueError, np.dtype,
                      {'names':['f0', 'f1'],
                       'formats':['i4', 'i1'],
                       'offsets':[0, 4],
                       'itemsize':4})
        # If alignment is enabled, the alignment (4) must divide the itemsize
        assert_raises(ValueError, np.dtype,
                      {'names':['f0', 'f1'],
                       'formats':['i4', 'i1'],
                       'offsets':[0, 4],
                       'itemsize':9}, align=True)
        # If alignment is enabled, the individual fields must be aligned
        assert_raises(ValueError, np.dtype,
                      {'names':['f0', 'f1'],
                       'formats':['i1', 'f4'],
                       'offsets':[0, 2]}, align=True)

    def test_field_order_equality(self):
        x = np.dtype({'names': ['A', 'B'],
                      'formats': ['i4', 'f4'],
                      'offsets': [0, 4]})
        y = np.dtype({'names': ['B', 'A'],
                      'formats': ['i4', 'f4'],
                      'offsets': [4, 0]})
        assert_equal(x == y, False)
        # This is an safe cast (not equiv) due to the different names:
        assert np.can_cast(x, y, casting="safe")

    @pytest.mark.parametrize(
        ["type_char", "char_size", "scalar_type"],
        [["U", 4, np.str_],
         ["S", 1, np.bytes_]])
    def test_create_string_dtypes_directly(
            self, type_char, char_size, scalar_type):
        # The string DType classes accept a length argument directly.
        dtype_class = type(np.dtype(type_char))

        dtype = dtype_class(8)
        assert dtype.type is scalar_type
        assert dtype.itemsize == 8*char_size

    def test_create_invalid_string_errors(self):
        one_too_big = np.iinfo(np.intc).max + 1
        with pytest.raises(TypeError):
            type(np.dtype("U"))(one_too_big // 4)

        with pytest.raises(TypeError):
            # Code coverage for very large numbers:
            type(np.dtype("U"))(np.iinfo(np.intp).max // 4 + 1)

        if one_too_big < sys.maxsize:
            with pytest.raises(TypeError):
                type(np.dtype("S"))(one_too_big)

        with pytest.raises(ValueError):
            type(np.dtype("U"))(-1)
226
+
227
+
228
class TestRecord:
    """Construction, equality/hashing, and layout of structured dtypes."""

    def test_equivalent_record(self):
        """Test whether equivalent record dtypes hash the same."""
        a = np.dtype([('yo', int)])
        b = np.dtype([('yo', int)])
        assert_dtype_equal(a, b)

    def test_different_names(self):
        # In theory, they may hash the same (collision) ?
        a = np.dtype([('yo', int)])
        b = np.dtype([('ye', int)])
        assert_dtype_not_equal(a, b)

    def test_different_titles(self):
        # In theory, they may hash the same (collision) ?
        a = np.dtype({'names': ['r', 'b'],
                      'formats': ['u1', 'u1'],
                      'titles': ['Red pixel', 'Blue pixel']})
        b = np.dtype({'names': ['r', 'b'],
                      'formats': ['u1', 'u1'],
                      'titles': ['RRed pixel', 'Blue pixel']})
        assert_dtype_not_equal(a, b)

    @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
    def test_refcount_dictionary_setting(self):
        # Building a dtype from a dict must not leak references to the
        # dict's values.
        names = ["name1"]
        formats = ["f8"]
        titles = ["t1"]
        offsets = [0]
        d = dict(names=names, formats=formats, titles=titles, offsets=offsets)
        refcounts = {k: sys.getrefcount(i) for k, i in d.items()}
        np.dtype(d)
        refcounts_new = {k: sys.getrefcount(i) for k, i in d.items()}
        assert refcounts == refcounts_new

    def test_mutate(self):
        # Mutating a dtype should reset the cached hash value.
        # NOTE: Mutating should be deprecated, but new API added to replace it.
        a = np.dtype([('yo', int)])
        b = np.dtype([('yo', int)])
        c = np.dtype([('ye', int)])
        assert_dtype_equal(a, b)
        assert_dtype_not_equal(a, c)
        a.names = ['ye']
        assert_dtype_equal(a, c)
        assert_dtype_not_equal(a, b)
        # Restoring via __setstate__ must also refresh the hash.
        state = b.__reduce__()[2]
        a.__setstate__(state)
        assert_dtype_equal(a, b)
        assert_dtype_not_equal(a, c)

    def test_mutate_error(self):
        # NOTE: Mutating should be deprecated, but new API added to replace it.
        a = np.dtype("i,i")

        with pytest.raises(ValueError, match="must replace all names at once"):
            a.names = ["f0"]

        with pytest.raises(ValueError, match=".*and not string"):
            a.names = ["f0", b"not a unicode name"]

    def test_not_lists(self):
        """Test if an appropriate exception is raised when passing bad values to
        the dtype constructor.
        """
        assert_raises(TypeError, np.dtype,
                      dict(names={'A', 'B'}, formats=['f8', 'i4']))
        assert_raises(TypeError, np.dtype,
                      dict(names=['A', 'B'], formats={'f8', 'i4'}))

    def test_aligned_size(self):
        # Check that structured dtypes get padded to an aligned size
        dt = np.dtype('i4, i1', align=True)
        assert_equal(dt.itemsize, 8)
        dt = np.dtype([('f0', 'i4'), ('f1', 'i1')], align=True)
        assert_equal(dt.itemsize, 8)
        dt = np.dtype({'names':['f0', 'f1'],
                       'formats':['i4', 'u1'],
                       'offsets':[0, 4]}, align=True)
        assert_equal(dt.itemsize, 8)
        dt = np.dtype({'f0': ('i4', 0), 'f1':('u1', 4)}, align=True)
        assert_equal(dt.itemsize, 8)
        # Nesting should preserve that alignment
        dt1 = np.dtype([('f0', 'i4'),
                        ('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]),
                        ('f2', 'i1')], align=True)
        assert_equal(dt1.itemsize, 20)
        dt2 = np.dtype({'names':['f0', 'f1', 'f2'],
                        'formats':['i4',
                                   [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')],
                                   'i1'],
                        'offsets':[0, 4, 16]}, align=True)
        assert_equal(dt2.itemsize, 20)
        dt3 = np.dtype({'f0': ('i4', 0),
                        'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 4),
                        'f2': ('i1', 16)}, align=True)
        assert_equal(dt3.itemsize, 20)
        assert_equal(dt1, dt2)
        assert_equal(dt2, dt3)
        # Nesting should preserve packing
        dt1 = np.dtype([('f0', 'i4'),
                        ('f1', [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')]),
                        ('f2', 'i1')], align=False)
        assert_equal(dt1.itemsize, 11)
        dt2 = np.dtype({'names':['f0', 'f1', 'f2'],
                        'formats':['i4',
                                   [('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')],
                                   'i1'],
                        'offsets':[0, 4, 10]}, align=False)
        assert_equal(dt2.itemsize, 11)
        dt3 = np.dtype({'f0': ('i4', 0),
                        'f1': ([('f1', 'i1'), ('f2', 'i4'), ('f3', 'i1')], 4),
                        'f2': ('i1', 10)}, align=False)
        assert_equal(dt3.itemsize, 11)
        assert_equal(dt1, dt2)
        assert_equal(dt2, dt3)
        # Array of subtype should preserve alignment
        dt1 = np.dtype([('a', '|i1'),
                        ('b', [('f0', '<i2'),
                               ('f1', '<f4')], 2)], align=True)
        assert_equal(dt1.descr, [('a', '|i1'), ('', '|V3'),
                                 ('b', [('f0', '<i2'), ('', '|V2'),
                                        ('f1', '<f4')], (2,))])

    def test_union_struct(self):
        # Should be able to create union dtypes
        dt = np.dtype({'names':['f0', 'f1', 'f2'], 'formats':['<u4', '<u2', '<u2'],
                       'offsets':[0, 0, 2]}, align=True)
        assert_equal(dt.itemsize, 4)
        a = np.array([3], dtype='<u4').view(dt)
        a['f1'] = 10
        a['f2'] = 36
        assert_equal(a['f0'], 10 + 36*256*256)
        # Should be able to specify fields out of order
        dt = np.dtype({'names':['f0', 'f1', 'f2'], 'formats':['<u4', '<u2', '<u2'],
                       'offsets':[4, 0, 2]}, align=True)
        assert_equal(dt.itemsize, 8)
        # field name should not matter: assignment is by position
        dt2 = np.dtype({'names':['f2', 'f0', 'f1'],
                        'formats':['<u4', '<u2', '<u2'],
                        'offsets':[4, 0, 2]}, align=True)
        vals = [(0, 1, 2), (3, 2**15-1, 4)]
        vals2 = [(0, 1, 2), (3, 2**15-1, 4)]
        a = np.array(vals, dt)
        b = np.array(vals2, dt2)
        assert_equal(a.astype(dt2), b)
        assert_equal(b.astype(dt), a)
        assert_equal(a.view(dt2), b)
        assert_equal(b.view(dt), a)
        # Should not be able to overlap objects with other types
        assert_raises(TypeError, np.dtype,
                      {'names':['f0', 'f1'],
                       'formats':['O', 'i1'],
                       'offsets':[0, 2]})
        assert_raises(TypeError, np.dtype,
                      {'names':['f0', 'f1'],
                       'formats':['i4', 'O'],
                       'offsets':[0, 3]})
        assert_raises(TypeError, np.dtype,
                      {'names':['f0', 'f1'],
                       'formats':[[('a', 'O')], 'i1'],
                       'offsets':[0, 2]})
        assert_raises(TypeError, np.dtype,
                      {'names':['f0', 'f1'],
                       'formats':['i4', [('a', 'O')]],
                       'offsets':[0, 3]})
        # Out of order should still be ok, however
        dt = np.dtype({'names':['f0', 'f1'],
                       'formats':['i1', 'O'],
                       'offsets':[np.dtype('intp').itemsize, 0]})

    @pytest.mark.parametrize(["obj", "dtype", "expected"],
        [([], ("(2)f4,"), np.empty((0, 2), dtype="f4")),
         (3, "(3)f4,", [3, 3, 3]),
         (np.float64(2), "(2)f4,", [2, 2]),
         ([((0, 1), (1, 2)), ((2,),)], '(2,2)f4', None),
         (["1", "2"], "(2)i,", None)])
    def test_subarray_list(self, obj, dtype, expected):
        dtype = np.dtype(dtype)
        res = np.array(obj, dtype=dtype)

        if expected is None:
            # iterate the 1-d list to fill the array
            expected = np.empty(len(obj), dtype=dtype)
            for i in range(len(expected)):
                expected[i] = obj[i]

        assert_array_equal(res, expected)

    def test_comma_datetime(self):
        dt = np.dtype('M8[D],datetime64[Y],i8')
        assert_equal(dt, np.dtype([('f0', 'M8[D]'),
                                   ('f1', 'datetime64[Y]'),
                                   ('f2', 'i8')]))

    def test_from_dictproxy(self):
        # Tests for PR #5920
        dt = np.dtype({'names': ['a', 'b'], 'formats': ['i4', 'f4']})
        assert_dtype_equal(dt, np.dtype(dt.fields))
        dt2 = np.dtype((np.void, dt.fields))
        assert_equal(dt2.fields, dt.fields)

    def test_from_dict_with_zero_width_field(self):
        # Regression test for #6430 / #2196
        dt = np.dtype([('val1', np.float32, (0,)), ('val2', int)])
        dt2 = np.dtype({'names': ['val1', 'val2'],
                        'formats': [(np.float32, (0,)), int]})

        assert_dtype_equal(dt, dt2)
        assert_equal(dt.fields['val1'][0].itemsize, 0)
        assert_equal(dt.itemsize, dt.fields['val2'][0].itemsize)

    def test_bool_commastring(self):
        d = np.dtype('?,?,?')  # raises?
        assert_equal(len(d.names), 3)
        for n in d.names:
            assert_equal(d.fields[n][0], np.dtype('?'))

    def test_nonint_offsets(self):
        # gh-8059
        def make_dtype(off):
            return np.dtype({'names': ['A'], 'formats': ['i4'],
                             'offsets': [off]})

        assert_raises(TypeError, make_dtype, 'ASD')
        assert_raises(OverflowError, make_dtype, 2**70)
        assert_raises(TypeError, make_dtype, 2.3)
        assert_raises(ValueError, make_dtype, -10)

        # no errors here:
        dt = make_dtype(np.uint32(0))
        np.zeros(1, dtype=dt)[0].item()

    def test_fields_by_index(self):
        dt = np.dtype([('a', np.int8), ('b', np.float32, 3)])
        assert_dtype_equal(dt[0], np.dtype(np.int8))
        assert_dtype_equal(dt[1], np.dtype((np.float32, 3)))
        assert_dtype_equal(dt[-1], dt[1])
        assert_dtype_equal(dt[-2], dt[0])
        assert_raises(IndexError, lambda: dt[-3])

        assert_raises(TypeError, operator.getitem, dt, 3.0)

        assert_equal(dt[1], dt[np.int8(1)])

    @pytest.mark.parametrize('align_flag',[False, True])
    def test_multifield_index(self, align_flag):
        # indexing with a list produces subfields
        # the align flag should be preserved
        dt = np.dtype([
            (('title', 'col1'), '<U20'), ('A', '<f8'), ('B', '<f8')
        ], align=align_flag)

        dt_sub = dt[['B', 'col1']]
        assert_equal(
            dt_sub,
            np.dtype({
                'names': ['B', 'col1'],
                'formats': ['<f8', '<U20'],
                'offsets': [88, 0],
                'titles': [None, 'title'],
                'itemsize': 96
            })
        )
        assert_equal(dt_sub.isalignedstruct, align_flag)

        dt_sub = dt[['B']]
        assert_equal(
            dt_sub,
            np.dtype({
                'names': ['B'],
                'formats': ['<f8'],
                'offsets': [88],
                'itemsize': 96
            })
        )
        assert_equal(dt_sub.isalignedstruct, align_flag)

        dt_sub = dt[[]]
        assert_equal(
            dt_sub,
            np.dtype({
                'names': [],
                'formats': [],
                'offsets': [],
                'itemsize': 96
            })
        )
        assert_equal(dt_sub.isalignedstruct, align_flag)

        assert_raises(TypeError, operator.getitem, dt, ())
        assert_raises(TypeError, operator.getitem, dt, [1, 2, 3])
        assert_raises(TypeError, operator.getitem, dt, ['col1', 2])
        assert_raises(KeyError, operator.getitem, dt, ['fake'])
        assert_raises(KeyError, operator.getitem, dt, ['title'])
        assert_raises(ValueError, operator.getitem, dt, ['col1', 'col1'])

    def test_partial_dict(self):
        # 'names' is missing
        assert_raises(ValueError, np.dtype,
                      {'formats': ['i4', 'i4'], 'f0': ('i4', 0), 'f1':('i4', 4)})

    def test_fieldless_views(self):
        a = np.zeros(2, dtype={'names':[], 'formats':[], 'offsets':[],
                               'itemsize':8})
        assert_raises(ValueError, a.view, np.dtype([]))

        d = np.dtype((np.dtype([]), 10))
        assert_equal(d.shape, (10,))
        assert_equal(d.itemsize, 0)
        assert_equal(d.base, np.dtype([]))

        arr = np.fromiter((() for i in range(10)), [])
        assert_equal(arr.dtype, np.dtype([]))
        assert_raises(ValueError, np.frombuffer, b'', dtype=[])
        assert_equal(np.frombuffer(b'', dtype=[], count=2),
                     np.empty(2, dtype=[]))

        assert_raises(ValueError, np.dtype, ([], 'f8'))
        assert_raises(ValueError, np.zeros(1, dtype='i4').view, [])

        assert_equal(np.zeros(2, dtype=[]) == np.zeros(2, dtype=[]),
                     np.ones(2, dtype=bool))

        assert_equal(np.zeros((1, 2), dtype=[]) == a,
                     np.ones((1, 2), dtype=bool))

    def test_nonstructured_with_object(self):
        # See gh-23277, the dtype here thinks it contain objects, if the
        # assert about that fails, the test becomes meaningless (which is OK)
        arr = np.recarray((0,), dtype="O")
        assert arr.dtype.names is None  # no fields
        assert arr.dtype.hasobject  # but claims to contain objects
        del arr  # the deletion failed previously.
562
+
563
+
564
+ class TestSubarray:
565
+ def test_single_subarray(self):
566
+ a = np.dtype((int, (2)))
567
+ b = np.dtype((int, (2,)))
568
+ assert_dtype_equal(a, b)
569
+
570
+ assert_equal(type(a.subdtype[1]), tuple)
571
+ assert_equal(type(b.subdtype[1]), tuple)
572
+
573
+ def test_equivalent_record(self):
574
+ """Test whether equivalent subarray dtypes hash the same."""
575
+ a = np.dtype((int, (2, 3)))
576
+ b = np.dtype((int, (2, 3)))
577
+ assert_dtype_equal(a, b)
578
+
579
+ def test_nonequivalent_record(self):
580
+ """Test whether different subarray dtypes hash differently."""
581
+ a = np.dtype((int, (2, 3)))
582
+ b = np.dtype((int, (3, 2)))
583
+ assert_dtype_not_equal(a, b)
584
+
585
+ a = np.dtype((int, (2, 3)))
586
+ b = np.dtype((int, (2, 2)))
587
+ assert_dtype_not_equal(a, b)
588
+
589
+ a = np.dtype((int, (1, 2, 3)))
590
+ b = np.dtype((int, (1, 2)))
591
+ assert_dtype_not_equal(a, b)
592
+
593
+ def test_shape_equal(self):
594
+ """Test some data types that are equal"""
595
+ assert_dtype_equal(np.dtype('f8'), np.dtype(('f8', tuple())))
596
+ # FutureWarning during deprecation period; after it is passed this
597
+ # should instead check that "(1)f8" == "1f8" == ("f8", 1).
598
+ with pytest.warns(FutureWarning):
599
+ assert_dtype_equal(np.dtype('f8'), np.dtype(('f8', 1)))
600
+ assert_dtype_equal(np.dtype((int, 2)), np.dtype((int, (2,))))
601
+ assert_dtype_equal(np.dtype(('<f4', (3, 2))), np.dtype(('<f4', (3, 2))))
602
+ d = ([('a', 'f4', (1, 2)), ('b', 'f8', (3, 1))], (3, 2))
603
+ assert_dtype_equal(np.dtype(d), np.dtype(d))
604
+
605
+ def test_shape_simple(self):
606
+ """Test some simple cases that shouldn't be equal"""
607
+ assert_dtype_not_equal(np.dtype('f8'), np.dtype(('f8', (1,))))
608
+ assert_dtype_not_equal(np.dtype(('f8', (1,))), np.dtype(('f8', (1, 1))))
609
+ assert_dtype_not_equal(np.dtype(('f4', (3, 2))), np.dtype(('f4', (2, 3))))
610
+
611
+ def test_shape_monster(self):
612
+ """Test some more complicated cases that shouldn't be equal"""
613
+ assert_dtype_not_equal(
614
+ np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
615
+ np.dtype(([('a', 'f4', (1, 2)), ('b', 'f8', (1, 3))], (2, 2))))
616
+ assert_dtype_not_equal(
617
+ np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
618
+ np.dtype(([('a', 'f4', (2, 1)), ('b', 'i8', (1, 3))], (2, 2))))
619
+ assert_dtype_not_equal(
620
+ np.dtype(([('a', 'f4', (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
621
+ np.dtype(([('e', 'f8', (1, 3)), ('d', 'f4', (2, 1))], (2, 2))))
622
+ assert_dtype_not_equal(
623
+ np.dtype(([('a', [('a', 'i4', 6)], (2, 1)), ('b', 'f8', (1, 3))], (2, 2))),
624
+ np.dtype(([('a', [('a', 'u4', 6)], (2, 1)), ('b', 'f8', (1, 3))], (2, 2))))
625
+
626
+ def test_shape_sequence(self):
627
+ # Any sequence of integers should work as shape, but the result
628
+ # should be a tuple (immutable) of base type integers.
629
+ a = np.array([1, 2, 3], dtype=np.int16)
630
+ l = [1, 2, 3]
631
+ # Array gets converted
632
+ dt = np.dtype([('a', 'f4', a)])
633
+ assert_(isinstance(dt['a'].shape, tuple))
634
+ assert_(isinstance(dt['a'].shape[0], int))
635
+ # List gets converted
636
+ dt = np.dtype([('a', 'f4', l)])
637
+ assert_(isinstance(dt['a'].shape, tuple))
638
+ #
639
+
640
+ class IntLike:
641
+ def __index__(self):
642
+ return 3
643
+
644
+ def __int__(self):
645
+ # (a PyNumber_Check fails without __int__)
646
+ return 3
647
+
648
+ dt = np.dtype([('a', 'f4', IntLike())])
649
+ assert_(isinstance(dt['a'].shape, tuple))
650
+ assert_(isinstance(dt['a'].shape[0], int))
651
+ dt = np.dtype([('a', 'f4', (IntLike(),))])
652
+ assert_(isinstance(dt['a'].shape, tuple))
653
+ assert_(isinstance(dt['a'].shape[0], int))
654
+
655
+ def test_shape_matches_ndim(self):
656
+ dt = np.dtype([('a', 'f4', ())])
657
+ assert_equal(dt['a'].shape, ())
658
+ assert_equal(dt['a'].ndim, 0)
659
+
660
+ dt = np.dtype([('a', 'f4')])
661
+ assert_equal(dt['a'].shape, ())
662
+ assert_equal(dt['a'].ndim, 0)
663
+
664
+ dt = np.dtype([('a', 'f4', 4)])
665
+ assert_equal(dt['a'].shape, (4,))
666
+ assert_equal(dt['a'].ndim, 1)
667
+
668
+ dt = np.dtype([('a', 'f4', (1, 2, 3))])
669
+ assert_equal(dt['a'].shape, (1, 2, 3))
670
+ assert_equal(dt['a'].ndim, 3)
671
+
672
+ def test_shape_invalid(self):
673
+ # Check that the shape is valid.
674
+ max_int = np.iinfo(np.intc).max
675
+ max_intp = np.iinfo(np.intp).max
676
+ # Too large values (the datatype is part of this)
677
+ assert_raises(ValueError, np.dtype, [('a', 'f4', max_int // 4 + 1)])
678
+ assert_raises(ValueError, np.dtype, [('a', 'f4', max_int + 1)])
679
+ assert_raises(ValueError, np.dtype, [('a', 'f4', (max_int, 2))])
680
+ # Takes a different code path (fails earlier:
681
+ assert_raises(ValueError, np.dtype, [('a', 'f4', max_intp + 1)])
682
+ # Negative values
683
+ assert_raises(ValueError, np.dtype, [('a', 'f4', -1)])
684
+ assert_raises(ValueError, np.dtype, [('a', 'f4', (-1, -1))])
685
+
686
+ def test_alignment(self):
687
+ #Check that subarrays are aligned
688
+ t1 = np.dtype('(1,)i4', align=True)
689
+ t2 = np.dtype('2i4', align=True)
690
+ assert_equal(t1.alignment, t2.alignment)
691
+
692
+ def test_aligned_empty(self):
693
+ # Mainly regression test for gh-19696: construction failed completely
694
+ dt = np.dtype([], align=True)
695
+ assert dt == np.dtype([])
696
+ dt = np.dtype({"names": [], "formats": [], "itemsize": 0}, align=True)
697
+ assert dt == np.dtype([])
698
+
699
+ def test_subarray_base_item(self):
700
+ arr = np.ones(3, dtype=[("f", "i", 3)])
701
+ # Extracting the field "absorbs" the subarray into a view:
702
+ assert arr["f"].base is arr
703
+ # Extract the structured item, and then check the tuple component:
704
+ item = arr.item(0)
705
+ assert type(item) is tuple and len(item) == 1
706
+ assert item[0].base is arr
707
+
708
+ def test_subarray_cast_copies(self):
709
+ # Older versions of NumPy did NOT copy, but they got the ownership
710
+ # wrong (not actually knowing the correct base!). Versions since 1.21
711
+ # (I think) crashed fairly reliable. This defines the correct behavior
712
+ # as a copy. Keeping the ownership would be possible (but harder)
713
+ arr = np.ones(3, dtype=[("f", "i", 3)])
714
+ cast = arr.astype(object)
715
+ for fields in cast:
716
+ assert type(fields) == tuple and len(fields) == 1
717
+ subarr = fields[0]
718
+ assert subarr.base is None
719
+ assert subarr.flags.owndata
720
+
721
+
722
+ def iter_struct_object_dtypes():
723
+ """
724
+ Iterates over a few complex dtypes and object pattern which
725
+ fill the array with a given object (defaults to a singleton).
726
+
727
+ Yields
728
+ ------
729
+ dtype : dtype
730
+ pattern : tuple
731
+ Structured tuple for use with `np.array`.
732
+ count : int
733
+ Number of objects stored in the dtype.
734
+ singleton : object
735
+ A singleton object. The returned pattern is constructed so that
736
+ all objects inside the datatype are set to the singleton.
737
+ """
738
+ obj = object()
739
+
740
+ dt = np.dtype([('b', 'O', (2, 3))])
741
+ p = ([[obj] * 3] * 2,)
742
+ yield pytest.param(dt, p, 6, obj, id="<subarray>")
743
+
744
+ dt = np.dtype([('a', 'i4'), ('b', 'O', (2, 3))])
745
+ p = (0, [[obj] * 3] * 2)
746
+ yield pytest.param(dt, p, 6, obj, id="<subarray in field>")
747
+
748
+ dt = np.dtype([('a', 'i4'),
749
+ ('b', [('ba', 'O'), ('bb', 'i1')], (2, 3))])
750
+ p = (0, [[(obj, 0)] * 3] * 2)
751
+ yield pytest.param(dt, p, 6, obj, id="<structured subarray 1>")
752
+
753
+ dt = np.dtype([('a', 'i4'),
754
+ ('b', [('ba', 'O'), ('bb', 'O')], (2, 3))])
755
+ p = (0, [[(obj, obj)] * 3] * 2)
756
+ yield pytest.param(dt, p, 12, obj, id="<structured subarray 2>")
757
+
758
+
759
+ @pytest.mark.skipif(
760
+ sys.version_info >= (3, 12),
761
+ reason="Python 3.12 has immortal refcounts, this test will no longer "
762
+ "work. See gh-23986"
763
+ )
764
+ @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
765
+ class TestStructuredObjectRefcounting:
766
+ """These tests cover various uses of complicated structured types which
767
+ include objects and thus require reference counting.
768
+ """
769
+ @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
770
+ iter_struct_object_dtypes())
771
+ @pytest.mark.parametrize(["creation_func", "creation_obj"], [
772
+ pytest.param(np.empty, None,
773
+ # None is probably used for too many things
774
+ marks=pytest.mark.skip("unreliable due to python's behaviour")),
775
+ (np.ones, 1),
776
+ (np.zeros, 0)])
777
+ def test_structured_object_create_delete(self, dt, pat, count, singleton,
778
+ creation_func, creation_obj):
779
+ """Structured object reference counting in creation and deletion"""
780
+ # The test assumes that 0, 1, and None are singletons.
781
+ gc.collect()
782
+ before = sys.getrefcount(creation_obj)
783
+ arr = creation_func(3, dt)
784
+
785
+ now = sys.getrefcount(creation_obj)
786
+ assert now - before == count * 3
787
+ del arr
788
+ now = sys.getrefcount(creation_obj)
789
+ assert now == before
790
+
791
+ @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
792
+ iter_struct_object_dtypes())
793
+ def test_structured_object_item_setting(self, dt, pat, count, singleton):
794
+ """Structured object reference counting for simple item setting"""
795
+ one = 1
796
+
797
+ gc.collect()
798
+ before = sys.getrefcount(singleton)
799
+ arr = np.array([pat] * 3, dt)
800
+ assert sys.getrefcount(singleton) - before == count * 3
801
+ # Fill with `1` and check that it was replaced correctly:
802
+ before2 = sys.getrefcount(one)
803
+ arr[...] = one
804
+ after2 = sys.getrefcount(one)
805
+ assert after2 - before2 == count * 3
806
+ del arr
807
+ gc.collect()
808
+ assert sys.getrefcount(one) == before2
809
+ assert sys.getrefcount(singleton) == before
810
+
811
+ @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
812
+ iter_struct_object_dtypes())
813
+ @pytest.mark.parametrize(
814
+ ['shape', 'index', 'items_changed'],
815
+ [((3,), ([0, 2],), 2),
816
+ ((3, 2), ([0, 2], slice(None)), 4),
817
+ ((3, 2), ([0, 2], [1]), 2),
818
+ ((3,), ([True, False, True]), 2)])
819
+ def test_structured_object_indexing(self, shape, index, items_changed,
820
+ dt, pat, count, singleton):
821
+ """Structured object reference counting for advanced indexing."""
822
+ # Use two small negative values (should be singletons, but less likely
823
+ # to run into race-conditions). This failed in some threaded envs
824
+ # When using 0 and 1. If it fails again, should remove all explicit
825
+ # checks, and rely on `pytest-leaks` reference count checker only.
826
+ val0 = -4
827
+ val1 = -5
828
+
829
+ arr = np.full(shape, val0, dt)
830
+
831
+ gc.collect()
832
+ before_val0 = sys.getrefcount(val0)
833
+ before_val1 = sys.getrefcount(val1)
834
+ # Test item getting:
835
+ part = arr[index]
836
+ after_val0 = sys.getrefcount(val0)
837
+ assert after_val0 - before_val0 == count * items_changed
838
+ del part
839
+ # Test item setting:
840
+ arr[index] = val1
841
+ gc.collect()
842
+ after_val0 = sys.getrefcount(val0)
843
+ after_val1 = sys.getrefcount(val1)
844
+ assert before_val0 - after_val0 == count * items_changed
845
+ assert after_val1 - before_val1 == count * items_changed
846
+
847
+ @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
848
+ iter_struct_object_dtypes())
849
+ def test_structured_object_take_and_repeat(self, dt, pat, count, singleton):
850
+ """Structured object reference counting for specialized functions.
851
+ The older functions such as take and repeat use different code paths
852
+ then item setting (when writing this).
853
+ """
854
+ indices = [0, 1]
855
+
856
+ arr = np.array([pat] * 3, dt)
857
+ gc.collect()
858
+ before = sys.getrefcount(singleton)
859
+ res = arr.take(indices)
860
+ after = sys.getrefcount(singleton)
861
+ assert after - before == count * 2
862
+ new = res.repeat(10)
863
+ gc.collect()
864
+ after_repeat = sys.getrefcount(singleton)
865
+ assert after_repeat - after == count * 2 * 10
866
+
867
+
868
+ class TestStructuredDtypeSparseFields:
869
+ """Tests subarray fields which contain sparse dtypes so that
870
+ not all memory is used by the dtype work. Such dtype's should
871
+ leave the underlying memory unchanged.
872
+ """
873
+ dtype = np.dtype([('a', {'names':['aa', 'ab'], 'formats':['f', 'f'],
874
+ 'offsets':[0, 4]}, (2, 3))])
875
+ sparse_dtype = np.dtype([('a', {'names':['ab'], 'formats':['f'],
876
+ 'offsets':[4]}, (2, 3))])
877
+
878
+ def test_sparse_field_assignment(self):
879
+ arr = np.zeros(3, self.dtype)
880
+ sparse_arr = arr.view(self.sparse_dtype)
881
+
882
+ sparse_arr[...] = np.finfo(np.float32).max
883
+ # dtype is reduced when accessing the field, so shape is (3, 2, 3):
884
+ assert_array_equal(arr["a"]["aa"], np.zeros((3, 2, 3)))
885
+
886
+ def test_sparse_field_assignment_fancy(self):
887
+ # Fancy assignment goes to the copyswap function for complex types:
888
+ arr = np.zeros(3, self.dtype)
889
+ sparse_arr = arr.view(self.sparse_dtype)
890
+
891
+ sparse_arr[[0, 1, 2]] = np.finfo(np.float32).max
892
+ # dtype is reduced when accessing the field, so shape is (3, 2, 3):
893
+ assert_array_equal(arr["a"]["aa"], np.zeros((3, 2, 3)))
894
+
895
+
896
+ class TestMonsterType:
897
+ """Test deeply nested subtypes."""
898
+
899
+ def test1(self):
900
+ simple1 = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'],
901
+ 'titles': ['Red pixel', 'Blue pixel']})
902
+ a = np.dtype([('yo', int), ('ye', simple1),
903
+ ('yi', np.dtype((int, (3, 2))))])
904
+ b = np.dtype([('yo', int), ('ye', simple1),
905
+ ('yi', np.dtype((int, (3, 2))))])
906
+ assert_dtype_equal(a, b)
907
+
908
+ c = np.dtype([('yo', int), ('ye', simple1),
909
+ ('yi', np.dtype((a, (3, 2))))])
910
+ d = np.dtype([('yo', int), ('ye', simple1),
911
+ ('yi', np.dtype((a, (3, 2))))])
912
+ assert_dtype_equal(c, d)
913
+
914
+ @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking")
915
+ def test_list_recursion(self):
916
+ l = list()
917
+ l.append(('f', l))
918
+ with pytest.raises(RecursionError):
919
+ np.dtype(l)
920
+
921
+ @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking")
922
+ def test_tuple_recursion(self):
923
+ d = np.int32
924
+ for i in range(100000):
925
+ d = (d, (1,))
926
+ with pytest.raises(RecursionError):
927
+ np.dtype(d)
928
+
929
+ @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking")
930
+ def test_dict_recursion(self):
931
+ d = dict(names=['self'], formats=[None], offsets=[0])
932
+ d['formats'][0] = d
933
+ with pytest.raises(RecursionError):
934
+ np.dtype(d)
935
+
936
+
937
+ class TestMetadata:
938
+ def test_no_metadata(self):
939
+ d = np.dtype(int)
940
+ assert_(d.metadata is None)
941
+
942
+ def test_metadata_takes_dict(self):
943
+ d = np.dtype(int, metadata={'datum': 1})
944
+ assert_(d.metadata == {'datum': 1})
945
+
946
+ def test_metadata_rejects_nondict(self):
947
+ assert_raises(TypeError, np.dtype, int, metadata='datum')
948
+ assert_raises(TypeError, np.dtype, int, metadata=1)
949
+ assert_raises(TypeError, np.dtype, int, metadata=None)
950
+
951
+ def test_nested_metadata(self):
952
+ d = np.dtype([('a', np.dtype(int, metadata={'datum': 1}))])
953
+ assert_(d['a'].metadata == {'datum': 1})
954
+
955
+ def test_base_metadata_copied(self):
956
+ d = np.dtype((np.void, np.dtype('i4,i4', metadata={'datum': 1})))
957
+ assert_(d.metadata == {'datum': 1})
958
+
959
+ class TestString:
960
+ def test_complex_dtype_str(self):
961
+ dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
962
+ ('rtile', '>f4', (64, 36))], (3,)),
963
+ ('bottom', [('bleft', ('>f4', (8, 64)), (1,)),
964
+ ('bright', '>f4', (8, 36))])])
965
+ assert_equal(str(dt),
966
+ "[('top', [('tiles', ('>f4', (64, 64)), (1,)), "
967
+ "('rtile', '>f4', (64, 36))], (3,)), "
968
+ "('bottom', [('bleft', ('>f4', (8, 64)), (1,)), "
969
+ "('bright', '>f4', (8, 36))])]")
970
+
971
+ # If the sticky aligned flag is set to True, it makes the
972
+ # str() function use a dict representation with an 'aligned' flag
973
+ dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
974
+ ('rtile', '>f4', (64, 36))],
975
+ (3,)),
976
+ ('bottom', [('bleft', ('>f4', (8, 64)), (1,)),
977
+ ('bright', '>f4', (8, 36))])],
978
+ align=True)
979
+ assert_equal(str(dt),
980
+ "{'names': ['top', 'bottom'],"
981
+ " 'formats': [([('tiles', ('>f4', (64, 64)), (1,)), "
982
+ "('rtile', '>f4', (64, 36))], (3,)), "
983
+ "[('bleft', ('>f4', (8, 64)), (1,)), "
984
+ "('bright', '>f4', (8, 36))]],"
985
+ " 'offsets': [0, 76800],"
986
+ " 'itemsize': 80000,"
987
+ " 'aligned': True}")
988
+ with np.printoptions(legacy='1.21'):
989
+ assert_equal(str(dt),
990
+ "{'names':['top','bottom'], "
991
+ "'formats':[([('tiles', ('>f4', (64, 64)), (1,)), "
992
+ "('rtile', '>f4', (64, 36))], (3,)),"
993
+ "[('bleft', ('>f4', (8, 64)), (1,)), "
994
+ "('bright', '>f4', (8, 36))]], "
995
+ "'offsets':[0,76800], "
996
+ "'itemsize':80000, "
997
+ "'aligned':True}")
998
+ assert_equal(np.dtype(eval(str(dt))), dt)
999
+
1000
+ dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'],
1001
+ 'offsets': [0, 1, 2],
1002
+ 'titles': ['Red pixel', 'Green pixel', 'Blue pixel']})
1003
+ assert_equal(str(dt),
1004
+ "[(('Red pixel', 'r'), 'u1'), "
1005
+ "(('Green pixel', 'g'), 'u1'), "
1006
+ "(('Blue pixel', 'b'), 'u1')]")
1007
+
1008
+ dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'],
1009
+ 'formats': ['<u4', 'u1', 'u1', 'u1'],
1010
+ 'offsets': [0, 0, 1, 2],
1011
+ 'titles': ['Color', 'Red pixel',
1012
+ 'Green pixel', 'Blue pixel']})
1013
+ assert_equal(str(dt),
1014
+ "{'names': ['rgba', 'r', 'g', 'b'],"
1015
+ " 'formats': ['<u4', 'u1', 'u1', 'u1'],"
1016
+ " 'offsets': [0, 0, 1, 2],"
1017
+ " 'titles': ['Color', 'Red pixel', "
1018
+ "'Green pixel', 'Blue pixel'],"
1019
+ " 'itemsize': 4}")
1020
+
1021
+ dt = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'],
1022
+ 'offsets': [0, 2],
1023
+ 'titles': ['Red pixel', 'Blue pixel']})
1024
+ assert_equal(str(dt),
1025
+ "{'names': ['r', 'b'],"
1026
+ " 'formats': ['u1', 'u1'],"
1027
+ " 'offsets': [0, 2],"
1028
+ " 'titles': ['Red pixel', 'Blue pixel'],"
1029
+ " 'itemsize': 3}")
1030
+
1031
+ dt = np.dtype([('a', '<m8[D]'), ('b', '<M8[us]')])
1032
+ assert_equal(str(dt),
1033
+ "[('a', '<m8[D]'), ('b', '<M8[us]')]")
1034
+
1035
+ def test_repr_structured(self):
1036
+ dt = np.dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)),
1037
+ ('rtile', '>f4', (64, 36))], (3,)),
1038
+ ('bottom', [('bleft', ('>f4', (8, 64)), (1,)),
1039
+ ('bright', '>f4', (8, 36))])])
1040
+ assert_equal(repr(dt),
1041
+ "dtype([('top', [('tiles', ('>f4', (64, 64)), (1,)), "
1042
+ "('rtile', '>f4', (64, 36))], (3,)), "
1043
+ "('bottom', [('bleft', ('>f4', (8, 64)), (1,)), "
1044
+ "('bright', '>f4', (8, 36))])])")
1045
+
1046
+ dt = np.dtype({'names': ['r', 'g', 'b'], 'formats': ['u1', 'u1', 'u1'],
1047
+ 'offsets': [0, 1, 2],
1048
+ 'titles': ['Red pixel', 'Green pixel', 'Blue pixel']},
1049
+ align=True)
1050
+ assert_equal(repr(dt),
1051
+ "dtype([(('Red pixel', 'r'), 'u1'), "
1052
+ "(('Green pixel', 'g'), 'u1'), "
1053
+ "(('Blue pixel', 'b'), 'u1')], align=True)")
1054
+
1055
+ def test_repr_structured_not_packed(self):
1056
+ dt = np.dtype({'names': ['rgba', 'r', 'g', 'b'],
1057
+ 'formats': ['<u4', 'u1', 'u1', 'u1'],
1058
+ 'offsets': [0, 0, 1, 2],
1059
+ 'titles': ['Color', 'Red pixel',
1060
+ 'Green pixel', 'Blue pixel']}, align=True)
1061
+ assert_equal(repr(dt),
1062
+ "dtype({'names': ['rgba', 'r', 'g', 'b'],"
1063
+ " 'formats': ['<u4', 'u1', 'u1', 'u1'],"
1064
+ " 'offsets': [0, 0, 1, 2],"
1065
+ " 'titles': ['Color', 'Red pixel', "
1066
+ "'Green pixel', 'Blue pixel'],"
1067
+ " 'itemsize': 4}, align=True)")
1068
+
1069
+ dt = np.dtype({'names': ['r', 'b'], 'formats': ['u1', 'u1'],
1070
+ 'offsets': [0, 2],
1071
+ 'titles': ['Red pixel', 'Blue pixel'],
1072
+ 'itemsize': 4})
1073
+ assert_equal(repr(dt),
1074
+ "dtype({'names': ['r', 'b'], "
1075
+ "'formats': ['u1', 'u1'], "
1076
+ "'offsets': [0, 2], "
1077
+ "'titles': ['Red pixel', 'Blue pixel'], "
1078
+ "'itemsize': 4})")
1079
+
1080
+ def test_repr_structured_datetime(self):
1081
+ dt = np.dtype([('a', '<M8[D]'), ('b', '<m8[us]')])
1082
+ assert_equal(repr(dt),
1083
+ "dtype([('a', '<M8[D]'), ('b', '<m8[us]')])")
1084
+
1085
+ def test_repr_str_subarray(self):
1086
+ dt = np.dtype(('<i2', (1,)))
1087
+ assert_equal(repr(dt), "dtype(('<i2', (1,)))")
1088
+ assert_equal(str(dt), "('<i2', (1,))")
1089
+
1090
+ def test_base_dtype_with_object_type(self):
1091
+ # Issue gh-2798, should not error.
1092
+ np.array(['a'], dtype="O").astype(("O", [("name", "O")]))
1093
+
1094
+ def test_empty_string_to_object(self):
1095
+ # Pull request #4722
1096
+ np.array(["", ""]).astype(object)
1097
+
1098
+ def test_void_subclass_unsized(self):
1099
+ dt = np.dtype(np.record)
1100
+ assert_equal(repr(dt), "dtype('V')")
1101
+ assert_equal(str(dt), '|V0')
1102
+ assert_equal(dt.name, 'record')
1103
+
1104
+ def test_void_subclass_sized(self):
1105
+ dt = np.dtype((np.record, 2))
1106
+ assert_equal(repr(dt), "dtype('V2')")
1107
+ assert_equal(str(dt), '|V2')
1108
+ assert_equal(dt.name, 'record16')
1109
+
1110
+ def test_void_subclass_fields(self):
1111
+ dt = np.dtype((np.record, [('a', '<u2')]))
1112
+ assert_equal(repr(dt), "dtype((numpy.record, [('a', '<u2')]))")
1113
+ assert_equal(str(dt), "(numpy.record, [('a', '<u2')])")
1114
+ assert_equal(dt.name, 'record16')
1115
+
1116
+
1117
+ class TestDtypeAttributeDeletion:
1118
+
1119
+ def test_dtype_non_writable_attributes_deletion(self):
1120
+ dt = np.dtype(np.double)
1121
+ attr = ["subdtype", "descr", "str", "name", "base", "shape",
1122
+ "isbuiltin", "isnative", "isalignedstruct", "fields",
1123
+ "metadata", "hasobject"]
1124
+
1125
+ for s in attr:
1126
+ assert_raises(AttributeError, delattr, dt, s)
1127
+
1128
+ def test_dtype_writable_attributes_deletion(self):
1129
+ dt = np.dtype(np.double)
1130
+ attr = ["names"]
1131
+ for s in attr:
1132
+ assert_raises(AttributeError, delattr, dt, s)
1133
+
1134
+
1135
+ class TestDtypeAttributes:
1136
+ def test_descr_has_trailing_void(self):
1137
+ # see gh-6359
1138
+ dtype = np.dtype({
1139
+ 'names': ['A', 'B'],
1140
+ 'formats': ['f4', 'f4'],
1141
+ 'offsets': [0, 8],
1142
+ 'itemsize': 16})
1143
+ new_dtype = np.dtype(dtype.descr)
1144
+ assert_equal(new_dtype.itemsize, 16)
1145
+
1146
+ def test_name_dtype_subclass(self):
1147
+ # Ticket #4357
1148
+ class user_def_subcls(np.void):
1149
+ pass
1150
+ assert_equal(np.dtype(user_def_subcls).name, 'user_def_subcls')
1151
+
1152
+ def test_zero_stride(self):
1153
+ arr = np.ones(1, dtype="i8")
1154
+ arr = np.broadcast_to(arr, 10)
1155
+ assert arr.strides == (0,)
1156
+ with pytest.raises(ValueError):
1157
+ arr.dtype = "i1"
1158
+
1159
+ class TestDTypeMakeCanonical:
1160
+ def check_canonical(self, dtype, canonical):
1161
+ """
1162
+ Check most properties relevant to "canonical" versions of a dtype,
1163
+ which is mainly native byte order for datatypes supporting this.
1164
+
1165
+ The main work is checking structured dtypes with fields, where we
1166
+ reproduce most the actual logic used in the C-code.
1167
+ """
1168
+ assert type(dtype) is type(canonical)
1169
+
1170
+ # a canonical DType should always have equivalent casting (both ways)
1171
+ assert np.can_cast(dtype, canonical, casting="equiv")
1172
+ assert np.can_cast(canonical, dtype, casting="equiv")
1173
+ # a canonical dtype (and its fields) is always native (checks fields):
1174
+ assert canonical.isnative
1175
+
1176
+ # Check that canonical of canonical is the same (no casting):
1177
+ assert np.result_type(canonical) == canonical
1178
+
1179
+ if not dtype.names:
1180
+ # The flags currently never change for unstructured dtypes
1181
+ assert dtype.flags == canonical.flags
1182
+ return
1183
+
1184
+ # Must have all the needs API flag set:
1185
+ assert dtype.flags & 0b10000
1186
+
1187
+ # Check that the fields are identical (including titles):
1188
+ assert dtype.fields.keys() == canonical.fields.keys()
1189
+
1190
+ def aligned_offset(offset, alignment):
1191
+ # round up offset:
1192
+ return - (-offset // alignment) * alignment
1193
+
1194
+ totalsize = 0
1195
+ max_alignment = 1
1196
+ for name in dtype.names:
1197
+ # each field is also canonical:
1198
+ new_field_descr = canonical.fields[name][0]
1199
+ self.check_canonical(dtype.fields[name][0], new_field_descr)
1200
+
1201
+ # Must have the "inherited" object related flags:
1202
+ expected = 0b11011 & new_field_descr.flags
1203
+ assert (canonical.flags & expected) == expected
1204
+
1205
+ if canonical.isalignedstruct:
1206
+ totalsize = aligned_offset(totalsize, new_field_descr.alignment)
1207
+ max_alignment = max(new_field_descr.alignment, max_alignment)
1208
+
1209
+ assert canonical.fields[name][1] == totalsize
1210
+ # if a title exists, they must match (otherwise empty tuple):
1211
+ assert dtype.fields[name][2:] == canonical.fields[name][2:]
1212
+
1213
+ totalsize += new_field_descr.itemsize
1214
+
1215
+ if canonical.isalignedstruct:
1216
+ totalsize = aligned_offset(totalsize, max_alignment)
1217
+ assert canonical.itemsize == totalsize
1218
+ assert canonical.alignment == max_alignment
1219
+
1220
+ def test_simple(self):
1221
+ dt = np.dtype(">i4")
1222
+ assert np.result_type(dt).isnative
1223
+ assert np.result_type(dt).num == dt.num
1224
+
1225
+ # dtype with empty space:
1226
+ struct_dt = np.dtype(">i4,<i1,i8,V3")[["f0", "f2"]]
1227
+ canonical = np.result_type(struct_dt)
1228
+ assert canonical.itemsize == 4+8
1229
+ assert canonical.isnative
1230
+
1231
+ # aligned struct dtype with empty space:
1232
+ struct_dt = np.dtype(">i1,<i4,i8,V3", align=True)[["f0", "f2"]]
1233
+ canonical = np.result_type(struct_dt)
1234
+ assert canonical.isalignedstruct
1235
+ assert canonical.itemsize == np.dtype("i8").alignment + 8
1236
+ assert canonical.isnative
1237
+
1238
+ def test_object_flag_not_inherited(self):
1239
+ # The following dtype still indicates "object", because its included
1240
+ # in the unaccessible space (maybe this could change at some point):
1241
+ arr = np.ones(3, "i,O,i")[["f0", "f2"]]
1242
+ assert arr.dtype.hasobject
1243
+ canonical_dt = np.result_type(arr.dtype)
1244
+ assert not canonical_dt.hasobject
1245
+
1246
+ @pytest.mark.slow
1247
+ @hypothesis.given(dtype=hynp.nested_dtypes())
1248
+ def test_make_canonical_hypothesis(self, dtype):
1249
+ canonical = np.result_type(dtype)
1250
+ self.check_canonical(dtype, canonical)
1251
+ # result_type with two arguments should always give identical results:
1252
+ two_arg_result = np.result_type(dtype, dtype)
1253
+ assert np.can_cast(two_arg_result, canonical, casting="no")
1254
+
1255
+ @pytest.mark.slow
1256
+ @hypothesis.given(
1257
+ dtype=hypothesis.extra.numpy.array_dtypes(
1258
+ subtype_strategy=hypothesis.extra.numpy.array_dtypes(),
1259
+ min_size=5, max_size=10, allow_subarrays=True))
1260
+ def test_structured(self, dtype):
1261
+ # Pick 4 of the fields at random. This will leave empty space in the
1262
+ # dtype (since we do not canonicalize it here).
1263
+ field_subset = random.sample(dtype.names, k=4)
1264
+ dtype_with_empty_space = dtype[field_subset]
1265
+ assert dtype_with_empty_space.itemsize == dtype.itemsize
1266
+ canonicalized = np.result_type(dtype_with_empty_space)
1267
+ self.check_canonical(dtype_with_empty_space, canonicalized)
1268
+ # promotion with two arguments should always give identical results:
1269
+ two_arg_result = np.promote_types(
1270
+ dtype_with_empty_space, dtype_with_empty_space)
1271
+ assert np.can_cast(two_arg_result, canonicalized, casting="no")
1272
+
1273
+ # Ensure that we also check aligned struct (check the opposite, in
1274
+ # case hypothesis grows support for `align`. Then repeat the test:
1275
+ dtype_aligned = np.dtype(dtype.descr, align=not dtype.isalignedstruct)
1276
+ dtype_with_empty_space = dtype_aligned[field_subset]
1277
+ assert dtype_with_empty_space.itemsize == dtype_aligned.itemsize
1278
+ canonicalized = np.result_type(dtype_with_empty_space)
1279
+ self.check_canonical(dtype_with_empty_space, canonicalized)
1280
+ # promotion with two arguments should always give identical results:
1281
+ two_arg_result = np.promote_types(
1282
+ dtype_with_empty_space, dtype_with_empty_space)
1283
+ assert np.can_cast(two_arg_result, canonicalized, casting="no")
1284
+
1285
+
1286
+ class TestPickling:
1287
+
1288
+ def check_pickling(self, dtype):
1289
+ for proto in range(pickle.HIGHEST_PROTOCOL + 1):
1290
+ buf = pickle.dumps(dtype, proto)
1291
+ # The dtype pickling itself pickles `np.dtype` if it is pickled
1292
+ # as a singleton `dtype` should be stored in the buffer:
1293
+ assert b"_DType_reconstruct" not in buf
1294
+ assert b"dtype" in buf
1295
+ pickled = pickle.loads(buf)
1296
+ assert_equal(pickled, dtype)
1297
+ assert_equal(pickled.descr, dtype.descr)
1298
+ if dtype.metadata is not None:
1299
+ assert_equal(pickled.metadata, dtype.metadata)
1300
+ # Check the reconstructed dtype is functional
1301
+ x = np.zeros(3, dtype=dtype)
1302
+ y = np.zeros(3, dtype=pickled)
1303
+ assert_equal(x, y)
1304
+ assert_equal(x[0], y[0])
1305
+
1306
+ @pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object,
1307
+ np.compat.unicode, bool])
1308
+ def test_builtin(self, t):
1309
+ self.check_pickling(np.dtype(t))
1310
+
1311
+ def test_structured(self):
1312
+ dt = np.dtype(([('a', '>f4', (2, 1)), ('b', '<f8', (1, 3))], (2, 2)))
1313
+ self.check_pickling(dt)
1314
+
1315
+ def test_structured_aligned(self):
1316
+ dt = np.dtype('i4, i1', align=True)
1317
+ self.check_pickling(dt)
1318
+
1319
+ def test_structured_unaligned(self):
1320
+ dt = np.dtype('i4, i1', align=False)
1321
+ self.check_pickling(dt)
1322
+
1323
+ def test_structured_padded(self):
1324
+ dt = np.dtype({
1325
+ 'names': ['A', 'B'],
1326
+ 'formats': ['f4', 'f4'],
1327
+ 'offsets': [0, 8],
1328
+ 'itemsize': 16})
1329
+ self.check_pickling(dt)
1330
+
1331
+ def test_structured_titles(self):
1332
+ dt = np.dtype({'names': ['r', 'b'],
1333
+ 'formats': ['u1', 'u1'],
1334
+ 'titles': ['Red pixel', 'Blue pixel']})
1335
+ self.check_pickling(dt)
1336
+
1337
+ @pytest.mark.parametrize('base', ['m8', 'M8'])
1338
+ @pytest.mark.parametrize('unit', ['', 'Y', 'M', 'W', 'D', 'h', 'm', 's',
1339
+ 'ms', 'us', 'ns', 'ps', 'fs', 'as'])
1340
+ def test_datetime(self, base, unit):
1341
+ dt = np.dtype('%s[%s]' % (base, unit) if unit else base)
1342
+ self.check_pickling(dt)
1343
+ if unit:
1344
+ dt = np.dtype('%s[7%s]' % (base, unit))
1345
+ self.check_pickling(dt)
1346
+
1347
+ def test_metadata(self):
1348
+ dt = np.dtype(int, metadata={'datum': 1})
1349
+ self.check_pickling(dt)
1350
+
1351
+ @pytest.mark.parametrize("DType",
1352
+ [type(np.dtype(t)) for t in np.typecodes['All']] +
1353
+ [np.dtype(rational), np.dtype])
1354
+ def test_pickle_types(self, DType):
1355
+ # Check that DTypes (the classes/types) roundtrip when pickling
1356
+ for proto in range(pickle.HIGHEST_PROTOCOL + 1):
1357
+ roundtrip_DType = pickle.loads(pickle.dumps(DType, proto))
1358
+ assert roundtrip_DType is DType
1359
+
1360
+
1361
+ class TestPromotion:
1362
+ """Test cases related to more complex DType promotions. Further promotion
1363
+ tests are defined in `test_numeric.py`
1364
+ """
1365
+ @np._no_nep50_warning()
1366
+ @pytest.mark.parametrize(["other", "expected", "expected_weak"],
1367
+ [(2**16-1, np.complex64, None),
1368
+ (2**32-1, np.complex128, np.complex64),
1369
+ (np.float16(2), np.complex64, None),
1370
+ (np.float32(2), np.complex64, None),
1371
+ (np.longdouble(2), np.complex64, np.clongdouble),
1372
+ # Base of the double value to sidestep any rounding issues:
1373
+ (np.longdouble(np.nextafter(1.7e308, 0.)),
1374
+ np.complex128, np.clongdouble),
1375
+ # Additionally use "nextafter" so the cast can't round down:
1376
+ (np.longdouble(np.nextafter(1.7e308, np.inf)),
1377
+ np.clongdouble, None),
1378
+ # repeat for complex scalars:
1379
+ (np.complex64(2), np.complex64, None),
1380
+ (np.clongdouble(2), np.complex64, np.clongdouble),
1381
+ # Base of the double value to sidestep any rounding issues:
1382
+ (np.clongdouble(np.nextafter(1.7e308, 0.) * 1j),
1383
+ np.complex128, np.clongdouble),
1384
+ # Additionally use "nextafter" so the cast can't round down:
1385
+ (np.clongdouble(np.nextafter(1.7e308, np.inf)),
1386
+ np.clongdouble, None),
1387
+ ])
1388
+ def test_complex_other_value_based(self,
1389
+ weak_promotion, other, expected, expected_weak):
1390
+ if weak_promotion and expected_weak is not None:
1391
+ expected = expected_weak
1392
+
1393
+ # This would change if we modify the value based promotion
1394
+ min_complex = np.dtype(np.complex64)
1395
+
1396
+ res = np.result_type(other, min_complex)
1397
+ assert res == expected
1398
+ # Check the same for a simple ufunc call that uses the same logic:
1399
+ res = np.minimum(other, np.ones(3, dtype=min_complex)).dtype
1400
+ assert res == expected
1401
+
1402
+ @pytest.mark.parametrize(["other", "expected"],
1403
+ [(np.bool_, np.complex128),
1404
+ (np.int64, np.complex128),
1405
+ (np.float16, np.complex64),
1406
+ (np.float32, np.complex64),
1407
+ (np.float64, np.complex128),
1408
+ (np.longdouble, np.clongdouble),
1409
+ (np.complex64, np.complex64),
1410
+ (np.complex128, np.complex128),
1411
+ (np.clongdouble, np.clongdouble),
1412
+ ])
1413
+ def test_complex_scalar_value_based(self, other, expected):
1414
+ # This would change if we modify the value based promotion
1415
+ complex_scalar = 1j
1416
+
1417
+ res = np.result_type(other, complex_scalar)
1418
+ assert res == expected
1419
+ # Check the same for a simple ufunc call that uses the same logic:
1420
+ res = np.minimum(np.ones(3, dtype=other), complex_scalar).dtype
1421
+ assert res == expected
1422
+
1423
+ def test_complex_pyscalar_promote_rational(self):
1424
+ with pytest.raises(TypeError,
1425
+ match=r".* no common DType exists for the given inputs"):
1426
+ np.result_type(1j, rational)
1427
+
1428
+ with pytest.raises(TypeError,
1429
+ match=r".* no common DType exists for the given inputs"):
1430
+ np.result_type(1j, rational(1, 2))
1431
+
1432
+ @pytest.mark.parametrize("val", [2, 2**32, 2**63, 2**64, 2*100])
1433
+ def test_python_integer_promotion(self, val):
1434
+ # If we only path scalars (mainly python ones!), the result must take
1435
+ # into account that the integer may be considered int32, int64, uint64,
1436
+ # or object depending on the input value. So test those paths!
1437
+ expected_dtype = np.result_type(np.array(val).dtype, np.array(0).dtype)
1438
+ assert np.result_type(val, 0) == expected_dtype
1439
+ # For completeness sake, also check with a NumPy scalar as second arg:
1440
+ assert np.result_type(val, np.int8(0)) == expected_dtype
1441
+
1442
+ @pytest.mark.parametrize(["other", "expected"],
1443
+ [(1, rational), (1., np.float64)])
1444
+ @np._no_nep50_warning()
1445
+ def test_float_int_pyscalar_promote_rational(
1446
+ self, weak_promotion, other, expected):
1447
+ # Note that rationals are a bit akward as they promote with float64
1448
+ # or default ints, but not float16 or uint8/int8 (which looks
1449
+ # inconsistent here). The new promotion fixes this (partially?)
1450
+ if not weak_promotion and type(other) == float:
1451
+ # The float version, checks float16 in the legacy path, which fails
1452
+ # the integer version seems to check int8 (also), so it can
1453
+ # pass.
1454
+ with pytest.raises(TypeError,
1455
+ match=r".* do not have a common DType"):
1456
+ np.result_type(other, rational)
1457
+ else:
1458
+ assert np.result_type(other, rational) == expected
1459
+
1460
+ assert np.result_type(other, rational(1, 2)) == expected
1461
+
1462
+ @pytest.mark.parametrize(["dtypes", "expected"], [
1463
+ # These promotions are not associative/commutative:
1464
+ ([np.uint16, np.int16, np.float16], np.float32),
1465
+ ([np.uint16, np.int8, np.float16], np.float32),
1466
+ ([np.uint8, np.int16, np.float16], np.float32),
1467
+ # The following promotions are not ambiguous, but cover code
1468
+ # paths of abstract promotion (no particular logic being tested)
1469
+ ([1, 1, np.float64], np.float64),
1470
+ ([1, 1., np.complex128], np.complex128),
1471
+ ([1, 1j, np.float64], np.complex128),
1472
+ ([1., 1., np.int64], np.float64),
1473
+ ([1., 1j, np.float64], np.complex128),
1474
+ ([1j, 1j, np.float64], np.complex128),
1475
+ ([1, True, np.bool_], np.int_),
1476
+ ])
1477
+ def test_permutations_do_not_influence_result(self, dtypes, expected):
1478
+ # Tests that most permutations do not influence the result. In the
1479
+ # above some uint and int combintations promote to a larger integer
1480
+ # type, which would then promote to a larger than necessary float.
1481
+ for perm in permutations(dtypes):
1482
+ assert np.result_type(*perm) == expected
1483
+
1484
+
1485
+ def test_rational_dtype():
1486
+ # test for bug gh-5719
1487
+ a = np.array([1111], dtype=rational).astype
1488
+ assert_raises(OverflowError, a, 'int8')
1489
+
1490
+ # test that dtype detection finds user-defined types
1491
+ x = rational(1)
1492
+ assert_equal(np.array([x,x]).dtype, np.dtype(rational))
1493
+
1494
+
1495
+ def test_dtypes_are_true():
1496
+ # test for gh-6294
1497
+ assert bool(np.dtype('f8'))
1498
+ assert bool(np.dtype('i8'))
1499
+ assert bool(np.dtype([('a', 'i8'), ('b', 'f4')]))
1500
+
1501
+
1502
+ def test_invalid_dtype_string():
1503
+ # test for gh-10440
1504
+ assert_raises(TypeError, np.dtype, 'f8,i8,[f8,i8]')
1505
+ assert_raises(TypeError, np.dtype, 'Fl\xfcgel')
1506
+
1507
+
1508
+ def test_keyword_argument():
1509
+ # test for https://github.com/numpy/numpy/pull/16574#issuecomment-642660971
1510
+ assert np.dtype(dtype=np.float64) == np.dtype(np.float64)
1511
+
1512
+
1513
+ def test_ulong_dtype():
1514
+ # test for gh-21063
1515
+ assert np.dtype("ulong") == np.dtype(np.uint)
1516
+
1517
+
1518
+ class TestFromDTypeAttribute:
1519
+ def test_simple(self):
1520
+ class dt:
1521
+ dtype = np.dtype("f8")
1522
+
1523
+ assert np.dtype(dt) == np.float64
1524
+ assert np.dtype(dt()) == np.float64
1525
+
1526
+ @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking")
1527
+ def test_recursion(self):
1528
+ class dt:
1529
+ pass
1530
+
1531
+ dt.dtype = dt
1532
+ with pytest.raises(RecursionError):
1533
+ np.dtype(dt)
1534
+
1535
+ dt_instance = dt()
1536
+ dt_instance.dtype = dt
1537
+ with pytest.raises(RecursionError):
1538
+ np.dtype(dt_instance)
1539
+
1540
+ def test_void_subtype(self):
1541
+ class dt(np.void):
1542
+ # This code path is fully untested before, so it is unclear
1543
+ # what this should be useful for. Note that if np.void is used
1544
+ # numpy will think we are deallocating a base type [1.17, 2019-02].
1545
+ dtype = np.dtype("f,f")
1546
+
1547
+ np.dtype(dt)
1548
+ np.dtype(dt(1))
1549
+
1550
+ @pytest.mark.skipif(IS_PYSTON, reason="Pyston disables recursion checking")
1551
+ def test_void_subtype_recursion(self):
1552
+ class vdt(np.void):
1553
+ pass
1554
+
1555
+ vdt.dtype = vdt
1556
+
1557
+ with pytest.raises(RecursionError):
1558
+ np.dtype(vdt)
1559
+
1560
+ with pytest.raises(RecursionError):
1561
+ np.dtype(vdt(1))
1562
+
1563
+
1564
+ class TestDTypeClasses:
1565
+ @pytest.mark.parametrize("dtype", list(np.typecodes['All']) + [rational])
1566
+ def test_basic_dtypes_subclass_properties(self, dtype):
1567
+ # Note: Except for the isinstance and type checks, these attributes
1568
+ # are considered currently private and may change.
1569
+ dtype = np.dtype(dtype)
1570
+ assert isinstance(dtype, np.dtype)
1571
+ assert type(dtype) is not np.dtype
1572
+ if dtype.type.__name__ != "rational":
1573
+ dt_name = type(dtype).__name__.lower().removesuffix("dtype")
1574
+ if dt_name == "uint" or dt_name == "int":
1575
+ # The scalar names has a `c` attached because "int" is Python
1576
+ # int and that is long...
1577
+ dt_name += "c"
1578
+ sc_name = dtype.type.__name__
1579
+ assert dt_name == sc_name.strip("_")
1580
+ assert type(dtype).__module__ == "numpy.dtypes"
1581
+
1582
+ assert getattr(numpy.dtypes, type(dtype).__name__) is type(dtype)
1583
+ else:
1584
+ assert type(dtype).__name__ == "dtype[rational]"
1585
+ assert type(dtype).__module__ == "numpy"
1586
+
1587
+ assert not type(dtype)._abstract
1588
+
1589
+ # the flexible dtypes and datetime/timedelta have additional parameters
1590
+ # which are more than just storage information, these would need to be
1591
+ # given when creating a dtype:
1592
+ parametric = (np.void, np.str_, np.bytes_, np.datetime64, np.timedelta64)
1593
+ if dtype.type not in parametric:
1594
+ assert not type(dtype)._parametric
1595
+ assert type(dtype)() is dtype
1596
+ else:
1597
+ assert type(dtype)._parametric
1598
+ with assert_raises(TypeError):
1599
+ type(dtype)()
1600
+
1601
+ def test_dtype_superclass(self):
1602
+ assert type(np.dtype) is not type
1603
+ assert isinstance(np.dtype, type)
1604
+
1605
+ assert type(np.dtype).__name__ == "_DTypeMeta"
1606
+ assert type(np.dtype).__module__ == "numpy"
1607
+ assert np.dtype._abstract
1608
+
1609
+ def test_is_numeric(self):
1610
+ all_codes = set(np.typecodes['All'])
1611
+ numeric_codes = set(np.typecodes['AllInteger'] +
1612
+ np.typecodes['AllFloat'] + '?')
1613
+ non_numeric_codes = all_codes - numeric_codes
1614
+
1615
+ for code in numeric_codes:
1616
+ assert type(np.dtype(code))._is_numeric
1617
+
1618
+ for code in non_numeric_codes:
1619
+ assert not type(np.dtype(code))._is_numeric
1620
+
1621
+ @pytest.mark.parametrize("int_", ["UInt", "Int"])
1622
+ @pytest.mark.parametrize("size", [8, 16, 32, 64])
1623
+ def test_integer_alias_names(self, int_, size):
1624
+ DType = getattr(numpy.dtypes, f"{int_}{size}DType")
1625
+ sctype = getattr(numpy, f"{int_.lower()}{size}")
1626
+ assert DType.type is sctype
1627
+ assert DType.__name__.lower().removesuffix("dtype") == sctype.__name__
1628
+
1629
+ @pytest.mark.parametrize("name",
1630
+ ["Half", "Float", "Double", "CFloat", "CDouble"])
1631
+ def test_float_alias_names(self, name):
1632
+ with pytest.raises(AttributeError):
1633
+ getattr(numpy.dtypes, name + "DType") is numpy.dtypes.Float16DType
1634
+
1635
+
1636
+ class TestFromCTypes:
1637
+
1638
+ @staticmethod
1639
+ def check(ctype, dtype):
1640
+ dtype = np.dtype(dtype)
1641
+ assert_equal(np.dtype(ctype), dtype)
1642
+ assert_equal(np.dtype(ctype()), dtype)
1643
+
1644
+ def test_array(self):
1645
+ c8 = ctypes.c_uint8
1646
+ self.check( 3 * c8, (np.uint8, (3,)))
1647
+ self.check( 1 * c8, (np.uint8, (1,)))
1648
+ self.check( 0 * c8, (np.uint8, (0,)))
1649
+ self.check(1 * (3 * c8), ((np.uint8, (3,)), (1,)))
1650
+ self.check(3 * (1 * c8), ((np.uint8, (1,)), (3,)))
1651
+
1652
+ def test_padded_structure(self):
1653
+ class PaddedStruct(ctypes.Structure):
1654
+ _fields_ = [
1655
+ ('a', ctypes.c_uint8),
1656
+ ('b', ctypes.c_uint16)
1657
+ ]
1658
+ expected = np.dtype([
1659
+ ('a', np.uint8),
1660
+ ('b', np.uint16)
1661
+ ], align=True)
1662
+ self.check(PaddedStruct, expected)
1663
+
1664
+ def test_bit_fields(self):
1665
+ class BitfieldStruct(ctypes.Structure):
1666
+ _fields_ = [
1667
+ ('a', ctypes.c_uint8, 7),
1668
+ ('b', ctypes.c_uint8, 1)
1669
+ ]
1670
+ assert_raises(TypeError, np.dtype, BitfieldStruct)
1671
+ assert_raises(TypeError, np.dtype, BitfieldStruct())
1672
+
1673
+ def test_pointer(self):
1674
+ p_uint8 = ctypes.POINTER(ctypes.c_uint8)
1675
+ assert_raises(TypeError, np.dtype, p_uint8)
1676
+
1677
+ def test_void_pointer(self):
1678
+ self.check(ctypes.c_void_p, np.uintp)
1679
+
1680
+ def test_union(self):
1681
+ class Union(ctypes.Union):
1682
+ _fields_ = [
1683
+ ('a', ctypes.c_uint8),
1684
+ ('b', ctypes.c_uint16),
1685
+ ]
1686
+ expected = np.dtype(dict(
1687
+ names=['a', 'b'],
1688
+ formats=[np.uint8, np.uint16],
1689
+ offsets=[0, 0],
1690
+ itemsize=2
1691
+ ))
1692
+ self.check(Union, expected)
1693
+
1694
+ def test_union_with_struct_packed(self):
1695
+ class Struct(ctypes.Structure):
1696
+ _pack_ = 1
1697
+ _fields_ = [
1698
+ ('one', ctypes.c_uint8),
1699
+ ('two', ctypes.c_uint32)
1700
+ ]
1701
+
1702
+ class Union(ctypes.Union):
1703
+ _fields_ = [
1704
+ ('a', ctypes.c_uint8),
1705
+ ('b', ctypes.c_uint16),
1706
+ ('c', ctypes.c_uint32),
1707
+ ('d', Struct),
1708
+ ]
1709
+ expected = np.dtype(dict(
1710
+ names=['a', 'b', 'c', 'd'],
1711
+ formats=['u1', np.uint16, np.uint32, [('one', 'u1'), ('two', np.uint32)]],
1712
+ offsets=[0, 0, 0, 0],
1713
+ itemsize=ctypes.sizeof(Union)
1714
+ ))
1715
+ self.check(Union, expected)
1716
+
1717
+ def test_union_packed(self):
1718
+ class Struct(ctypes.Structure):
1719
+ _fields_ = [
1720
+ ('one', ctypes.c_uint8),
1721
+ ('two', ctypes.c_uint32)
1722
+ ]
1723
+ _pack_ = 1
1724
+ class Union(ctypes.Union):
1725
+ _pack_ = 1
1726
+ _fields_ = [
1727
+ ('a', ctypes.c_uint8),
1728
+ ('b', ctypes.c_uint16),
1729
+ ('c', ctypes.c_uint32),
1730
+ ('d', Struct),
1731
+ ]
1732
+ expected = np.dtype(dict(
1733
+ names=['a', 'b', 'c', 'd'],
1734
+ formats=['u1', np.uint16, np.uint32, [('one', 'u1'), ('two', np.uint32)]],
1735
+ offsets=[0, 0, 0, 0],
1736
+ itemsize=ctypes.sizeof(Union)
1737
+ ))
1738
+ self.check(Union, expected)
1739
+
1740
+ def test_packed_structure(self):
1741
+ class PackedStructure(ctypes.Structure):
1742
+ _pack_ = 1
1743
+ _fields_ = [
1744
+ ('a', ctypes.c_uint8),
1745
+ ('b', ctypes.c_uint16)
1746
+ ]
1747
+ expected = np.dtype([
1748
+ ('a', np.uint8),
1749
+ ('b', np.uint16)
1750
+ ])
1751
+ self.check(PackedStructure, expected)
1752
+
1753
+ def test_large_packed_structure(self):
1754
+ class PackedStructure(ctypes.Structure):
1755
+ _pack_ = 2
1756
+ _fields_ = [
1757
+ ('a', ctypes.c_uint8),
1758
+ ('b', ctypes.c_uint16),
1759
+ ('c', ctypes.c_uint8),
1760
+ ('d', ctypes.c_uint16),
1761
+ ('e', ctypes.c_uint32),
1762
+ ('f', ctypes.c_uint32),
1763
+ ('g', ctypes.c_uint8)
1764
+ ]
1765
+ expected = np.dtype(dict(
1766
+ formats=[np.uint8, np.uint16, np.uint8, np.uint16, np.uint32, np.uint32, np.uint8 ],
1767
+ offsets=[0, 2, 4, 6, 8, 12, 16],
1768
+ names=['a', 'b', 'c', 'd', 'e', 'f', 'g'],
1769
+ itemsize=18))
1770
+ self.check(PackedStructure, expected)
1771
+
1772
+ def test_big_endian_structure_packed(self):
1773
+ class BigEndStruct(ctypes.BigEndianStructure):
1774
+ _fields_ = [
1775
+ ('one', ctypes.c_uint8),
1776
+ ('two', ctypes.c_uint32)
1777
+ ]
1778
+ _pack_ = 1
1779
+ expected = np.dtype([('one', 'u1'), ('two', '>u4')])
1780
+ self.check(BigEndStruct, expected)
1781
+
1782
+ def test_little_endian_structure_packed(self):
1783
+ class LittleEndStruct(ctypes.LittleEndianStructure):
1784
+ _fields_ = [
1785
+ ('one', ctypes.c_uint8),
1786
+ ('two', ctypes.c_uint32)
1787
+ ]
1788
+ _pack_ = 1
1789
+ expected = np.dtype([('one', 'u1'), ('two', '<u4')])
1790
+ self.check(LittleEndStruct, expected)
1791
+
1792
+ def test_little_endian_structure(self):
1793
+ class PaddedStruct(ctypes.LittleEndianStructure):
1794
+ _fields_ = [
1795
+ ('a', ctypes.c_uint8),
1796
+ ('b', ctypes.c_uint16)
1797
+ ]
1798
+ expected = np.dtype([
1799
+ ('a', '<B'),
1800
+ ('b', '<H')
1801
+ ], align=True)
1802
+ self.check(PaddedStruct, expected)
1803
+
1804
+ def test_big_endian_structure(self):
1805
+ class PaddedStruct(ctypes.BigEndianStructure):
1806
+ _fields_ = [
1807
+ ('a', ctypes.c_uint8),
1808
+ ('b', ctypes.c_uint16)
1809
+ ]
1810
+ expected = np.dtype([
1811
+ ('a', '>B'),
1812
+ ('b', '>H')
1813
+ ], align=True)
1814
+ self.check(PaddedStruct, expected)
1815
+
1816
+ def test_simple_endian_types(self):
1817
+ self.check(ctypes.c_uint16.__ctype_le__, np.dtype('<u2'))
1818
+ self.check(ctypes.c_uint16.__ctype_be__, np.dtype('>u2'))
1819
+ self.check(ctypes.c_uint8.__ctype_le__, np.dtype('u1'))
1820
+ self.check(ctypes.c_uint8.__ctype_be__, np.dtype('u1'))
1821
+
1822
+ all_types = set(np.typecodes['All'])
1823
+ all_pairs = permutations(all_types, 2)
1824
+
1825
+ @pytest.mark.parametrize("pair", all_pairs)
1826
+ def test_pairs(self, pair):
1827
+ """
1828
+ Check that np.dtype('x,y') matches [np.dtype('x'), np.dtype('y')]
1829
+ Example: np.dtype('d,I') -> dtype([('f0', '<f8'), ('f1', '<u4')])
1830
+ """
1831
+ # gh-5645: check that np.dtype('i,L') can be used
1832
+ pair_type = np.dtype('{},{}'.format(*pair))
1833
+ expected = np.dtype([('f0', pair[0]), ('f1', pair[1])])
1834
+ assert_equal(pair_type, expected)
1835
+
1836
+
1837
+ class TestUserDType:
1838
+ @pytest.mark.leaks_references(reason="dynamically creates custom dtype.")
1839
+ def test_custom_structured_dtype(self):
1840
+ class mytype:
1841
+ pass
1842
+
1843
+ blueprint = np.dtype([("field", object)])
1844
+ dt = create_custom_field_dtype(blueprint, mytype, 0)
1845
+ assert dt.type == mytype
1846
+ # We cannot (currently) *create* this dtype with `np.dtype` because
1847
+ # mytype does not inherit from `np.generic`. This seems like an
1848
+ # unnecessary restriction, but one that has been around forever:
1849
+ assert np.dtype(mytype) == np.dtype("O")
1850
+
1851
+ def test_custom_structured_dtype_errors(self):
1852
+ class mytype:
1853
+ pass
1854
+
1855
+ blueprint = np.dtype([("field", object)])
1856
+
1857
+ with pytest.raises(ValueError):
1858
+ # Tests what happens if fields are unset during creation
1859
+ # which is currently rejected due to the containing object
1860
+ # (see PyArray_RegisterDataType).
1861
+ create_custom_field_dtype(blueprint, mytype, 1)
1862
+
1863
+ with pytest.raises(RuntimeError):
1864
+ # Tests that a dtype must have its type field set up to np.dtype
1865
+ # or in this case a builtin instance.
1866
+ create_custom_field_dtype(blueprint, mytype, 2)
1867
+
1868
+
1869
+ class TestClassGetItem:
1870
+ def test_dtype(self) -> None:
1871
+ alias = np.dtype[Any]
1872
+ assert isinstance(alias, types.GenericAlias)
1873
+ assert alias.__origin__ is np.dtype
1874
+
1875
+ @pytest.mark.parametrize("code", np.typecodes["All"])
1876
+ def test_dtype_subclass(self, code: str) -> None:
1877
+ cls = type(np.dtype(code))
1878
+ alias = cls[Any]
1879
+ assert isinstance(alias, types.GenericAlias)
1880
+ assert alias.__origin__ is cls
1881
+
1882
+ @pytest.mark.parametrize("arg_len", range(4))
1883
+ def test_subscript_tuple(self, arg_len: int) -> None:
1884
+ arg_tup = (Any,) * arg_len
1885
+ if arg_len == 1:
1886
+ assert np.dtype[arg_tup]
1887
+ else:
1888
+ with pytest.raises(TypeError):
1889
+ np.dtype[arg_tup]
1890
+
1891
+ def test_subscript_scalar(self) -> None:
1892
+ assert np.dtype[Any]
1893
+
1894
+
1895
+ def test_result_type_integers_and_unitless_timedelta64():
1896
+ # Regression test for gh-20077. The following call of `result_type`
1897
+ # would cause a seg. fault.
1898
+ td = np.timedelta64(4)
1899
+ result = np.result_type(0, td)
1900
+ assert_dtype_equal(result, td.dtype)
1901
+
1902
+
1903
+ def test_creating_dtype_with_dtype_class_errors():
1904
+ # Regression test for #25031, calling `np.dtype` with itself segfaulted.
1905
+ with pytest.raises(TypeError, match="Cannot convert np.dtype into a"):
1906
+ np.array(np.ones(10), dtype=np.dtype)
.venv/lib/python3.11/site-packages/numpy/core/tests/test_einsum.py ADDED
@@ -0,0 +1,1248 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import itertools
2
+ import sys
3
+ import platform
4
+
5
+ import pytest
6
+
7
+ import numpy as np
8
+ from numpy.testing import (
9
+ assert_, assert_equal, assert_array_equal, assert_almost_equal,
10
+ assert_raises, suppress_warnings, assert_raises_regex, assert_allclose
11
+ )
12
+
13
+ try:
14
+ COMPILERS = np.show_config(mode="dicts")["Compilers"]
15
+ USING_CLANG_CL = COMPILERS["c"]["name"] == "clang-cl"
16
+ except TypeError:
17
+ USING_CLANG_CL = False
18
+
19
+ # Setup for optimize einsum
20
+ chars = 'abcdefghij'
21
+ sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3])
22
+ global_size_dict = dict(zip(chars, sizes))
23
+
24
+
25
+ class TestEinsum:
26
+ def test_einsum_errors(self):
27
+ for do_opt in [True, False]:
28
+ # Need enough arguments
29
+ assert_raises(ValueError, np.einsum, optimize=do_opt)
30
+ assert_raises(ValueError, np.einsum, "", optimize=do_opt)
31
+
32
+ # subscripts must be a string
33
+ assert_raises(TypeError, np.einsum, 0, 0, optimize=do_opt)
34
+
35
+ # out parameter must be an array
36
+ assert_raises(TypeError, np.einsum, "", 0, out='test',
37
+ optimize=do_opt)
38
+
39
+ # order parameter must be a valid order
40
+ assert_raises(ValueError, np.einsum, "", 0, order='W',
41
+ optimize=do_opt)
42
+
43
+ # casting parameter must be a valid casting
44
+ assert_raises(ValueError, np.einsum, "", 0, casting='blah',
45
+ optimize=do_opt)
46
+
47
+ # dtype parameter must be a valid dtype
48
+ assert_raises(TypeError, np.einsum, "", 0, dtype='bad_data_type',
49
+ optimize=do_opt)
50
+
51
+ # other keyword arguments are rejected
52
+ assert_raises(TypeError, np.einsum, "", 0, bad_arg=0,
53
+ optimize=do_opt)
54
+
55
+ # issue 4528 revealed a segfault with this call
56
+ assert_raises(TypeError, np.einsum, *(None,)*63, optimize=do_opt)
57
+
58
+ # number of operands must match count in subscripts string
59
+ assert_raises(ValueError, np.einsum, "", 0, 0, optimize=do_opt)
60
+ assert_raises(ValueError, np.einsum, ",", 0, [0], [0],
61
+ optimize=do_opt)
62
+ assert_raises(ValueError, np.einsum, ",", [0], optimize=do_opt)
63
+
64
+ # can't have more subscripts than dimensions in the operand
65
+ assert_raises(ValueError, np.einsum, "i", 0, optimize=do_opt)
66
+ assert_raises(ValueError, np.einsum, "ij", [0, 0], optimize=do_opt)
67
+ assert_raises(ValueError, np.einsum, "...i", 0, optimize=do_opt)
68
+ assert_raises(ValueError, np.einsum, "i...j", [0, 0], optimize=do_opt)
69
+ assert_raises(ValueError, np.einsum, "i...", 0, optimize=do_opt)
70
+ assert_raises(ValueError, np.einsum, "ij...", [0, 0], optimize=do_opt)
71
+
72
+ # invalid ellipsis
73
+ assert_raises(ValueError, np.einsum, "i..", [0, 0], optimize=do_opt)
74
+ assert_raises(ValueError, np.einsum, ".i...", [0, 0], optimize=do_opt)
75
+ assert_raises(ValueError, np.einsum, "j->..j", [0, 0], optimize=do_opt)
76
+ assert_raises(ValueError, np.einsum, "j->.j...", [0, 0], optimize=do_opt)
77
+
78
+ # invalid subscript character
79
+ assert_raises(ValueError, np.einsum, "i%...", [0, 0], optimize=do_opt)
80
+ assert_raises(ValueError, np.einsum, "...j$", [0, 0], optimize=do_opt)
81
+ assert_raises(ValueError, np.einsum, "i->&", [0, 0], optimize=do_opt)
82
+
83
+ # output subscripts must appear in input
84
+ assert_raises(ValueError, np.einsum, "i->ij", [0, 0], optimize=do_opt)
85
+
86
+ # output subscripts may only be specified once
87
+ assert_raises(ValueError, np.einsum, "ij->jij", [[0, 0], [0, 0]],
88
+ optimize=do_opt)
89
+
90
+ # dimensions much match when being collapsed
91
+ assert_raises(ValueError, np.einsum, "ii",
92
+ np.arange(6).reshape(2, 3), optimize=do_opt)
93
+ assert_raises(ValueError, np.einsum, "ii->i",
94
+ np.arange(6).reshape(2, 3), optimize=do_opt)
95
+
96
+ # broadcasting to new dimensions must be enabled explicitly
97
+ assert_raises(ValueError, np.einsum, "i", np.arange(6).reshape(2, 3),
98
+ optimize=do_opt)
99
+ assert_raises(ValueError, np.einsum, "i->i", [[0, 1], [0, 1]],
100
+ out=np.arange(4).reshape(2, 2), optimize=do_opt)
101
+ with assert_raises_regex(ValueError, "'b'"):
102
+ # gh-11221 - 'c' erroneously appeared in the error message
103
+ a = np.ones((3, 3, 4, 5, 6))
104
+ b = np.ones((3, 4, 5))
105
+ np.einsum('aabcb,abc', a, b)
106
+
107
+ # Check order kwarg, asanyarray allows 1d to pass through
108
+ assert_raises(ValueError, np.einsum, "i->i", np.arange(6).reshape(-1, 1),
109
+ optimize=do_opt, order='d')
110
+
111
+ def test_einsum_object_errors(self):
112
+ # Exceptions created by object arithmetic should
113
+ # successfully propagate
114
+
115
+ class CustomException(Exception):
116
+ pass
117
+
118
+ class DestructoBox:
119
+
120
+ def __init__(self, value, destruct):
121
+ self._val = value
122
+ self._destruct = destruct
123
+
124
+ def __add__(self, other):
125
+ tmp = self._val + other._val
126
+ if tmp >= self._destruct:
127
+ raise CustomException
128
+ else:
129
+ self._val = tmp
130
+ return self
131
+
132
+ def __radd__(self, other):
133
+ if other == 0:
134
+ return self
135
+ else:
136
+ return self.__add__(other)
137
+
138
+ def __mul__(self, other):
139
+ tmp = self._val * other._val
140
+ if tmp >= self._destruct:
141
+ raise CustomException
142
+ else:
143
+ self._val = tmp
144
+ return self
145
+
146
+ def __rmul__(self, other):
147
+ if other == 0:
148
+ return self
149
+ else:
150
+ return self.__mul__(other)
151
+
152
+ a = np.array([DestructoBox(i, 5) for i in range(1, 10)],
153
+ dtype='object').reshape(3, 3)
154
+
155
+ # raised from unbuffered_loop_nop1_ndim2
156
+ assert_raises(CustomException, np.einsum, "ij->i", a)
157
+
158
+ # raised from unbuffered_loop_nop1_ndim3
159
+ b = np.array([DestructoBox(i, 100) for i in range(0, 27)],
160
+ dtype='object').reshape(3, 3, 3)
161
+ assert_raises(CustomException, np.einsum, "i...k->...", b)
162
+
163
+ # raised from unbuffered_loop_nop2_ndim2
164
+ b = np.array([DestructoBox(i, 55) for i in range(1, 4)],
165
+ dtype='object')
166
+ assert_raises(CustomException, np.einsum, "ij, j", a, b)
167
+
168
+ # raised from unbuffered_loop_nop2_ndim3
169
+ assert_raises(CustomException, np.einsum, "ij, jh", a, a)
170
+
171
+ # raised from PyArray_EinsteinSum
172
+ assert_raises(CustomException, np.einsum, "ij->", a)
173
+
174
+ def test_einsum_views(self):
175
+ # pass-through
176
+ for do_opt in [True, False]:
177
+ a = np.arange(6)
178
+ a.shape = (2, 3)
179
+
180
+ b = np.einsum("...", a, optimize=do_opt)
181
+ assert_(b.base is a)
182
+
183
+ b = np.einsum(a, [Ellipsis], optimize=do_opt)
184
+ assert_(b.base is a)
185
+
186
+ b = np.einsum("ij", a, optimize=do_opt)
187
+ assert_(b.base is a)
188
+ assert_equal(b, a)
189
+
190
+ b = np.einsum(a, [0, 1], optimize=do_opt)
191
+ assert_(b.base is a)
192
+ assert_equal(b, a)
193
+
194
+ # output is writeable whenever input is writeable
195
+ b = np.einsum("...", a, optimize=do_opt)
196
+ assert_(b.flags['WRITEABLE'])
197
+ a.flags['WRITEABLE'] = False
198
+ b = np.einsum("...", a, optimize=do_opt)
199
+ assert_(not b.flags['WRITEABLE'])
200
+
201
+ # transpose
202
+ a = np.arange(6)
203
+ a.shape = (2, 3)
204
+
205
+ b = np.einsum("ji", a, optimize=do_opt)
206
+ assert_(b.base is a)
207
+ assert_equal(b, a.T)
208
+
209
+ b = np.einsum(a, [1, 0], optimize=do_opt)
210
+ assert_(b.base is a)
211
+ assert_equal(b, a.T)
212
+
213
+ # diagonal
214
+ a = np.arange(9)
215
+ a.shape = (3, 3)
216
+
217
+ b = np.einsum("ii->i", a, optimize=do_opt)
218
+ assert_(b.base is a)
219
+ assert_equal(b, [a[i, i] for i in range(3)])
220
+
221
+ b = np.einsum(a, [0, 0], [0], optimize=do_opt)
222
+ assert_(b.base is a)
223
+ assert_equal(b, [a[i, i] for i in range(3)])
224
+
225
+ # diagonal with various ways of broadcasting an additional dimension
226
+ a = np.arange(27)
227
+ a.shape = (3, 3, 3)
228
+
229
+ b = np.einsum("...ii->...i", a, optimize=do_opt)
230
+ assert_(b.base is a)
231
+ assert_equal(b, [[x[i, i] for i in range(3)] for x in a])
232
+
233
+ b = np.einsum(a, [Ellipsis, 0, 0], [Ellipsis, 0], optimize=do_opt)
234
+ assert_(b.base is a)
235
+ assert_equal(b, [[x[i, i] for i in range(3)] for x in a])
236
+
237
+ b = np.einsum("ii...->...i", a, optimize=do_opt)
238
+ assert_(b.base is a)
239
+ assert_equal(b, [[x[i, i] for i in range(3)]
240
+ for x in a.transpose(2, 0, 1)])
241
+
242
+ b = np.einsum(a, [0, 0, Ellipsis], [Ellipsis, 0], optimize=do_opt)
243
+ assert_(b.base is a)
244
+ assert_equal(b, [[x[i, i] for i in range(3)]
245
+ for x in a.transpose(2, 0, 1)])
246
+
247
+ b = np.einsum("...ii->i...", a, optimize=do_opt)
248
+ assert_(b.base is a)
249
+ assert_equal(b, [a[:, i, i] for i in range(3)])
250
+
251
+ b = np.einsum(a, [Ellipsis, 0, 0], [0, Ellipsis], optimize=do_opt)
252
+ assert_(b.base is a)
253
+ assert_equal(b, [a[:, i, i] for i in range(3)])
254
+
255
+ b = np.einsum("jii->ij", a, optimize=do_opt)
256
+ assert_(b.base is a)
257
+ assert_equal(b, [a[:, i, i] for i in range(3)])
258
+
259
+ b = np.einsum(a, [1, 0, 0], [0, 1], optimize=do_opt)
260
+ assert_(b.base is a)
261
+ assert_equal(b, [a[:, i, i] for i in range(3)])
262
+
263
+ b = np.einsum("ii...->i...", a, optimize=do_opt)
264
+ assert_(b.base is a)
265
+ assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)])
266
+
267
+ b = np.einsum(a, [0, 0, Ellipsis], [0, Ellipsis], optimize=do_opt)
268
+ assert_(b.base is a)
269
+ assert_equal(b, [a.transpose(2, 0, 1)[:, i, i] for i in range(3)])
270
+
271
+ b = np.einsum("i...i->i...", a, optimize=do_opt)
272
+ assert_(b.base is a)
273
+ assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)])
274
+
275
+ b = np.einsum(a, [0, Ellipsis, 0], [0, Ellipsis], optimize=do_opt)
276
+ assert_(b.base is a)
277
+ assert_equal(b, [a.transpose(1, 0, 2)[:, i, i] for i in range(3)])
278
+
279
+ b = np.einsum("i...i->...i", a, optimize=do_opt)
280
+ assert_(b.base is a)
281
+ assert_equal(b, [[x[i, i] for i in range(3)]
282
+ for x in a.transpose(1, 0, 2)])
283
+
284
+ b = np.einsum(a, [0, Ellipsis, 0], [Ellipsis, 0], optimize=do_opt)
285
+ assert_(b.base is a)
286
+ assert_equal(b, [[x[i, i] for i in range(3)]
287
+ for x in a.transpose(1, 0, 2)])
288
+
289
+ # triple diagonal
290
+ a = np.arange(27)
291
+ a.shape = (3, 3, 3)
292
+
293
+ b = np.einsum("iii->i", a, optimize=do_opt)
294
+ assert_(b.base is a)
295
+ assert_equal(b, [a[i, i, i] for i in range(3)])
296
+
297
+ b = np.einsum(a, [0, 0, 0], [0], optimize=do_opt)
298
+ assert_(b.base is a)
299
+ assert_equal(b, [a[i, i, i] for i in range(3)])
300
+
301
+ # swap axes
302
+ a = np.arange(24)
303
+ a.shape = (2, 3, 4)
304
+
305
+ b = np.einsum("ijk->jik", a, optimize=do_opt)
306
+ assert_(b.base is a)
307
+ assert_equal(b, a.swapaxes(0, 1))
308
+
309
+ b = np.einsum(a, [0, 1, 2], [1, 0, 2], optimize=do_opt)
310
+ assert_(b.base is a)
311
+ assert_equal(b, a.swapaxes(0, 1))
312
+
313
+ @np._no_nep50_warning()
314
+ def check_einsum_sums(self, dtype, do_opt=False):
315
+ dtype = np.dtype(dtype)
316
+ # Check various sums. Does many sizes to exercise unrolled loops.
317
+
318
+ # sum(a, axis=-1)
319
+ for n in range(1, 17):
320
+ a = np.arange(n, dtype=dtype)
321
+ b = np.sum(a, axis=-1)
322
+ if hasattr(b, 'astype'):
323
+ b = b.astype(dtype)
324
+ assert_equal(np.einsum("i->", a, optimize=do_opt), b)
325
+ assert_equal(np.einsum(a, [0], [], optimize=do_opt), b)
326
+
327
+ for n in range(1, 17):
328
+ a = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n)
329
+ b = np.sum(a, axis=-1)
330
+ if hasattr(b, 'astype'):
331
+ b = b.astype(dtype)
332
+ assert_equal(np.einsum("...i->...", a, optimize=do_opt), b)
333
+ assert_equal(np.einsum(a, [Ellipsis, 0], [Ellipsis], optimize=do_opt), b)
334
+
335
+ # sum(a, axis=0)
336
+ for n in range(1, 17):
337
+ a = np.arange(2*n, dtype=dtype).reshape(2, n)
338
+ b = np.sum(a, axis=0)
339
+ if hasattr(b, 'astype'):
340
+ b = b.astype(dtype)
341
+ assert_equal(np.einsum("i...->...", a, optimize=do_opt), b)
342
+ assert_equal(np.einsum(a, [0, Ellipsis], [Ellipsis], optimize=do_opt), b)
343
+
344
+ for n in range(1, 17):
345
+ a = np.arange(2*3*n, dtype=dtype).reshape(2, 3, n)
346
+ b = np.sum(a, axis=0)
347
+ if hasattr(b, 'astype'):
348
+ b = b.astype(dtype)
349
+ assert_equal(np.einsum("i...->...", a, optimize=do_opt), b)
350
+ assert_equal(np.einsum(a, [0, Ellipsis], [Ellipsis], optimize=do_opt), b)
351
+
352
+ # trace(a)
353
+ for n in range(1, 17):
354
+ a = np.arange(n*n, dtype=dtype).reshape(n, n)
355
+ b = np.trace(a)
356
+ if hasattr(b, 'astype'):
357
+ b = b.astype(dtype)
358
+ assert_equal(np.einsum("ii", a, optimize=do_opt), b)
359
+ assert_equal(np.einsum(a, [0, 0], optimize=do_opt), b)
360
+
361
+ # gh-15961: should accept numpy int64 type in subscript list
362
+ np_array = np.asarray([0, 0])
363
+ assert_equal(np.einsum(a, np_array, optimize=do_opt), b)
364
+ assert_equal(np.einsum(a, list(np_array), optimize=do_opt), b)
365
+
366
+ # multiply(a, b)
367
+ assert_equal(np.einsum("..., ...", 3, 4), 12) # scalar case
368
+ for n in range(1, 17):
369
+ a = np.arange(3 * n, dtype=dtype).reshape(3, n)
370
+ b = np.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n)
371
+ assert_equal(np.einsum("..., ...", a, b, optimize=do_opt),
372
+ np.multiply(a, b))
373
+ assert_equal(np.einsum(a, [Ellipsis], b, [Ellipsis], optimize=do_opt),
374
+ np.multiply(a, b))
375
+
376
+ # inner(a,b)
377
+ for n in range(1, 17):
378
+ a = np.arange(2 * 3 * n, dtype=dtype).reshape(2, 3, n)
379
+ b = np.arange(n, dtype=dtype)
380
+ assert_equal(np.einsum("...i, ...i", a, b, optimize=do_opt), np.inner(a, b))
381
+ assert_equal(np.einsum(a, [Ellipsis, 0], b, [Ellipsis, 0], optimize=do_opt),
382
+ np.inner(a, b))
383
+
384
+ for n in range(1, 11):
385
+ a = np.arange(n * 3 * 2, dtype=dtype).reshape(n, 3, 2)
386
+ b = np.arange(n, dtype=dtype)
387
+ assert_equal(np.einsum("i..., i...", a, b, optimize=do_opt),
388
+ np.inner(a.T, b.T).T)
389
+ assert_equal(np.einsum(a, [0, Ellipsis], b, [0, Ellipsis], optimize=do_opt),
390
+ np.inner(a.T, b.T).T)
391
+
392
+ # outer(a,b)
393
+ for n in range(1, 17):
394
+ a = np.arange(3, dtype=dtype)+1
395
+ b = np.arange(n, dtype=dtype)+1
396
+ assert_equal(np.einsum("i,j", a, b, optimize=do_opt),
397
+ np.outer(a, b))
398
+ assert_equal(np.einsum(a, [0], b, [1], optimize=do_opt),
399
+ np.outer(a, b))
400
+
401
+ # Suppress the complex warnings for the 'as f8' tests
402
+ with suppress_warnings() as sup:
403
+ sup.filter(np.ComplexWarning)
404
+
405
+ # matvec(a,b) / a.dot(b) where a is matrix, b is vector
406
+ for n in range(1, 17):
407
+ a = np.arange(4*n, dtype=dtype).reshape(4, n)
408
+ b = np.arange(n, dtype=dtype)
409
+ assert_equal(np.einsum("ij, j", a, b, optimize=do_opt),
410
+ np.dot(a, b))
411
+ assert_equal(np.einsum(a, [0, 1], b, [1], optimize=do_opt),
412
+ np.dot(a, b))
413
+
414
+ c = np.arange(4, dtype=dtype)
415
+ np.einsum("ij,j", a, b, out=c,
416
+ dtype='f8', casting='unsafe', optimize=do_opt)
417
+ assert_equal(c,
418
+ np.dot(a.astype('f8'),
419
+ b.astype('f8')).astype(dtype))
420
+ c[...] = 0
421
+ np.einsum(a, [0, 1], b, [1], out=c,
422
+ dtype='f8', casting='unsafe', optimize=do_opt)
423
+ assert_equal(c,
424
+ np.dot(a.astype('f8'),
425
+ b.astype('f8')).astype(dtype))
426
+
427
+ for n in range(1, 17):
428
+ a = np.arange(4*n, dtype=dtype).reshape(4, n)
429
+ b = np.arange(n, dtype=dtype)
430
+ assert_equal(np.einsum("ji,j", a.T, b.T, optimize=do_opt),
431
+ np.dot(b.T, a.T))
432
+ assert_equal(np.einsum(a.T, [1, 0], b.T, [1], optimize=do_opt),
433
+ np.dot(b.T, a.T))
434
+
435
+ c = np.arange(4, dtype=dtype)
436
+ np.einsum("ji,j", a.T, b.T, out=c,
437
+ dtype='f8', casting='unsafe', optimize=do_opt)
438
+ assert_equal(c,
439
+ np.dot(b.T.astype('f8'),
440
+ a.T.astype('f8')).astype(dtype))
441
+ c[...] = 0
442
+ np.einsum(a.T, [1, 0], b.T, [1], out=c,
443
+ dtype='f8', casting='unsafe', optimize=do_opt)
444
+ assert_equal(c,
445
+ np.dot(b.T.astype('f8'),
446
+ a.T.astype('f8')).astype(dtype))
447
+
448
+ # matmat(a,b) / a.dot(b) where a is matrix, b is matrix
449
+ for n in range(1, 17):
450
+ if n < 8 or dtype != 'f2':
451
+ a = np.arange(4*n, dtype=dtype).reshape(4, n)
452
+ b = np.arange(n*6, dtype=dtype).reshape(n, 6)
453
+ assert_equal(np.einsum("ij,jk", a, b, optimize=do_opt),
454
+ np.dot(a, b))
455
+ assert_equal(np.einsum(a, [0, 1], b, [1, 2], optimize=do_opt),
456
+ np.dot(a, b))
457
+
458
+ for n in range(1, 17):
459
+ a = np.arange(4*n, dtype=dtype).reshape(4, n)
460
+ b = np.arange(n*6, dtype=dtype).reshape(n, 6)
461
+ c = np.arange(24, dtype=dtype).reshape(4, 6)
462
+ np.einsum("ij,jk", a, b, out=c, dtype='f8', casting='unsafe',
463
+ optimize=do_opt)
464
+ assert_equal(c,
465
+ np.dot(a.astype('f8'),
466
+ b.astype('f8')).astype(dtype))
467
+ c[...] = 0
468
+ np.einsum(a, [0, 1], b, [1, 2], out=c,
469
+ dtype='f8', casting='unsafe', optimize=do_opt)
470
+ assert_equal(c,
471
+ np.dot(a.astype('f8'),
472
+ b.astype('f8')).astype(dtype))
473
+
474
+ # matrix triple product (note this is not currently an efficient
475
+ # way to multiply 3 matrices)
476
+ a = np.arange(12, dtype=dtype).reshape(3, 4)
477
+ b = np.arange(20, dtype=dtype).reshape(4, 5)
478
+ c = np.arange(30, dtype=dtype).reshape(5, 6)
479
+ if dtype != 'f2':
480
+ assert_equal(np.einsum("ij,jk,kl", a, b, c, optimize=do_opt),
481
+ a.dot(b).dot(c))
482
+ assert_equal(np.einsum(a, [0, 1], b, [1, 2], c, [2, 3],
483
+ optimize=do_opt), a.dot(b).dot(c))
484
+
485
+ d = np.arange(18, dtype=dtype).reshape(3, 6)
486
+ np.einsum("ij,jk,kl", a, b, c, out=d,
487
+ dtype='f8', casting='unsafe', optimize=do_opt)
488
+ tgt = a.astype('f8').dot(b.astype('f8'))
489
+ tgt = tgt.dot(c.astype('f8')).astype(dtype)
490
+ assert_equal(d, tgt)
491
+
492
+ d[...] = 0
493
+ np.einsum(a, [0, 1], b, [1, 2], c, [2, 3], out=d,
494
+ dtype='f8', casting='unsafe', optimize=do_opt)
495
+ tgt = a.astype('f8').dot(b.astype('f8'))
496
+ tgt = tgt.dot(c.astype('f8')).astype(dtype)
497
+ assert_equal(d, tgt)
498
+
499
+ # tensordot(a, b)
500
+ if np.dtype(dtype) != np.dtype('f2'):
501
+ a = np.arange(60, dtype=dtype).reshape(3, 4, 5)
502
+ b = np.arange(24, dtype=dtype).reshape(4, 3, 2)
503
+ assert_equal(np.einsum("ijk, jil -> kl", a, b),
504
+ np.tensordot(a, b, axes=([1, 0], [0, 1])))
505
+ assert_equal(np.einsum(a, [0, 1, 2], b, [1, 0, 3], [2, 3]),
506
+ np.tensordot(a, b, axes=([1, 0], [0, 1])))
507
+
508
+ c = np.arange(10, dtype=dtype).reshape(5, 2)
509
+ np.einsum("ijk,jil->kl", a, b, out=c,
510
+ dtype='f8', casting='unsafe', optimize=do_opt)
511
+ assert_equal(c, np.tensordot(a.astype('f8'), b.astype('f8'),
512
+ axes=([1, 0], [0, 1])).astype(dtype))
513
+ c[...] = 0
514
+ np.einsum(a, [0, 1, 2], b, [1, 0, 3], [2, 3], out=c,
515
+ dtype='f8', casting='unsafe', optimize=do_opt)
516
+ assert_equal(c, np.tensordot(a.astype('f8'), b.astype('f8'),
517
+ axes=([1, 0], [0, 1])).astype(dtype))
518
+
519
+ # logical_and(logical_and(a!=0, b!=0), c!=0)
520
+ neg_val = -2 if dtype.kind != "u" else np.iinfo(dtype).max - 1
521
+ a = np.array([1, 3, neg_val, 0, 12, 13, 0, 1], dtype=dtype)
522
+ b = np.array([0, 3.5, 0., neg_val, 0, 1, 3, 12], dtype=dtype)
523
+ c = np.array([True, True, False, True, True, False, True, True])
524
+
525
+ assert_equal(np.einsum("i,i,i->i", a, b, c,
526
+ dtype='?', casting='unsafe', optimize=do_opt),
527
+ np.logical_and(np.logical_and(a != 0, b != 0), c != 0))
528
+ assert_equal(np.einsum(a, [0], b, [0], c, [0], [0],
529
+ dtype='?', casting='unsafe'),
530
+ np.logical_and(np.logical_and(a != 0, b != 0), c != 0))
531
+
532
+ a = np.arange(9, dtype=dtype)
533
+ assert_equal(np.einsum(",i->", 3, a), 3*np.sum(a))
534
+ assert_equal(np.einsum(3, [], a, [0], []), 3*np.sum(a))
535
+ assert_equal(np.einsum("i,->", a, 3), 3*np.sum(a))
536
+ assert_equal(np.einsum(a, [0], 3, [], []), 3*np.sum(a))
537
+
538
+ # Various stride0, contiguous, and SSE aligned variants
539
+ for n in range(1, 25):
540
+ a = np.arange(n, dtype=dtype)
541
+ if np.dtype(dtype).itemsize > 1:
542
+ assert_equal(np.einsum("...,...", a, a, optimize=do_opt),
543
+ np.multiply(a, a))
544
+ assert_equal(np.einsum("i,i", a, a, optimize=do_opt), np.dot(a, a))
545
+ assert_equal(np.einsum("i,->i", a, 2, optimize=do_opt), 2*a)
546
+ assert_equal(np.einsum(",i->i", 2, a, optimize=do_opt), 2*a)
547
+ assert_equal(np.einsum("i,->", a, 2, optimize=do_opt), 2*np.sum(a))
548
+ assert_equal(np.einsum(",i->", 2, a, optimize=do_opt), 2*np.sum(a))
549
+
550
+ assert_equal(np.einsum("...,...", a[1:], a[:-1], optimize=do_opt),
551
+ np.multiply(a[1:], a[:-1]))
552
+ assert_equal(np.einsum("i,i", a[1:], a[:-1], optimize=do_opt),
553
+ np.dot(a[1:], a[:-1]))
554
+ assert_equal(np.einsum("i,->i", a[1:], 2, optimize=do_opt), 2*a[1:])
555
+ assert_equal(np.einsum(",i->i", 2, a[1:], optimize=do_opt), 2*a[1:])
556
+ assert_equal(np.einsum("i,->", a[1:], 2, optimize=do_opt),
557
+ 2*np.sum(a[1:]))
558
+ assert_equal(np.einsum(",i->", 2, a[1:], optimize=do_opt),
559
+ 2*np.sum(a[1:]))
560
+
561
+ # An object array, summed as the data type
562
+ a = np.arange(9, dtype=object)
563
+
564
+ b = np.einsum("i->", a, dtype=dtype, casting='unsafe')
565
+ assert_equal(b, np.sum(a))
566
+ if hasattr(b, "dtype"):
567
+ # Can be a python object when dtype is object
568
+ assert_equal(b.dtype, np.dtype(dtype))
569
+
570
+ b = np.einsum(a, [0], [], dtype=dtype, casting='unsafe')
571
+ assert_equal(b, np.sum(a))
572
+ if hasattr(b, "dtype"):
573
+ # Can be a python object when dtype is object
574
+ assert_equal(b.dtype, np.dtype(dtype))
575
+
576
+ # A case which was failing (ticket #1885)
577
+ p = np.arange(2) + 1
578
+ q = np.arange(4).reshape(2, 2) + 3
579
+ r = np.arange(4).reshape(2, 2) + 7
580
+ assert_equal(np.einsum('z,mz,zm->', p, q, r), 253)
581
+
582
+ # singleton dimensions broadcast (gh-10343)
583
+ p = np.ones((10,2))
584
+ q = np.ones((1,2))
585
+ assert_array_equal(np.einsum('ij,ij->j', p, q, optimize=True),
586
+ np.einsum('ij,ij->j', p, q, optimize=False))
587
+ assert_array_equal(np.einsum('ij,ij->j', p, q, optimize=True),
588
+ [10.] * 2)
589
+
590
+ # a blas-compatible contraction broadcasting case which was failing
591
+ # for optimize=True (ticket #10930)
592
+ x = np.array([2., 3.])
593
+ y = np.array([4.])
594
+ assert_array_equal(np.einsum("i, i", x, y, optimize=False), 20.)
595
+ assert_array_equal(np.einsum("i, i", x, y, optimize=True), 20.)
596
+
597
+ # all-ones array was bypassing bug (ticket #10930)
598
+ p = np.ones((1, 5)) / 2
599
+ q = np.ones((5, 5)) / 2
600
+ for optimize in (True, False):
601
+ assert_array_equal(np.einsum("...ij,...jk->...ik", p, p,
602
+ optimize=optimize),
603
+ np.einsum("...ij,...jk->...ik", p, q,
604
+ optimize=optimize))
605
+ assert_array_equal(np.einsum("...ij,...jk->...ik", p, q,
606
+ optimize=optimize),
607
+ np.full((1, 5), 1.25))
608
+
609
+ # Cases which were failing (gh-10899)
610
+ x = np.eye(2, dtype=dtype)
611
+ y = np.ones(2, dtype=dtype)
612
+ assert_array_equal(np.einsum("ji,i->", x, y, optimize=optimize),
613
+ [2.]) # contig_contig_outstride0_two
614
+ assert_array_equal(np.einsum("i,ij->", y, x, optimize=optimize),
615
+ [2.]) # stride0_contig_outstride0_two
616
+ assert_array_equal(np.einsum("ij,i->", x, y, optimize=optimize),
617
+ [2.]) # contig_stride0_outstride0_two
618
+
619
+ def test_einsum_sums_int8(self):
620
+ if (
621
+ (sys.platform == 'darwin' and platform.machine() == 'x86_64')
622
+ or
623
+ USING_CLANG_CL
624
+ ):
625
+ pytest.xfail('Fails on macOS x86-64 and when using clang-cl '
626
+ 'with Meson, see gh-23838')
627
+ self.check_einsum_sums('i1')
628
+
629
+ def test_einsum_sums_uint8(self):
630
+ if (
631
+ (sys.platform == 'darwin' and platform.machine() == 'x86_64')
632
+ or
633
+ USING_CLANG_CL
634
+ ):
635
+ pytest.xfail('Fails on macOS x86-64 and when using clang-cl '
636
+ 'with Meson, see gh-23838')
637
+ self.check_einsum_sums('u1')
638
+
639
+ def test_einsum_sums_int16(self):
640
+ self.check_einsum_sums('i2')
641
+
642
+ def test_einsum_sums_uint16(self):
643
+ self.check_einsum_sums('u2')
644
+
645
+ def test_einsum_sums_int32(self):
646
+ self.check_einsum_sums('i4')
647
+ self.check_einsum_sums('i4', True)
648
+
649
+ def test_einsum_sums_uint32(self):
650
+ self.check_einsum_sums('u4')
651
+ self.check_einsum_sums('u4', True)
652
+
653
+ def test_einsum_sums_int64(self):
654
+ self.check_einsum_sums('i8')
655
+
656
+ def test_einsum_sums_uint64(self):
657
+ self.check_einsum_sums('u8')
658
+
659
+ def test_einsum_sums_float16(self):
660
+ self.check_einsum_sums('f2')
661
+
662
+ def test_einsum_sums_float32(self):
663
+ self.check_einsum_sums('f4')
664
+
665
+ def test_einsum_sums_float64(self):
666
+ self.check_einsum_sums('f8')
667
+ self.check_einsum_sums('f8', True)
668
+
669
+ def test_einsum_sums_longdouble(self):
670
+ self.check_einsum_sums(np.longdouble)
671
+
672
+ def test_einsum_sums_cfloat64(self):
673
+ self.check_einsum_sums('c8')
674
+ self.check_einsum_sums('c8', True)
675
+
676
+ def test_einsum_sums_cfloat128(self):
677
+ self.check_einsum_sums('c16')
678
+
679
+ def test_einsum_sums_clongdouble(self):
680
+ self.check_einsum_sums(np.clongdouble)
681
+
682
+ def test_einsum_sums_object(self):
683
+ self.check_einsum_sums('object')
684
+ self.check_einsum_sums('object', True)
685
+
686
+ def test_einsum_misc(self):
687
+ # This call used to crash because of a bug in
688
+ # PyArray_AssignZero
689
+ a = np.ones((1, 2))
690
+ b = np.ones((2, 2, 1))
691
+ assert_equal(np.einsum('ij...,j...->i...', a, b), [[[2], [2]]])
692
+ assert_equal(np.einsum('ij...,j...->i...', a, b, optimize=True), [[[2], [2]]])
693
+
694
+ # Regression test for issue #10369 (test unicode inputs with Python 2)
695
+ assert_equal(np.einsum('ij...,j...->i...', a, b), [[[2], [2]]])
696
+ assert_equal(np.einsum('...i,...i', [1, 2, 3], [2, 3, 4]), 20)
697
+ assert_equal(np.einsum('...i,...i', [1, 2, 3], [2, 3, 4],
698
+ optimize='greedy'), 20)
699
+
700
+ # The iterator had an issue with buffering this reduction
701
+ a = np.ones((5, 12, 4, 2, 3), np.int64)
702
+ b = np.ones((5, 12, 11), np.int64)
703
+ assert_equal(np.einsum('ijklm,ijn,ijn->', a, b, b),
704
+ np.einsum('ijklm,ijn->', a, b))
705
+ assert_equal(np.einsum('ijklm,ijn,ijn->', a, b, b, optimize=True),
706
+ np.einsum('ijklm,ijn->', a, b, optimize=True))
707
+
708
+ # Issue #2027, was a problem in the contiguous 3-argument
709
+ # inner loop implementation
710
+ a = np.arange(1, 3)
711
+ b = np.arange(1, 5).reshape(2, 2)
712
+ c = np.arange(1, 9).reshape(4, 2)
713
+ assert_equal(np.einsum('x,yx,zx->xzy', a, b, c),
714
+ [[[1, 3], [3, 9], [5, 15], [7, 21]],
715
+ [[8, 16], [16, 32], [24, 48], [32, 64]]])
716
+ assert_equal(np.einsum('x,yx,zx->xzy', a, b, c, optimize=True),
717
+ [[[1, 3], [3, 9], [5, 15], [7, 21]],
718
+ [[8, 16], [16, 32], [24, 48], [32, 64]]])
719
+
720
+ # Ensure explicitly setting out=None does not cause an error
721
+ # see issue gh-15776 and issue gh-15256
722
+ assert_equal(np.einsum('i,j', [1], [2], out=None), [[2]])
723
+
724
+ def test_object_loop(self):
725
+
726
+ class Mult:
727
+ def __mul__(self, other):
728
+ return 42
729
+
730
+ objMult = np.array([Mult()])
731
+ objNULL = np.ndarray(buffer = b'\0' * np.intp(0).itemsize, shape=1, dtype=object)
732
+
733
+ with pytest.raises(TypeError):
734
+ np.einsum("i,j", [1], objNULL)
735
+ with pytest.raises(TypeError):
736
+ np.einsum("i,j", objNULL, [1])
737
+ assert np.einsum("i,j", objMult, objMult) == 42
738
+
739
+ def test_subscript_range(self):
740
+ # Issue #7741, make sure that all letters of Latin alphabet (both uppercase & lowercase) can be used
741
+ # when creating a subscript from arrays
742
+ a = np.ones((2, 3))
743
+ b = np.ones((3, 4))
744
+ np.einsum(a, [0, 20], b, [20, 2], [0, 2], optimize=False)
745
+ np.einsum(a, [0, 27], b, [27, 2], [0, 2], optimize=False)
746
+ np.einsum(a, [0, 51], b, [51, 2], [0, 2], optimize=False)
747
+ assert_raises(ValueError, lambda: np.einsum(a, [0, 52], b, [52, 2], [0, 2], optimize=False))
748
+ assert_raises(ValueError, lambda: np.einsum(a, [-1, 5], b, [5, 2], [-1, 2], optimize=False))
749
+
750
+ def test_einsum_broadcast(self):
751
+ # Issue #2455 change in handling ellipsis
752
+ # remove the 'middle broadcast' error
753
+ # only use the 'RIGHT' iteration in prepare_op_axes
754
+ # adds auto broadcast on left where it belongs
755
+ # broadcast on right has to be explicit
756
+ # We need to test the optimized parsing as well
757
+
758
+ A = np.arange(2 * 3 * 4).reshape(2, 3, 4)
759
+ B = np.arange(3)
760
+ ref = np.einsum('ijk,j->ijk', A, B, optimize=False)
761
+ for opt in [True, False]:
762
+ assert_equal(np.einsum('ij...,j...->ij...', A, B, optimize=opt), ref)
763
+ assert_equal(np.einsum('ij...,...j->ij...', A, B, optimize=opt), ref)
764
+ assert_equal(np.einsum('ij...,j->ij...', A, B, optimize=opt), ref) # used to raise error
765
+
766
+ A = np.arange(12).reshape((4, 3))
767
+ B = np.arange(6).reshape((3, 2))
768
+ ref = np.einsum('ik,kj->ij', A, B, optimize=False)
769
+ for opt in [True, False]:
770
+ assert_equal(np.einsum('ik...,k...->i...', A, B, optimize=opt), ref)
771
+ assert_equal(np.einsum('ik...,...kj->i...j', A, B, optimize=opt), ref)
772
+ assert_equal(np.einsum('...k,kj', A, B, optimize=opt), ref) # used to raise error
773
+ assert_equal(np.einsum('ik,k...->i...', A, B, optimize=opt), ref) # used to raise error
774
+
775
+ dims = [2, 3, 4, 5]
776
+ a = np.arange(np.prod(dims)).reshape(dims)
777
+ v = np.arange(dims[2])
778
+ ref = np.einsum('ijkl,k->ijl', a, v, optimize=False)
779
+ for opt in [True, False]:
780
+ assert_equal(np.einsum('ijkl,k', a, v, optimize=opt), ref)
781
+ assert_equal(np.einsum('...kl,k', a, v, optimize=opt), ref) # used to raise error
782
+ assert_equal(np.einsum('...kl,k...', a, v, optimize=opt), ref)
783
+
784
+ J, K, M = 160, 160, 120
785
+ A = np.arange(J * K * M).reshape(1, 1, 1, J, K, M)
786
+ B = np.arange(J * K * M * 3).reshape(J, K, M, 3)
787
+ ref = np.einsum('...lmn,...lmno->...o', A, B, optimize=False)
788
+ for opt in [True, False]:
789
+ assert_equal(np.einsum('...lmn,lmno->...o', A, B,
790
+ optimize=opt), ref) # used to raise error
791
+
792
+ def test_einsum_fixedstridebug(self):
793
+ # Issue #4485 obscure einsum bug
794
+ # This case revealed a bug in nditer where it reported a stride
795
+ # as 'fixed' (0) when it was in fact not fixed during processing
796
+ # (0 or 4). The reason for the bug was that the check for a fixed
797
+ # stride was using the information from the 2D inner loop reuse
798
+ # to restrict the iteration dimensions it had to validate to be
799
+ # the same, but that 2D inner loop reuse logic is only triggered
800
+ # during the buffer copying step, and hence it was invalid to
801
+ # rely on those values. The fix is to check all the dimensions
802
+ # of the stride in question, which in the test case reveals that
803
+ # the stride is not fixed.
804
+ #
805
+ # NOTE: This test is triggered by the fact that the default buffersize,
806
+ # used by einsum, is 8192, and 3*2731 = 8193, is larger than that
807
+ # and results in a mismatch between the buffering and the
808
+ # striding for operand A.
809
+ A = np.arange(2 * 3).reshape(2, 3).astype(np.float32)
810
+ B = np.arange(2 * 3 * 2731).reshape(2, 3, 2731).astype(np.int16)
811
+ es = np.einsum('cl, cpx->lpx', A, B)
812
+ tp = np.tensordot(A, B, axes=(0, 0))
813
+ assert_equal(es, tp)
814
+ # The following is the original test case from the bug report,
815
+ # made repeatable by changing random arrays to aranges.
816
+ A = np.arange(3 * 3).reshape(3, 3).astype(np.float64)
817
+ B = np.arange(3 * 3 * 64 * 64).reshape(3, 3, 64, 64).astype(np.float32)
818
+ es = np.einsum('cl, cpxy->lpxy', A, B)
819
+ tp = np.tensordot(A, B, axes=(0, 0))
820
+ assert_equal(es, tp)
821
+
822
+ def test_einsum_fixed_collapsingbug(self):
823
+ # Issue #5147.
824
+ # The bug only occurred when output argument of einssum was used.
825
+ x = np.random.normal(0, 1, (5, 5, 5, 5))
826
+ y1 = np.zeros((5, 5))
827
+ np.einsum('aabb->ab', x, out=y1)
828
+ idx = np.arange(5)
829
+ y2 = x[idx[:, None], idx[:, None], idx, idx]
830
+ assert_equal(y1, y2)
831
+
832
+ def test_einsum_failed_on_p9_and_s390x(self):
833
+ # Issues gh-14692 and gh-12689
834
+ # Bug with signed vs unsigned char errored on power9 and s390x Linux
835
+ tensor = np.random.random_sample((10, 10, 10, 10))
836
+ x = np.einsum('ijij->', tensor)
837
+ y = tensor.trace(axis1=0, axis2=2).trace()
838
+ assert_allclose(x, y)
839
+
840
+ def test_einsum_all_contig_non_contig_output(self):
841
+ # Issue gh-5907, tests that the all contiguous special case
842
+ # actually checks the contiguity of the output
843
+ x = np.ones((5, 5))
844
+ out = np.ones(10)[::2]
845
+ correct_base = np.ones(10)
846
+ correct_base[::2] = 5
847
+ # Always worked (inner iteration is done with 0-stride):
848
+ np.einsum('mi,mi,mi->m', x, x, x, out=out)
849
+ assert_array_equal(out.base, correct_base)
850
+ # Example 1:
851
+ out = np.ones(10)[::2]
852
+ np.einsum('im,im,im->m', x, x, x, out=out)
853
+ assert_array_equal(out.base, correct_base)
854
+ # Example 2, buffering causes x to be contiguous but
855
+ # special cases do not catch the operation before:
856
+ out = np.ones((2, 2, 2))[..., 0]
857
+ correct_base = np.ones((2, 2, 2))
858
+ correct_base[..., 0] = 2
859
+ x = np.ones((2, 2), np.float32)
860
+ np.einsum('ij,jk->ik', x, x, out=out)
861
+ assert_array_equal(out.base, correct_base)
862
+
863
+ @pytest.mark.parametrize("dtype",
864
+ np.typecodes["AllFloat"] + np.typecodes["AllInteger"])
865
+ def test_different_paths(self, dtype):
866
+ # Test originally added to cover broken float16 path: gh-20305
867
+ # Likely most are covered elsewhere, at least partially.
868
+ dtype = np.dtype(dtype)
869
+ # Simple test, designed to exercise most specialized code paths,
870
+ # note the +0.5 for floats. This makes sure we use a float value
871
+ # where the results must be exact.
872
+ arr = (np.arange(7) + 0.5).astype(dtype)
873
+ scalar = np.array(2, dtype=dtype)
874
+
875
+ # contig -> scalar:
876
+ res = np.einsum('i->', arr)
877
+ assert res == arr.sum()
878
+ # contig, contig -> contig:
879
+ res = np.einsum('i,i->i', arr, arr)
880
+ assert_array_equal(res, arr * arr)
881
+ # noncontig, noncontig -> contig:
882
+ res = np.einsum('i,i->i', arr.repeat(2)[::2], arr.repeat(2)[::2])
883
+ assert_array_equal(res, arr * arr)
884
+ # contig + contig -> scalar
885
+ assert np.einsum('i,i->', arr, arr) == (arr * arr).sum()
886
+ # contig + scalar -> contig (with out)
887
+ out = np.ones(7, dtype=dtype)
888
+ res = np.einsum('i,->i', arr, dtype.type(2), out=out)
889
+ assert_array_equal(res, arr * dtype.type(2))
890
+ # scalar + contig -> contig (with out)
891
+ res = np.einsum(',i->i', scalar, arr)
892
+ assert_array_equal(res, arr * dtype.type(2))
893
+ # scalar + contig -> scalar
894
+ res = np.einsum(',i->', scalar, arr)
895
+ # Use einsum to compare to not have difference due to sum round-offs:
896
+ assert res == np.einsum('i->', scalar * arr)
897
+ # contig + scalar -> scalar
898
+ res = np.einsum('i,->', arr, scalar)
899
+ # Use einsum to compare to not have difference due to sum round-offs:
900
+ assert res == np.einsum('i->', scalar * arr)
901
+ # contig + contig + contig -> scalar
902
+ arr = np.array([0.5, 0.5, 0.25, 4.5, 3.], dtype=dtype)
903
+ res = np.einsum('i,i,i->', arr, arr, arr)
904
+ assert_array_equal(res, (arr * arr * arr).sum())
905
+ # four arrays:
906
+ res = np.einsum('i,i,i,i->', arr, arr, arr, arr)
907
+ assert_array_equal(res, (arr * arr * arr * arr).sum())
908
+
909
+ def test_small_boolean_arrays(self):
910
+ # See gh-5946.
911
+ # Use array of True embedded in False.
912
+ a = np.zeros((16, 1, 1), dtype=np.bool_)[:2]
913
+ a[...] = True
914
+ out = np.zeros((16, 1, 1), dtype=np.bool_)[:2]
915
+ tgt = np.ones((2, 1, 1), dtype=np.bool_)
916
+ res = np.einsum('...ij,...jk->...ik', a, a, out=out)
917
+ assert_equal(res, tgt)
918
+
919
+ def test_out_is_res(self):
920
+ a = np.arange(9).reshape(3, 3)
921
+ res = np.einsum('...ij,...jk->...ik', a, a, out=a)
922
+ assert res is a
923
+
924
+ def optimize_compare(self, subscripts, operands=None):
925
+ # Tests all paths of the optimization function against
926
+ # conventional einsum
927
+ if operands is None:
928
+ args = [subscripts]
929
+ terms = subscripts.split('->')[0].split(',')
930
+ for term in terms:
931
+ dims = [global_size_dict[x] for x in term]
932
+ args.append(np.random.rand(*dims))
933
+ else:
934
+ args = [subscripts] + operands
935
+
936
+ noopt = np.einsum(*args, optimize=False)
937
+ opt = np.einsum(*args, optimize='greedy')
938
+ assert_almost_equal(opt, noopt)
939
+ opt = np.einsum(*args, optimize='optimal')
940
+ assert_almost_equal(opt, noopt)
941
+
942
+ def test_hadamard_like_products(self):
943
+ # Hadamard outer products
944
+ self.optimize_compare('a,ab,abc->abc')
945
+ self.optimize_compare('a,b,ab->ab')
946
+
947
+ def test_index_transformations(self):
948
+ # Simple index transformation cases
949
+ self.optimize_compare('ea,fb,gc,hd,abcd->efgh')
950
+ self.optimize_compare('ea,fb,abcd,gc,hd->efgh')
951
+ self.optimize_compare('abcd,ea,fb,gc,hd->efgh')
952
+
953
+ def test_complex(self):
954
+ # Long test cases
955
+ self.optimize_compare('acdf,jbje,gihb,hfac,gfac,gifabc,hfac')
956
+ self.optimize_compare('acdf,jbje,gihb,hfac,gfac,gifabc,hfac')
957
+ self.optimize_compare('cd,bdhe,aidb,hgca,gc,hgibcd,hgac')
958
+ self.optimize_compare('abhe,hidj,jgba,hiab,gab')
959
+ self.optimize_compare('bde,cdh,agdb,hica,ibd,hgicd,hiac')
960
+ self.optimize_compare('chd,bde,agbc,hiad,hgc,hgi,hiad')
961
+ self.optimize_compare('chd,bde,agbc,hiad,bdi,cgh,agdb')
962
+ self.optimize_compare('bdhe,acad,hiab,agac,hibd')
963
+
964
+ def test_collapse(self):
965
+ # Inner products
966
+ self.optimize_compare('ab,ab,c->')
967
+ self.optimize_compare('ab,ab,c->c')
968
+ self.optimize_compare('ab,ab,cd,cd->')
969
+ self.optimize_compare('ab,ab,cd,cd->ac')
970
+ self.optimize_compare('ab,ab,cd,cd->cd')
971
+ self.optimize_compare('ab,ab,cd,cd,ef,ef->')
972
+
973
+ def test_expand(self):
974
+ # Outer products
975
+ self.optimize_compare('ab,cd,ef->abcdef')
976
+ self.optimize_compare('ab,cd,ef->acdf')
977
+ self.optimize_compare('ab,cd,de->abcde')
978
+ self.optimize_compare('ab,cd,de->be')
979
+ self.optimize_compare('ab,bcd,cd->abcd')
980
+ self.optimize_compare('ab,bcd,cd->abd')
981
+
982
+ def test_edge_cases(self):
983
+ # Difficult edge cases for optimization
984
+ self.optimize_compare('eb,cb,fb->cef')
985
+ self.optimize_compare('dd,fb,be,cdb->cef')
986
+ self.optimize_compare('bca,cdb,dbf,afc->')
987
+ self.optimize_compare('dcc,fce,ea,dbf->ab')
988
+ self.optimize_compare('fdf,cdd,ccd,afe->ae')
989
+ self.optimize_compare('abcd,ad')
990
+ self.optimize_compare('ed,fcd,ff,bcf->be')
991
+ self.optimize_compare('baa,dcf,af,cde->be')
992
+ self.optimize_compare('bd,db,eac->ace')
993
+ self.optimize_compare('fff,fae,bef,def->abd')
994
+ self.optimize_compare('efc,dbc,acf,fd->abe')
995
+ self.optimize_compare('ba,ac,da->bcd')
996
+
997
+ def test_inner_product(self):
998
+ # Inner products
999
+ self.optimize_compare('ab,ab')
1000
+ self.optimize_compare('ab,ba')
1001
+ self.optimize_compare('abc,abc')
1002
+ self.optimize_compare('abc,bac')
1003
+ self.optimize_compare('abc,cba')
1004
+
1005
+ def test_random_cases(self):
1006
+ # Randomly built test cases
1007
+ self.optimize_compare('aab,fa,df,ecc->bde')
1008
+ self.optimize_compare('ecb,fef,bad,ed->ac')
1009
+ self.optimize_compare('bcf,bbb,fbf,fc->')
1010
+ self.optimize_compare('bb,ff,be->e')
1011
+ self.optimize_compare('bcb,bb,fc,fff->')
1012
+ self.optimize_compare('fbb,dfd,fc,fc->')
1013
+ self.optimize_compare('afd,ba,cc,dc->bf')
1014
+ self.optimize_compare('adb,bc,fa,cfc->d')
1015
+ self.optimize_compare('bbd,bda,fc,db->acf')
1016
+ self.optimize_compare('dba,ead,cad->bce')
1017
+ self.optimize_compare('aef,fbc,dca->bde')
1018
+
1019
+ def test_combined_views_mapping(self):
1020
+ # gh-10792
1021
+ a = np.arange(9).reshape(1, 1, 3, 1, 3)
1022
+ b = np.einsum('bbcdc->d', a)
1023
+ assert_equal(b, [12])
1024
+
1025
+ def test_broadcasting_dot_cases(self):
1026
+ # Ensures broadcasting cases are not mistaken for GEMM
1027
+
1028
+ a = np.random.rand(1, 5, 4)
1029
+ b = np.random.rand(4, 6)
1030
+ c = np.random.rand(5, 6)
1031
+ d = np.random.rand(10)
1032
+
1033
+ self.optimize_compare('ijk,kl,jl', operands=[a, b, c])
1034
+ self.optimize_compare('ijk,kl,jl,i->i', operands=[a, b, c, d])
1035
+
1036
+ e = np.random.rand(1, 1, 5, 4)
1037
+ f = np.random.rand(7, 7)
1038
+ self.optimize_compare('abjk,kl,jl', operands=[e, b, c])
1039
+ self.optimize_compare('abjk,kl,jl,ab->ab', operands=[e, b, c, f])
1040
+
1041
+ # Edge case found in gh-11308
1042
+ g = np.arange(64).reshape(2, 4, 8)
1043
+ self.optimize_compare('obk,ijk->ioj', operands=[g, g])
1044
+
1045
+ def test_output_order(self):
1046
+ # Ensure output order is respected for optimize cases, the below
1047
+ # conraction should yield a reshaped tensor view
1048
+ # gh-16415
1049
+
1050
+ a = np.ones((2, 3, 5), order='F')
1051
+ b = np.ones((4, 3), order='F')
1052
+
1053
+ for opt in [True, False]:
1054
+ tmp = np.einsum('...ft,mf->...mt', a, b, order='a', optimize=opt)
1055
+ assert_(tmp.flags.f_contiguous)
1056
+
1057
+ tmp = np.einsum('...ft,mf->...mt', a, b, order='f', optimize=opt)
1058
+ assert_(tmp.flags.f_contiguous)
1059
+
1060
+ tmp = np.einsum('...ft,mf->...mt', a, b, order='c', optimize=opt)
1061
+ assert_(tmp.flags.c_contiguous)
1062
+
1063
+ tmp = np.einsum('...ft,mf->...mt', a, b, order='k', optimize=opt)
1064
+ assert_(tmp.flags.c_contiguous is False)
1065
+ assert_(tmp.flags.f_contiguous is False)
1066
+
1067
+ tmp = np.einsum('...ft,mf->...mt', a, b, optimize=opt)
1068
+ assert_(tmp.flags.c_contiguous is False)
1069
+ assert_(tmp.flags.f_contiguous is False)
1070
+
1071
+ c = np.ones((4, 3), order='C')
1072
+ for opt in [True, False]:
1073
+ tmp = np.einsum('...ft,mf->...mt', a, c, order='a', optimize=opt)
1074
+ assert_(tmp.flags.c_contiguous)
1075
+
1076
+ d = np.ones((2, 3, 5), order='C')
1077
+ for opt in [True, False]:
1078
+ tmp = np.einsum('...ft,mf->...mt', d, c, order='a', optimize=opt)
1079
+ assert_(tmp.flags.c_contiguous)
1080
+
1081
+ class TestEinsumPath:
1082
+ def build_operands(self, string, size_dict=global_size_dict):
1083
+
1084
+ # Builds views based off initial operands
1085
+ operands = [string]
1086
+ terms = string.split('->')[0].split(',')
1087
+ for term in terms:
1088
+ dims = [size_dict[x] for x in term]
1089
+ operands.append(np.random.rand(*dims))
1090
+
1091
+ return operands
1092
+
1093
+ def assert_path_equal(self, comp, benchmark):
1094
+ # Checks if list of tuples are equivalent
1095
+ ret = (len(comp) == len(benchmark))
1096
+ assert_(ret)
1097
+ for pos in range(len(comp) - 1):
1098
+ ret &= isinstance(comp[pos + 1], tuple)
1099
+ ret &= (comp[pos + 1] == benchmark[pos + 1])
1100
+ assert_(ret)
1101
+
1102
+ def test_memory_contraints(self):
1103
+ # Ensure memory constraints are satisfied
1104
+
1105
+ outer_test = self.build_operands('a,b,c->abc')
1106
+
1107
+ path, path_str = np.einsum_path(*outer_test, optimize=('greedy', 0))
1108
+ self.assert_path_equal(path, ['einsum_path', (0, 1, 2)])
1109
+
1110
+ path, path_str = np.einsum_path(*outer_test, optimize=('optimal', 0))
1111
+ self.assert_path_equal(path, ['einsum_path', (0, 1, 2)])
1112
+
1113
+ long_test = self.build_operands('acdf,jbje,gihb,hfac')
1114
+ path, path_str = np.einsum_path(*long_test, optimize=('greedy', 0))
1115
+ self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)])
1116
+
1117
+ path, path_str = np.einsum_path(*long_test, optimize=('optimal', 0))
1118
+ self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)])
1119
+
1120
+ def test_long_paths(self):
1121
+ # Long complex cases
1122
+
1123
+ # Long test 1
1124
+ long_test1 = self.build_operands('acdf,jbje,gihb,hfac,gfac,gifabc,hfac')
1125
+ path, path_str = np.einsum_path(*long_test1, optimize='greedy')
1126
+ self.assert_path_equal(path, ['einsum_path',
1127
+ (3, 6), (3, 4), (2, 4), (2, 3), (0, 2), (0, 1)])
1128
+
1129
+ path, path_str = np.einsum_path(*long_test1, optimize='optimal')
1130
+ self.assert_path_equal(path, ['einsum_path',
1131
+ (3, 6), (3, 4), (2, 4), (2, 3), (0, 2), (0, 1)])
1132
+
1133
+ # Long test 2
1134
+ long_test2 = self.build_operands('chd,bde,agbc,hiad,bdi,cgh,agdb')
1135
+ path, path_str = np.einsum_path(*long_test2, optimize='greedy')
1136
+ self.assert_path_equal(path, ['einsum_path',
1137
+ (3, 4), (0, 3), (3, 4), (1, 3), (1, 2), (0, 1)])
1138
+
1139
+ path, path_str = np.einsum_path(*long_test2, optimize='optimal')
1140
+ self.assert_path_equal(path, ['einsum_path',
1141
+ (0, 5), (1, 4), (3, 4), (1, 3), (1, 2), (0, 1)])
1142
+
1143
+ def test_edge_paths(self):
1144
+ # Difficult edge cases
1145
+
1146
+ # Edge test1
1147
+ edge_test1 = self.build_operands('eb,cb,fb->cef')
1148
+ path, path_str = np.einsum_path(*edge_test1, optimize='greedy')
1149
+ self.assert_path_equal(path, ['einsum_path', (0, 2), (0, 1)])
1150
+
1151
+ path, path_str = np.einsum_path(*edge_test1, optimize='optimal')
1152
+ self.assert_path_equal(path, ['einsum_path', (0, 2), (0, 1)])
1153
+
1154
+ # Edge test2
1155
+ edge_test2 = self.build_operands('dd,fb,be,cdb->cef')
1156
+ path, path_str = np.einsum_path(*edge_test2, optimize='greedy')
1157
+ self.assert_path_equal(path, ['einsum_path', (0, 3), (0, 1), (0, 1)])
1158
+
1159
+ path, path_str = np.einsum_path(*edge_test2, optimize='optimal')
1160
+ self.assert_path_equal(path, ['einsum_path', (0, 3), (0, 1), (0, 1)])
1161
+
1162
+ # Edge test3
1163
+ edge_test3 = self.build_operands('bca,cdb,dbf,afc->')
1164
+ path, path_str = np.einsum_path(*edge_test3, optimize='greedy')
1165
+ self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)])
1166
+
1167
+ path, path_str = np.einsum_path(*edge_test3, optimize='optimal')
1168
+ self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)])
1169
+
1170
+ # Edge test4
1171
+ edge_test4 = self.build_operands('dcc,fce,ea,dbf->ab')
1172
+ path, path_str = np.einsum_path(*edge_test4, optimize='greedy')
1173
+ self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 1), (0, 1)])
1174
+
1175
+ path, path_str = np.einsum_path(*edge_test4, optimize='optimal')
1176
+ self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 2), (0, 1)])
1177
+
1178
+ # Edge test5
1179
+ edge_test4 = self.build_operands('a,ac,ab,ad,cd,bd,bc->',
1180
+ size_dict={"a": 20, "b": 20, "c": 20, "d": 20})
1181
+ path, path_str = np.einsum_path(*edge_test4, optimize='greedy')
1182
+ self.assert_path_equal(path, ['einsum_path', (0, 1), (0, 1, 2, 3, 4, 5)])
1183
+
1184
+ path, path_str = np.einsum_path(*edge_test4, optimize='optimal')
1185
+ self.assert_path_equal(path, ['einsum_path', (0, 1), (0, 1, 2, 3, 4, 5)])
1186
+
1187
+ def test_path_type_input(self):
1188
+ # Test explicit path handling
1189
+ path_test = self.build_operands('dcc,fce,ea,dbf->ab')
1190
+
1191
+ path, path_str = np.einsum_path(*path_test, optimize=False)
1192
+ self.assert_path_equal(path, ['einsum_path', (0, 1, 2, 3)])
1193
+
1194
+ path, path_str = np.einsum_path(*path_test, optimize=True)
1195
+ self.assert_path_equal(path, ['einsum_path', (1, 2), (0, 1), (0, 1)])
1196
+
1197
+ exp_path = ['einsum_path', (0, 2), (0, 2), (0, 1)]
1198
+ path, path_str = np.einsum_path(*path_test, optimize=exp_path)
1199
+ self.assert_path_equal(path, exp_path)
1200
+
1201
+ # Double check einsum works on the input path
1202
+ noopt = np.einsum(*path_test, optimize=False)
1203
+ opt = np.einsum(*path_test, optimize=exp_path)
1204
+ assert_almost_equal(noopt, opt)
1205
+
1206
+ def test_path_type_input_internal_trace(self):
1207
+ #gh-20962
1208
+ path_test = self.build_operands('cab,cdd->ab')
1209
+ exp_path = ['einsum_path', (1,), (0, 1)]
1210
+
1211
+ path, path_str = np.einsum_path(*path_test, optimize=exp_path)
1212
+ self.assert_path_equal(path, exp_path)
1213
+
1214
+ # Double check einsum works on the input path
1215
+ noopt = np.einsum(*path_test, optimize=False)
1216
+ opt = np.einsum(*path_test, optimize=exp_path)
1217
+ assert_almost_equal(noopt, opt)
1218
+
1219
+ def test_path_type_input_invalid(self):
1220
+ path_test = self.build_operands('ab,bc,cd,de->ae')
1221
+ exp_path = ['einsum_path', (2, 3), (0, 1)]
1222
+ assert_raises(RuntimeError, np.einsum, *path_test, optimize=exp_path)
1223
+ assert_raises(
1224
+ RuntimeError, np.einsum_path, *path_test, optimize=exp_path)
1225
+
1226
+ path_test = self.build_operands('a,a,a->a')
1227
+ exp_path = ['einsum_path', (1,), (0, 1)]
1228
+ assert_raises(RuntimeError, np.einsum, *path_test, optimize=exp_path)
1229
+ assert_raises(
1230
+ RuntimeError, np.einsum_path, *path_test, optimize=exp_path)
1231
+
1232
+ def test_spaces(self):
1233
+ #gh-10794
1234
+ arr = np.array([[1]])
1235
+ for sp in itertools.product(['', ' '], repeat=4):
1236
+ # no error for any spacing
1237
+ np.einsum('{}...a{}->{}...a{}'.format(*sp), arr)
1238
+
1239
+ def test_overlap():
1240
+ a = np.arange(9, dtype=int).reshape(3, 3)
1241
+ b = np.arange(9, dtype=int).reshape(3, 3)
1242
+ d = np.dot(a, b)
1243
+ # sanity check
1244
+ c = np.einsum('ij,jk->ik', a, b)
1245
+ assert_equal(c, d)
1246
+ #gh-10080, out overlaps one of the operands
1247
+ c = np.einsum('ij,jk->ik', a, b, out=b)
1248
+ assert_equal(c, d)
.venv/lib/python3.11/site-packages/numpy/core/tests/test_errstate.py ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+ import sysconfig
3
+
4
+ import numpy as np
5
+ from numpy.testing import assert_, assert_raises, IS_WASM
6
+
7
+ # The floating point emulation on ARM EABI systems lacking a hardware FPU is
8
+ # known to be buggy. This is an attempt to identify these hosts. It may not
9
+ # catch all possible cases, but it catches the known cases of gh-413 and
10
+ # gh-15562.
11
# Detect ARM EABI soft-float hosts from the build triple; their FP
# emulation is the known-buggy case (gh-413, gh-15562).
hosttype = sysconfig.get_config_var('HOST_GNU_TYPE')
arm_softfloat = hosttype is not None and hosttype.endswith('gnueabi')
13
+
14
class TestErrstate:
    """Tests for the ``np.errstate`` context manager and decorator."""

    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
    @pytest.mark.skipif(arm_softfloat,
                        reason='platform/cpu issue with FPU (gh-413,-15562)')
    def test_invalid(self):
        # An inner errstate overrides the outer one only for the listed
        # categories and only inside its own scope.
        with np.errstate(all='raise', under='ignore'):
            a = -np.arange(3)
            # This should work
            with np.errstate(invalid='ignore'):
                np.sqrt(a)
            # While this should fail!
            with assert_raises(FloatingPointError):
                np.sqrt(a)

    @pytest.mark.skipif(IS_WASM, reason="fp errors don't work in wasm")
    @pytest.mark.skipif(arm_softfloat,
                        reason='platform/cpu issue with FPU (gh-15562)')
    def test_divide(self):
        # 'divide' can be toggled independently of the other categories.
        with np.errstate(all='raise', under='ignore'):
            a = -np.arange(3)
            # This should work
            with np.errstate(divide='ignore'):
                a // 0
            # While this should fail!
            with assert_raises(FloatingPointError):
                a // 0
            # As should this, see gh-15562
            with assert_raises(FloatingPointError):
                a // a

    def test_errcall(self):
        # The 'call' entry installs a callback (queried via np.geterrcall)
        # and restores the previous one on exit.
        def foo(*args):
            print(args)

        olderrcall = np.geterrcall()
        with np.errstate(call=foo):
            assert_(np.geterrcall() is foo, 'call is not foo')
            with np.errstate(call=None):
                assert_(np.geterrcall() is None, 'call is not None')
        assert_(np.geterrcall() is olderrcall, 'call is not olderrcall')

    def test_errstate_decorator(self):
        # np.errstate also works as a function decorator.
        @np.errstate(all='ignore')
        def foo():
            a = -np.arange(3)
            a // 0

        foo()
.venv/lib/python3.11/site-packages/numpy/core/tests/test_function_base.py ADDED
@@ -0,0 +1,446 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+ from numpy import (
3
+ logspace, linspace, geomspace, dtype, array, sctypes, arange, isnan,
4
+ ndarray, sqrt, nextafter, stack, errstate
5
+ )
6
+ from numpy.testing import (
7
+ assert_, assert_equal, assert_raises, assert_array_equal, assert_allclose,
8
+ )
9
+
10
+
11
class PhysicalQuantity(float):
    """A float subclass used to check that the *space functions accept
    float subclasses: addition/subtraction insist both operands are
    PhysicalQuantity, and every operation returns a PhysicalQuantity."""

    def __new__(cls, value):
        return float.__new__(cls, value)

    def __add__(self, other):
        assert_(isinstance(other, PhysicalQuantity))
        return PhysicalQuantity(float(other) + float(self))
    __radd__ = __add__

    def __sub__(self, other):
        assert_(isinstance(other, PhysicalQuantity))
        return PhysicalQuantity(float(self) - float(other))

    def __rsub__(self, other):
        assert_(isinstance(other, PhysicalQuantity))
        return PhysicalQuantity(float(other) - float(self))

    def __mul__(self, other):
        return PhysicalQuantity(float(other) * float(self))
    __rmul__ = __mul__

    def __div__(self, other):
        return PhysicalQuantity(float(self) / float(other))

    def __rdiv__(self, other):
        return PhysicalQuantity(float(other) / float(self))
37
+
38
+
39
class PhysicalQuantity2(ndarray):
    """An ndarray subclass used to check that the *space functions
    preserve array subclasses in their result."""
    # High priority makes this subclass win in mixed-type operations.
    __array_priority__ = 10
41
+
42
+
43
class TestLogspace:
    """Tests for ``np.logspace``."""

    def test_basic(self):
        # Default num is 50; endpoint handling; explicit num.
        y = logspace(0, 6)
        assert_(len(y) == 50)
        y = logspace(0, 6, num=100)
        assert_(y[-1] == 10 ** 6)
        y = logspace(0, 6, endpoint=False)
        assert_(y[-1] < 10 ** 6)
        y = logspace(0, 6, num=7)
        assert_array_equal(y, [1, 10, 100, 1e3, 1e4, 1e5, 1e6])

    def test_start_stop_array(self):
        # Array-valued start/stop broadcast; results stack along a new
        # axis (default axis=0, so columns correspond to scalar calls).
        start = array([0., 1.])
        stop = array([6., 7.])
        t1 = logspace(start, stop, 6)
        t2 = stack([logspace(_start, _stop, 6)
                    for _start, _stop in zip(start, stop)], axis=1)
        assert_equal(t1, t2)
        t3 = logspace(start, stop[0], 6)
        t4 = stack([logspace(_start, stop[0], 6)
                    for _start in start], axis=1)
        assert_equal(t3, t4)
        t5 = logspace(start, stop, 6, axis=-1)
        assert_equal(t5, t2.T)

    @pytest.mark.parametrize("axis", [0, 1, -1])
    def test_base_array(self, axis: int):
        # Array-valued base broadcasts like start/stop.
        start = 1
        stop = 2
        num = 6
        base = array([1, 2])
        t1 = logspace(start, stop, num=num, base=base, axis=axis)
        t2 = stack(
            [logspace(start, stop, num=num, base=_base) for _base in base],
            axis=(axis + 1) % t1.ndim,
        )
        assert_equal(t1, t2)

    @pytest.mark.parametrize("axis", [0, 1, -1])
    def test_stop_base_array(self, axis: int):
        # stop and base may both be arrays and broadcast together.
        start = 1
        stop = array([2, 3])
        num = 6
        base = array([1, 2])
        t1 = logspace(start, stop, num=num, base=base, axis=axis)
        t2 = stack(
            [logspace(start, _stop, num=num, base=_base)
             for _stop, _base in zip(stop, base)],
            axis=(axis + 1) % t1.ndim,
        )
        assert_equal(t1, t2)

    def test_dtype(self):
        # The dtype argument is honored, including integer dtypes.
        y = logspace(0, 6, dtype='float32')
        assert_equal(y.dtype, dtype('float32'))
        y = logspace(0, 6, dtype='float64')
        assert_equal(y.dtype, dtype('float64'))
        y = logspace(0, 6, dtype='int32')
        assert_equal(y.dtype, dtype('int32'))

    def test_physical_quantities(self):
        # float subclasses are accepted as start/stop.
        a = PhysicalQuantity(1.0)
        b = PhysicalQuantity(5.0)
        assert_equal(logspace(a, b), logspace(1.0, 5.0))

    def test_subclass(self):
        # ndarray subclasses are preserved in the result.
        a = array(1).view(PhysicalQuantity2)
        b = array(7).view(PhysicalQuantity2)
        ls = logspace(a, b)
        assert type(ls) is PhysicalQuantity2
        assert_equal(ls, logspace(1.0, 7.0))
        ls = logspace(a, b, 1)
        assert type(ls) is PhysicalQuantity2
        assert_equal(ls, logspace(1.0, 7.0, 1))
118
+
119
+
120
class TestGeomspace:
    """Tests for ``np.geomspace``."""

    def test_basic(self):
        y = geomspace(1, 1e6)
        assert_(len(y) == 50)
        y = geomspace(1, 1e6, num=100)
        assert_(y[-1] == 10 ** 6)
        y = geomspace(1, 1e6, endpoint=False)
        assert_(y[-1] < 10 ** 6)
        y = geomspace(1, 1e6, num=7)
        assert_array_equal(y, [1, 10, 100, 1e3, 1e4, 1e5, 1e6])

        # Decreasing and all-negative ranges stay real-valued.
        y = geomspace(8, 2, num=3)
        assert_allclose(y, [8, 4, 2])
        assert_array_equal(y.imag, 0)

        y = geomspace(-1, -100, num=3)
        assert_array_equal(y, [-1, -10, -100])
        assert_array_equal(y.imag, 0)

        y = geomspace(-100, -1, num=3)
        assert_array_equal(y, [-100, -10, -1])
        assert_array_equal(y.imag, 0)

    def test_boundaries_match_start_and_stop_exactly(self):
        # make sure that the boundaries of the returned array exactly
        # equal 'start' and 'stop' - this isn't obvious because
        # np.exp(np.log(x)) isn't necessarily exactly equal to x
        start = 0.3
        stop = 20.3

        y = geomspace(start, stop, num=1)
        assert_equal(y[0], start)

        y = geomspace(start, stop, num=1, endpoint=False)
        assert_equal(y[0], start)

        y = geomspace(start, stop, num=3)
        assert_equal(y[0], start)
        assert_equal(y[-1], stop)

        y = geomspace(start, stop, num=3, endpoint=False)
        assert_equal(y[0], start)

    def test_nan_interior(self):
        # A sign change with a real dtype yields NaNs between endpoints.
        with errstate(invalid='ignore'):
            y = geomspace(-3, 3, num=4)

        assert_equal(y[0], -3.0)
        assert_(isnan(y[1:-1]).all())
        assert_equal(y[3], 3.0)

        with errstate(invalid='ignore'):
            y = geomspace(-3, 3, num=4, endpoint=False)

        assert_equal(y[0], -3.0)
        assert_(isnan(y[1:]).all())

    def test_complex(self):
        # Purely imaginary
        y = geomspace(1j, 16j, num=5)
        assert_allclose(y, [1j, 2j, 4j, 8j, 16j])
        assert_array_equal(y.real, 0)

        y = geomspace(-4j, -324j, num=5)
        assert_allclose(y, [-4j, -12j, -36j, -108j, -324j])
        assert_array_equal(y.real, 0)

        y = geomspace(1+1j, 1000+1000j, num=4)
        assert_allclose(y, [1+1j, 10+10j, 100+100j, 1000+1000j])

        y = geomspace(-1+1j, -1000+1000j, num=4)
        assert_allclose(y, [-1+1j, -10+10j, -100+100j, -1000+1000j])

        # Logarithmic spirals
        y = geomspace(-1, 1, num=3, dtype=complex)
        assert_allclose(y, [-1, 1j, +1])

        y = geomspace(0+3j, -3+0j, 3)
        assert_allclose(y, [0+3j, -3/sqrt(2)+3j/sqrt(2), -3+0j])
        y = geomspace(0+3j, 3+0j, 3)
        assert_allclose(y, [0+3j, 3/sqrt(2)+3j/sqrt(2), 3+0j])
        y = geomspace(-3+0j, 0-3j, 3)
        assert_allclose(y, [-3+0j, -3/sqrt(2)-3j/sqrt(2), 0-3j])
        y = geomspace(0+3j, -3+0j, 3)
        assert_allclose(y, [0+3j, -3/sqrt(2)+3j/sqrt(2), -3+0j])
        y = geomspace(-2-3j, 5+7j, 7)
        assert_allclose(y, [-2-3j, -0.29058977-4.15771027j,
                            2.08885354-4.34146838j, 4.58345529-3.16355218j,
                            6.41401745-0.55233457j, 6.75707386+3.11795092j,
                            5+7j])

        # Type promotion should prevent the -5 from becoming a NaN
        y = geomspace(3j, -5, 2)
        assert_allclose(y, [3j, -5])
        y = geomspace(-5, 3j, 2)
        assert_allclose(y, [-5, 3j])

    def test_dtype(self):
        # The dtype argument is honored, including integer dtypes.
        y = geomspace(1, 1e6, dtype='float32')
        assert_equal(y.dtype, dtype('float32'))
        y = geomspace(1, 1e6, dtype='float64')
        assert_equal(y.dtype, dtype('float64'))
        y = geomspace(1, 1e6, dtype='int32')
        assert_equal(y.dtype, dtype('int32'))

        # Native types
        y = geomspace(1, 1e6, dtype=float)
        assert_equal(y.dtype, dtype('float_'))
        y = geomspace(1, 1e6, dtype=complex)
        assert_equal(y.dtype, dtype('complex'))

    def test_start_stop_array_scalar(self):
        # Scalars drawn from low-precision integer arrays still give
        # results close to the float equivalents.
        lim1 = array([120, 100], dtype="int8")
        lim2 = array([-120, -100], dtype="int8")
        lim3 = array([1200, 1000], dtype="uint16")
        t1 = geomspace(lim1[0], lim1[1], 5)
        t2 = geomspace(lim2[0], lim2[1], 5)
        t3 = geomspace(lim3[0], lim3[1], 5)
        t4 = geomspace(120.0, 100.0, 5)
        t5 = geomspace(-120.0, -100.0, 5)
        t6 = geomspace(1200.0, 1000.0, 5)

        # t3 uses float32, t6 uses float64
        assert_allclose(t1, t4, rtol=1e-2)
        assert_allclose(t2, t5, rtol=1e-2)
        assert_allclose(t3, t6, rtol=1e-5)

    def test_start_stop_array(self):
        # Try to use all special cases.
        start = array([1.e0, 32., 1j, -4j, 1+1j, -1])
        stop = array([1.e4, 2., 16j, -324j, 10000+10000j, 1])
        t1 = geomspace(start, stop, 5)
        t2 = stack([geomspace(_start, _stop, 5)
                    for _start, _stop in zip(start, stop)], axis=1)
        assert_equal(t1, t2)
        t3 = geomspace(start, stop[0], 5)
        t4 = stack([geomspace(_start, stop[0], 5)
                    for _start in start], axis=1)
        assert_equal(t3, t4)
        t5 = geomspace(start, stop, 5, axis=-1)
        assert_equal(t5, t2.T)

    def test_physical_quantities(self):
        # float subclasses are accepted as start/stop.
        a = PhysicalQuantity(1.0)
        b = PhysicalQuantity(5.0)
        assert_equal(geomspace(a, b), geomspace(1.0, 5.0))

    def test_subclass(self):
        # ndarray subclasses are preserved in the result.
        a = array(1).view(PhysicalQuantity2)
        b = array(7).view(PhysicalQuantity2)
        gs = geomspace(a, b)
        assert type(gs) is PhysicalQuantity2
        assert_equal(gs, geomspace(1.0, 7.0))
        gs = geomspace(a, b, 1)
        assert type(gs) is PhysicalQuantity2
        assert_equal(gs, geomspace(1.0, 7.0, 1))

    def test_bounds(self):
        # Zero endpoints are invalid for a geometric progression.
        assert_raises(ValueError, geomspace, 0, 10)
        assert_raises(ValueError, geomspace, 10, 0)
        assert_raises(ValueError, geomspace, 0, 0)
282
+
283
+
284
class TestLinspace:
    """Tests for ``np.linspace``."""

    def test_basic(self):
        y = linspace(0, 10)
        assert_(len(y) == 50)
        y = linspace(2, 10, num=100)
        assert_(y[-1] == 10)
        y = linspace(2, 10, endpoint=False)
        assert_(y[-1] < 10)
        assert_raises(ValueError, linspace, 0, 10, num=-1)

    def test_corner(self):
        # num=1 returns just the start; non-integer num is rejected.
        y = list(linspace(0, 1, 1))
        assert_(y == [0.0], y)
        assert_raises(TypeError, linspace, 0, 1, num=2.5)

    def test_type(self):
        # The result dtype does not depend on num.
        t1 = linspace(0, 1, 0).dtype
        t2 = linspace(0, 1, 1).dtype
        t3 = linspace(0, 1, 2).dtype
        assert_equal(t1, t2)
        assert_equal(t2, t3)

    def test_dtype(self):
        # The dtype argument is honored, including integer dtypes.
        y = linspace(0, 6, dtype='float32')
        assert_equal(y.dtype, dtype('float32'))
        y = linspace(0, 6, dtype='float64')
        assert_equal(y.dtype, dtype('float64'))
        y = linspace(0, 6, dtype='int32')
        assert_equal(y.dtype, dtype('int32'))

    def test_start_stop_array_scalar(self):
        # Scalars drawn from low-precision integer arrays must match the
        # float equivalents exactly.
        lim1 = array([-120, 100], dtype="int8")
        lim2 = array([120, -100], dtype="int8")
        lim3 = array([1200, 1000], dtype="uint16")
        t1 = linspace(lim1[0], lim1[1], 5)
        t2 = linspace(lim2[0], lim2[1], 5)
        t3 = linspace(lim3[0], lim3[1], 5)
        t4 = linspace(-120.0, 100.0, 5)
        t5 = linspace(120.0, -100.0, 5)
        t6 = linspace(1200.0, 1000.0, 5)
        assert_equal(t1, t4)
        assert_equal(t2, t5)
        assert_equal(t3, t6)

    def test_start_stop_array(self):
        # Array-valued start/stop broadcast; results stack along a new
        # axis (default axis=0, so columns correspond to scalar calls).
        start = array([-120, 120], dtype="int8")
        stop = array([100, -100], dtype="int8")
        t1 = linspace(start, stop, 5)
        t2 = stack([linspace(_start, _stop, 5)
                    for _start, _stop in zip(start, stop)], axis=1)
        assert_equal(t1, t2)
        t3 = linspace(start, stop[0], 5)
        t4 = stack([linspace(_start, stop[0], 5)
                    for _start in start], axis=1)
        assert_equal(t3, t4)
        t5 = linspace(start, stop, 5, axis=-1)
        assert_equal(t5, t2.T)

    def test_complex(self):
        # Complex endpoints interpolate real and imaginary parts linearly.
        lim1 = linspace(1 + 2j, 3 + 4j, 5)
        t1 = array([1.0+2.j, 1.5+2.5j, 2.0+3j, 2.5+3.5j, 3.0+4j])
        lim2 = linspace(1j, 10, 5)
        t2 = array([0.0+1.j, 2.5+0.75j, 5.0+0.5j, 7.5+0.25j, 10.0+0j])
        assert_equal(lim1, t1)
        assert_equal(lim2, t2)

    def test_physical_quantities(self):
        # float subclasses are accepted as start/stop.
        a = PhysicalQuantity(0.0)
        b = PhysicalQuantity(1.0)
        assert_equal(linspace(a, b), linspace(0.0, 1.0))

    def test_subclass(self):
        # ndarray subclasses are preserved in the result.
        a = array(0).view(PhysicalQuantity2)
        b = array(1).view(PhysicalQuantity2)
        ls = linspace(a, b)
        assert type(ls) is PhysicalQuantity2
        assert_equal(ls, linspace(0.0, 1.0))
        ls = linspace(a, b, 1)
        assert type(ls) is PhysicalQuantity2
        assert_equal(ls, linspace(0.0, 1.0, 1))

    def test_array_interface(self):
        # Regression test for https://github.com/numpy/numpy/pull/6659
        # Ensure that start/stop can be objects that implement
        # __array_interface__ and are convertible to numeric scalars

        class Arrayish:
            """
            A generic object that supports the __array_interface__ and hence
            can in principle be converted to a numeric scalar, but is not
            otherwise recognized as numeric, but also happens to support
            multiplication by floats.

            Data should be an object that implements the buffer interface,
            and contains at least 4 bytes.
            """

            def __init__(self, data):
                self._data = data

            @property
            def __array_interface__(self):
                return {'shape': (), 'typestr': '<i4', 'data': self._data,
                        'version': 3}

            def __mul__(self, other):
                # For the purposes of this test any multiplication is an
                # identity operation :)
                return self

        one = Arrayish(array(1, dtype='<i4'))
        five = Arrayish(array(5, dtype='<i4'))

        assert_equal(linspace(one, five), linspace(1, 5))

    def test_denormal_numbers(self):
        # Regression test for gh-5437. Will probably fail when compiled
        # with ICC, which flushes denormals to zero
        for ftype in sctypes['float']:
            stop = nextafter(ftype(0), ftype(1)) * 5  # A denormal number
            assert_(any(linspace(0, stop, 10, endpoint=False, dtype=ftype)))

    def test_equivalent_to_arange(self):
        # For integer dtypes linspace must match arange exactly.
        for j in range(1000):
            assert_equal(linspace(0, j, j+1, dtype=int),
                         arange(j+1, dtype=int))

    def test_retstep(self):
        # retstep=True returns a (samples, step) tuple; the step is NaN
        # whenever it is undefined, except for the num=1 open interval.
        for num in [0, 1, 2]:
            for ept in [False, True]:
                y = linspace(0, 1, num, endpoint=ept, retstep=True)
                assert isinstance(y, tuple) and len(y) == 2
                if num == 2:
                    y0_expect = [0.0, 1.0] if ept else [0.0, 0.5]
                    assert_array_equal(y[0], y0_expect)
                    assert_equal(y[1], y0_expect[1])
                elif num == 1 and not ept:
                    assert_array_equal(y[0], [0.0])
                    assert_equal(y[1], 1.0)
                else:
                    assert_array_equal(y[0], [0.0][:num])
                    assert isnan(y[1])

    def test_object(self):
        # object-dtype endpoints go through the Python-float code path.
        start = array(1, dtype='O')
        stop = array(2, dtype='O')
        y = linspace(start, stop, 3)
        assert_array_equal(y, array([1., 1.5, 2.]))

    def test_round_negative(self):
        # Integer output truncates toward zero, not toward -inf.
        y = linspace(-1, 3, num=8, dtype=int)
        t = array([-1, -1, 0, 0, 1, 1, 2, 3], dtype=int)
        assert_array_equal(y, t)

    def test_any_step_zero_and_not_mult_inplace(self):
        # any_step_zero is True, _mult_inplace is False
        start = array([0.0, 1.0])
        stop = array([2.0, 1.0])
        y = linspace(start, stop, 3)
        assert_array_equal(y, array([[0.0, 1.0], [1.0, 1.0], [2.0, 1.0]]))
445
+
446
+
.venv/lib/python3.11/site-packages/numpy/core/tests/test_half.py ADDED
@@ -0,0 +1,572 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import platform
2
+ import pytest
3
+
4
+ import numpy as np
5
+ from numpy import uint16, float16, float32, float64
6
+ from numpy.testing import assert_, assert_equal, _OLD_PROMOTION, IS_WASM
7
+
8
+
9
def assert_raises_fpe(strmatch, callable, *args, **kwargs):
    """Assert that ``callable(*args, **kwargs)`` raises a
    FloatingPointError whose message contains ``strmatch``.

    Fails with a distinct message depending on whether no error was
    raised at all, or a FloatingPointError with the wrong text was
    raised.  (The original code reported "Did not raise" in both cases,
    which is misleading when an FPE of the wrong kind *was* raised.)
    """
    try:
        callable(*args, **kwargs)
    except FloatingPointError as exc:
        # An FPE was raised; verify it is the expected kind.
        assert_(str(exc).find(strmatch) >= 0,
                "Raised floating point error did not match %s: %s" %
                (strmatch, exc))
    else:
        assert_(False,
                "Did not raise floating point %s error" % strmatch)
19
+ class TestHalf:
20
+ def setup_method(self):
21
+ # An array of all possible float16 values
22
+ self.all_f16 = np.arange(0x10000, dtype=uint16)
23
+ self.all_f16.dtype = float16
24
+
25
+ # NaN value can cause an invalid FP exception if HW is been used
26
+ with np.errstate(invalid='ignore'):
27
+ self.all_f32 = np.array(self.all_f16, dtype=float32)
28
+ self.all_f64 = np.array(self.all_f16, dtype=float64)
29
+
30
+ # An array of all non-NaN float16 values, in sorted order
31
+ self.nonan_f16 = np.concatenate(
32
+ (np.arange(0xfc00, 0x7fff, -1, dtype=uint16),
33
+ np.arange(0x0000, 0x7c01, 1, dtype=uint16)))
34
+ self.nonan_f16.dtype = float16
35
+ self.nonan_f32 = np.array(self.nonan_f16, dtype=float32)
36
+ self.nonan_f64 = np.array(self.nonan_f16, dtype=float64)
37
+
38
+ # An array of all finite float16 values, in sorted order
39
+ self.finite_f16 = self.nonan_f16[1:-1]
40
+ self.finite_f32 = self.nonan_f32[1:-1]
41
+ self.finite_f64 = self.nonan_f64[1:-1]
42
+
43
+ def test_half_conversions(self):
44
+ """Checks that all 16-bit values survive conversion
45
+ to/from 32-bit and 64-bit float"""
46
+ # Because the underlying routines preserve the NaN bits, every
47
+ # value is preserved when converting to/from other floats.
48
+
49
+ # Convert from float32 back to float16
50
+ with np.errstate(invalid='ignore'):
51
+ b = np.array(self.all_f32, dtype=float16)
52
+ # avoid testing NaNs due to differ bits wither Q/SNaNs
53
+ b_nn = b == b
54
+ assert_equal(self.all_f16[b_nn].view(dtype=uint16),
55
+ b[b_nn].view(dtype=uint16))
56
+
57
+ # Convert from float64 back to float16
58
+ with np.errstate(invalid='ignore'):
59
+ b = np.array(self.all_f64, dtype=float16)
60
+ b_nn = b == b
61
+ assert_equal(self.all_f16[b_nn].view(dtype=uint16),
62
+ b[b_nn].view(dtype=uint16))
63
+
64
+ # Convert float16 to longdouble and back
65
+ # This doesn't necessarily preserve the extra NaN bits,
66
+ # so exclude NaNs.
67
+ a_ld = np.array(self.nonan_f16, dtype=np.longdouble)
68
+ b = np.array(a_ld, dtype=float16)
69
+ assert_equal(self.nonan_f16.view(dtype=uint16),
70
+ b.view(dtype=uint16))
71
+
72
+ # Check the range for which all integers can be represented
73
+ i_int = np.arange(-2048, 2049)
74
+ i_f16 = np.array(i_int, dtype=float16)
75
+ j = np.array(i_f16, dtype=int)
76
+ assert_equal(i_int, j)
77
+
78
+ @pytest.mark.parametrize("string_dt", ["S", "U"])
79
+ def test_half_conversion_to_string(self, string_dt):
80
+ # Currently uses S/U32 (which is sufficient for float32)
81
+ expected_dt = np.dtype(f"{string_dt}32")
82
+ assert np.promote_types(np.float16, string_dt) == expected_dt
83
+ assert np.promote_types(string_dt, np.float16) == expected_dt
84
+
85
+ arr = np.ones(3, dtype=np.float16).astype(string_dt)
86
+ assert arr.dtype == expected_dt
87
+
88
+ @pytest.mark.parametrize("string_dt", ["S", "U"])
89
+ def test_half_conversion_from_string(self, string_dt):
90
+ string = np.array("3.1416", dtype=string_dt)
91
+ assert string.astype(np.float16) == np.array(3.1416, dtype=np.float16)
92
+
93
+ @pytest.mark.parametrize("offset", [None, "up", "down"])
94
+ @pytest.mark.parametrize("shift", [None, "up", "down"])
95
+ @pytest.mark.parametrize("float_t", [np.float32, np.float64])
96
+ @np._no_nep50_warning()
97
+ def test_half_conversion_rounding(self, float_t, shift, offset):
98
+ # Assumes that round to even is used during casting.
99
+ max_pattern = np.float16(np.finfo(np.float16).max).view(np.uint16)
100
+
101
+ # Test all (positive) finite numbers, denormals are most interesting
102
+ # however:
103
+ f16s_patterns = np.arange(0, max_pattern+1, dtype=np.uint16)
104
+ f16s_float = f16s_patterns.view(np.float16).astype(float_t)
105
+
106
+ # Shift the values by half a bit up or a down (or do not shift),
107
+ if shift == "up":
108
+ f16s_float = 0.5 * (f16s_float[:-1] + f16s_float[1:])[1:]
109
+ elif shift == "down":
110
+ f16s_float = 0.5 * (f16s_float[:-1] + f16s_float[1:])[:-1]
111
+ else:
112
+ f16s_float = f16s_float[1:-1]
113
+
114
+ # Increase the float by a minimal value:
115
+ if offset == "up":
116
+ f16s_float = np.nextafter(f16s_float, float_t(np.inf))
117
+ elif offset == "down":
118
+ f16s_float = np.nextafter(f16s_float, float_t(-np.inf))
119
+
120
+ # Convert back to float16 and its bit pattern:
121
+ res_patterns = f16s_float.astype(np.float16).view(np.uint16)
122
+
123
+ # The above calculations tries the original values, or the exact
124
+ # mid points between the float16 values. It then further offsets them
125
+ # by as little as possible. If no offset occurs, "round to even"
126
+ # logic will be necessary, an arbitrarily small offset should cause
127
+ # normal up/down rounding always.
128
+
129
+ # Calculate the expected pattern:
130
+ cmp_patterns = f16s_patterns[1:-1].copy()
131
+
132
+ if shift == "down" and offset != "up":
133
+ shift_pattern = -1
134
+ elif shift == "up" and offset != "down":
135
+ shift_pattern = 1
136
+ else:
137
+ # There cannot be a shift, either shift is None, so all rounding
138
+ # will go back to original, or shift is reduced by offset too much.
139
+ shift_pattern = 0
140
+
141
+ # If rounding occurs, is it normal rounding or round to even?
142
+ if offset is None:
143
+ # Round to even occurs, modify only non-even, cast to allow + (-1)
144
+ cmp_patterns[0::2].view(np.int16)[...] += shift_pattern
145
+ else:
146
+ cmp_patterns.view(np.int16)[...] += shift_pattern
147
+
148
+ assert_equal(res_patterns, cmp_patterns)
149
+
150
+ @pytest.mark.parametrize(["float_t", "uint_t", "bits"],
151
+ [(np.float32, np.uint32, 23),
152
+ (np.float64, np.uint64, 52)])
153
+ def test_half_conversion_denormal_round_even(self, float_t, uint_t, bits):
154
+ # Test specifically that all bits are considered when deciding
155
+ # whether round to even should occur (i.e. no bits are lost at the
156
+ # end. Compare also gh-12721. The most bits can get lost for the
157
+ # smallest denormal:
158
+ smallest_value = np.uint16(1).view(np.float16).astype(float_t)
159
+ assert smallest_value == 2**-24
160
+
161
+ # Will be rounded to zero based on round to even rule:
162
+ rounded_to_zero = smallest_value / float_t(2)
163
+ assert rounded_to_zero.astype(np.float16) == 0
164
+
165
+ # The significand will be all 0 for the float_t, test that we do not
166
+ # lose the lower ones of these:
167
+ for i in range(bits):
168
+ # slightly increasing the value should make it round up:
169
+ larger_pattern = rounded_to_zero.view(uint_t) | uint_t(1 << i)
170
+ larger_value = larger_pattern.view(float_t)
171
+ assert larger_value.astype(np.float16) == smallest_value
172
+
173
+ def test_nans_infs(self):
174
+ with np.errstate(all='ignore'):
175
+ # Check some of the ufuncs
176
+ assert_equal(np.isnan(self.all_f16), np.isnan(self.all_f32))
177
+ assert_equal(np.isinf(self.all_f16), np.isinf(self.all_f32))
178
+ assert_equal(np.isfinite(self.all_f16), np.isfinite(self.all_f32))
179
+ assert_equal(np.signbit(self.all_f16), np.signbit(self.all_f32))
180
+ assert_equal(np.spacing(float16(65504)), np.inf)
181
+
182
+ # Check comparisons of all values with NaN
183
+ nan = float16(np.nan)
184
+
185
+ assert_(not (self.all_f16 == nan).any())
186
+ assert_(not (nan == self.all_f16).any())
187
+
188
+ assert_((self.all_f16 != nan).all())
189
+ assert_((nan != self.all_f16).all())
190
+
191
+ assert_(not (self.all_f16 < nan).any())
192
+ assert_(not (nan < self.all_f16).any())
193
+
194
+ assert_(not (self.all_f16 <= nan).any())
195
+ assert_(not (nan <= self.all_f16).any())
196
+
197
+ assert_(not (self.all_f16 > nan).any())
198
+ assert_(not (nan > self.all_f16).any())
199
+
200
+ assert_(not (self.all_f16 >= nan).any())
201
+ assert_(not (nan >= self.all_f16).any())
202
+
203
+ def test_half_values(self):
204
+ """Confirms a small number of known half values"""
205
+ a = np.array([1.0, -1.0,
206
+ 2.0, -2.0,
207
+ 0.0999755859375, 0.333251953125, # 1/10, 1/3
208
+ 65504, -65504, # Maximum magnitude
209
+ 2.0**(-14), -2.0**(-14), # Minimum normal
210
+ 2.0**(-24), -2.0**(-24), # Minimum subnormal
211
+ 0, -1/1e1000, # Signed zeros
212
+ np.inf, -np.inf])
213
+ b = np.array([0x3c00, 0xbc00,
214
+ 0x4000, 0xc000,
215
+ 0x2e66, 0x3555,
216
+ 0x7bff, 0xfbff,
217
+ 0x0400, 0x8400,
218
+ 0x0001, 0x8001,
219
+ 0x0000, 0x8000,
220
+ 0x7c00, 0xfc00], dtype=uint16)
221
+ b.dtype = float16
222
+ assert_equal(a, b)
223
+
224
+ def test_half_rounding(self):
225
+ """Checks that rounding when converting to half is correct"""
226
+ a = np.array([2.0**-25 + 2.0**-35, # Rounds to minimum subnormal
227
+ 2.0**-25, # Underflows to zero (nearest even mode)
228
+ 2.0**-26, # Underflows to zero
229
+ 1.0+2.0**-11 + 2.0**-16, # rounds to 1.0+2**(-10)
230
+ 1.0+2.0**-11, # rounds to 1.0 (nearest even mode)
231
+ 1.0+2.0**-12, # rounds to 1.0
232
+ 65519, # rounds to 65504
233
+ 65520], # rounds to inf
234
+ dtype=float64)
235
+ rounded = [2.0**-24,
236
+ 0.0,
237
+ 0.0,
238
+ 1.0+2.0**(-10),
239
+ 1.0,
240
+ 1.0,
241
+ 65504,
242
+ np.inf]
243
+
244
+ # Check float64->float16 rounding
245
+ with np.errstate(over="ignore"):
246
+ b = np.array(a, dtype=float16)
247
+ assert_equal(b, rounded)
248
+
249
+ # Check float32->float16 rounding
250
+ a = np.array(a, dtype=float32)
251
+ with np.errstate(over="ignore"):
252
+ b = np.array(a, dtype=float16)
253
+ assert_equal(b, rounded)
254
+
255
+ def test_half_correctness(self):
256
+ """Take every finite float16, and check the casting functions with
257
+ a manual conversion."""
258
+
259
+ # Create an array of all finite float16s
260
+ a_bits = self.finite_f16.view(dtype=uint16)
261
+
262
+ # Convert to 64-bit float manually
263
+ a_sgn = (-1.0)**((a_bits & 0x8000) >> 15)
264
+ a_exp = np.array((a_bits & 0x7c00) >> 10, dtype=np.int32) - 15
265
+ a_man = (a_bits & 0x03ff) * 2.0**(-10)
266
+ # Implicit bit of normalized floats
267
+ a_man[a_exp != -15] += 1
268
+ # Denormalized exponent is -14
269
+ a_exp[a_exp == -15] = -14
270
+
271
+ a_manual = a_sgn * a_man * 2.0**a_exp
272
+
273
+ a32_fail = np.nonzero(self.finite_f32 != a_manual)[0]
274
+ if len(a32_fail) != 0:
275
+ bad_index = a32_fail[0]
276
+ assert_equal(self.finite_f32, a_manual,
277
+ "First non-equal is half value 0x%x -> %g != %g" %
278
+ (a_bits[bad_index],
279
+ self.finite_f32[bad_index],
280
+ a_manual[bad_index]))
281
+
282
+ a64_fail = np.nonzero(self.finite_f64 != a_manual)[0]
283
+ if len(a64_fail) != 0:
284
+ bad_index = a64_fail[0]
285
+ assert_equal(self.finite_f64, a_manual,
286
+ "First non-equal is half value 0x%x -> %g != %g" %
287
+ (a_bits[bad_index],
288
+ self.finite_f64[bad_index],
289
+ a_manual[bad_index]))
290
+
291
+ def test_half_ordering(self):
292
+ """Make sure comparisons are working right"""
293
+
294
+ # All non-NaN float16 values in reverse order
295
+ a = self.nonan_f16[::-1].copy()
296
+
297
+ # 32-bit float copy
298
+ b = np.array(a, dtype=float32)
299
+
300
+ # Should sort the same
301
+ a.sort()
302
+ b.sort()
303
+ assert_equal(a, b)
304
+
305
+ # Comparisons should work
306
+ assert_((a[:-1] <= a[1:]).all())
307
+ assert_(not (a[:-1] > a[1:]).any())
308
+ assert_((a[1:] >= a[:-1]).all())
309
+ assert_(not (a[1:] < a[:-1]).any())
310
+ # All != except for +/-0
311
+ assert_equal(np.nonzero(a[:-1] < a[1:])[0].size, a.size-2)
312
+ assert_equal(np.nonzero(a[1:] > a[:-1])[0].size, a.size-2)
313
+
314
+ def test_half_funcs(self):
315
+ """Test the various ArrFuncs"""
316
+
317
+ # fill
318
+ assert_equal(np.arange(10, dtype=float16),
319
+ np.arange(10, dtype=float32))
320
+
321
+ # fillwithscalar
322
+ a = np.zeros((5,), dtype=float16)
323
+ a.fill(1)
324
+ assert_equal(a, np.ones((5,), dtype=float16))
325
+
326
+ # nonzero and copyswap
327
+ a = np.array([0, 0, -1, -1/1e20, 0, 2.0**-24, 7.629e-6], dtype=float16)
328
+ assert_equal(a.nonzero()[0],
329
+ [2, 5, 6])
330
+ a = a.byteswap()
331
+ a = a.view(a.dtype.newbyteorder())
332
+ assert_equal(a.nonzero()[0],
333
+ [2, 5, 6])
334
+
335
+ # dot
336
+ a = np.arange(0, 10, 0.5, dtype=float16)
337
+ b = np.ones((20,), dtype=float16)
338
+ assert_equal(np.dot(a, b),
339
+ 95)
340
+
341
+ # argmax
342
+ a = np.array([0, -np.inf, -2, 0.5, 12.55, 7.3, 2.1, 12.4], dtype=float16)
343
+ assert_equal(a.argmax(),
344
+ 4)
345
+ a = np.array([0, -np.inf, -2, np.inf, 12.55, np.nan, 2.1, 12.4], dtype=float16)
346
+ assert_equal(a.argmax(),
347
+ 5)
348
+
349
+ # getitem
350
+ a = np.arange(10, dtype=float16)
351
+ for i in range(10):
352
+ assert_equal(a.item(i), i)
353
+
354
+ def test_spacing_nextafter(self):
355
+ """Test np.spacing and np.nextafter"""
356
+ # All non-negative finite #'s
357
+ a = np.arange(0x7c00, dtype=uint16)
358
+ hinf = np.array((np.inf,), dtype=float16)
359
+ hnan = np.array((np.nan,), dtype=float16)
360
+ a_f16 = a.view(dtype=float16)
361
+
362
+ assert_equal(np.spacing(a_f16[:-1]), a_f16[1:]-a_f16[:-1])
363
+
364
+ assert_equal(np.nextafter(a_f16[:-1], hinf), a_f16[1:])
365
+ assert_equal(np.nextafter(a_f16[0], -hinf), -a_f16[1])
366
+ assert_equal(np.nextafter(a_f16[1:], -hinf), a_f16[:-1])
367
+
368
+ assert_equal(np.nextafter(hinf, a_f16), a_f16[-1])
369
+ assert_equal(np.nextafter(-hinf, a_f16), -a_f16[-1])
370
+
371
+ assert_equal(np.nextafter(hinf, hinf), hinf)
372
+ assert_equal(np.nextafter(hinf, -hinf), a_f16[-1])
373
+ assert_equal(np.nextafter(-hinf, hinf), -a_f16[-1])
374
+ assert_equal(np.nextafter(-hinf, -hinf), -hinf)
375
+
376
+ assert_equal(np.nextafter(a_f16, hnan), hnan[0])
377
+ assert_equal(np.nextafter(hnan, a_f16), hnan[0])
378
+
379
+ assert_equal(np.nextafter(hnan, hnan), hnan)
380
+ assert_equal(np.nextafter(hinf, hnan), hnan)
381
+ assert_equal(np.nextafter(hnan, hinf), hnan)
382
+
383
+ # switch to negatives
384
+ a |= 0x8000
385
+
386
+ assert_equal(np.spacing(a_f16[0]), np.spacing(a_f16[1]))
387
+ assert_equal(np.spacing(a_f16[1:]), a_f16[:-1]-a_f16[1:])
388
+
389
+ assert_equal(np.nextafter(a_f16[0], hinf), -a_f16[1])
390
+ assert_equal(np.nextafter(a_f16[1:], hinf), a_f16[:-1])
391
+ assert_equal(np.nextafter(a_f16[:-1], -hinf), a_f16[1:])
392
+
393
+ assert_equal(np.nextafter(hinf, a_f16), -a_f16[-1])
394
+ assert_equal(np.nextafter(-hinf, a_f16), a_f16[-1])
395
+
396
+ assert_equal(np.nextafter(a_f16, hnan), hnan[0])
397
+ assert_equal(np.nextafter(hnan, a_f16), hnan[0])
398
+
399
+ def test_half_ufuncs(self):
400
+ """Test the various ufuncs"""
401
+
402
+ a = np.array([0, 1, 2, 4, 2], dtype=float16)
403
+ b = np.array([-2, 5, 1, 4, 3], dtype=float16)
404
+ c = np.array([0, -1, -np.inf, np.nan, 6], dtype=float16)
405
+
406
+ assert_equal(np.add(a, b), [-2, 6, 3, 8, 5])
407
+ assert_equal(np.subtract(a, b), [2, -4, 1, 0, -1])
408
+ assert_equal(np.multiply(a, b), [0, 5, 2, 16, 6])
409
+ assert_equal(np.divide(a, b), [0, 0.199951171875, 2, 1, 0.66650390625])
410
+
411
+ assert_equal(np.equal(a, b), [False, False, False, True, False])
412
+ assert_equal(np.not_equal(a, b), [True, True, True, False, True])
413
+ assert_equal(np.less(a, b), [False, True, False, False, True])
414
+ assert_equal(np.less_equal(a, b), [False, True, False, True, True])
415
+ assert_equal(np.greater(a, b), [True, False, True, False, False])
416
+ assert_equal(np.greater_equal(a, b), [True, False, True, True, False])
417
+ assert_equal(np.logical_and(a, b), [False, True, True, True, True])
418
+ assert_equal(np.logical_or(a, b), [True, True, True, True, True])
419
+ assert_equal(np.logical_xor(a, b), [True, False, False, False, False])
420
+ assert_equal(np.logical_not(a), [True, False, False, False, False])
421
+
422
+ assert_equal(np.isnan(c), [False, False, False, True, False])
423
+ assert_equal(np.isinf(c), [False, False, True, False, False])
424
+ assert_equal(np.isfinite(c), [True, True, False, False, True])
425
+ assert_equal(np.signbit(b), [True, False, False, False, False])
426
+
427
+ assert_equal(np.copysign(b, a), [2, 5, 1, 4, 3])
428
+
429
+ assert_equal(np.maximum(a, b), [0, 5, 2, 4, 3])
430
+
431
+ x = np.maximum(b, c)
432
+ assert_(np.isnan(x[3]))
433
+ x[3] = 0
434
+ assert_equal(x, [0, 5, 1, 0, 6])
435
+
436
+ assert_equal(np.minimum(a, b), [-2, 1, 1, 4, 2])
437
+
438
+ x = np.minimum(b, c)
439
+ assert_(np.isnan(x[3]))
440
+ x[3] = 0
441
+ assert_equal(x, [-2, -1, -np.inf, 0, 3])
442
+
443
+ assert_equal(np.fmax(a, b), [0, 5, 2, 4, 3])
444
+ assert_equal(np.fmax(b, c), [0, 5, 1, 4, 6])
445
+ assert_equal(np.fmin(a, b), [-2, 1, 1, 4, 2])
446
+ assert_equal(np.fmin(b, c), [-2, -1, -np.inf, 4, 3])
447
+
448
+ assert_equal(np.floor_divide(a, b), [0, 0, 2, 1, 0])
449
+ assert_equal(np.remainder(a, b), [0, 1, 0, 0, 2])
450
+ assert_equal(np.divmod(a, b), ([0, 0, 2, 1, 0], [0, 1, 0, 0, 2]))
451
+ assert_equal(np.square(b), [4, 25, 1, 16, 9])
452
+ assert_equal(np.reciprocal(b), [-0.5, 0.199951171875, 1, 0.25, 0.333251953125])
453
+ assert_equal(np.ones_like(b), [1, 1, 1, 1, 1])
454
+ assert_equal(np.conjugate(b), b)
455
+ assert_equal(np.absolute(b), [2, 5, 1, 4, 3])
456
+ assert_equal(np.negative(b), [2, -5, -1, -4, -3])
457
+ assert_equal(np.positive(b), b)
458
+ assert_equal(np.sign(b), [-1, 1, 1, 1, 1])
459
+ assert_equal(np.modf(b), ([0, 0, 0, 0, 0], b))
460
+ assert_equal(np.frexp(b), ([-0.5, 0.625, 0.5, 0.5, 0.75], [2, 3, 1, 3, 2]))
461
+ assert_equal(np.ldexp(b, [0, 1, 2, 4, 2]), [-2, 10, 4, 64, 12])
462
+
463
@np._no_nep50_warning()
def test_half_coercion(self, weak_promotion):
    """Test that half gets coerced properly with the other types.

    Checks the result dtype of ``np.power`` for mixes of float16/float32
    arrays and scalars, under both legacy and weak (NEP 50) promotion
    (the ``weak_promotion`` fixture tells us which one is active).
    """
    a16 = np.array((1,), dtype=float16)
    a32 = np.array((1,), dtype=float32)
    b16 = float16(1)
    b32 = float32(1)

    # float16 array with Python scalars / float16 scalar stays float16 ...
    assert np.power(a16, 2).dtype == float16
    assert np.power(a16, 2.0).dtype == float16
    assert np.power(a16, b16).dtype == float16
    # ... but a float32 scalar wins only under weak promotion.
    expected_dt = float32 if weak_promotion else float16
    assert np.power(a16, b32).dtype == expected_dt
    assert np.power(a16, a16).dtype == float16
    assert np.power(a16, a32).dtype == float32

    # float16 scalar with Python scalars: legacy promotes to float64.
    expected_dt = float16 if weak_promotion else float64
    assert np.power(b16, 2).dtype == expected_dt
    assert np.power(b16, 2.0).dtype == expected_dt
    # BUG FIX: these four previously read ``assert expr, floatXX`` — the
    # comma made floatXX the assertion *message*, so only the truthiness
    # of the dtype was checked and the tests could never fail.  Compare
    # the dtypes for real (results are the same under both promotion
    # states, so no ``weak_promotion`` branch is needed).
    assert np.power(b16, b16).dtype == float16
    assert np.power(b16, b32).dtype == float32
    assert np.power(b16, a16).dtype == float16
    assert np.power(b16, a32).dtype == float32

    assert np.power(a32, a16).dtype == float32
    assert np.power(a32, b16).dtype == float32
    # float32 scalar with a float16 array: the array dtype wins under
    # legacy rules, the scalar's under weak promotion.
    expected_dt = float32 if weak_promotion else float16
    assert np.power(b32, a16).dtype == expected_dt
    assert np.power(b32, b16).dtype == float32
@pytest.mark.skipif(platform.machine() == "armv5tel",
                    reason="See gh-413.")
@pytest.mark.skipif(IS_WASM,
                    reason="fp exceptions don't work in wasm.")
def test_half_fpe(self):
    """Check that float16 arithmetic raises the proper FP exceptions.

    With ``np.errstate(all='raise')`` every floating-point status flag
    becomes a Python exception, so each ``assert_raises_fpe`` call
    verifies that the half-precision operation sets exactly the named
    flag ('underflow', 'overflow' or 'invalid'), and the trailing block
    of bare expressions verifies operations that must NOT raise.
    """
    with np.errstate(all='raise'):
        # s* = small values near the subnormal range, b* = big values
        # near the float16 overflow threshold; *x16 = 1-element array,
        # *y16 = scalar, exercising both array and scalar code paths.
        sx16 = np.array((1e-4,), dtype=float16)
        bx16 = np.array((1e4,), dtype=float16)
        sy16 = float16(1e-4)
        by16 = float16(1e4)

        # Underflow errors
        assert_raises_fpe('underflow', lambda a, b:a*b, sx16, sx16)
        assert_raises_fpe('underflow', lambda a, b:a*b, sx16, sy16)
        assert_raises_fpe('underflow', lambda a, b:a*b, sy16, sx16)
        assert_raises_fpe('underflow', lambda a, b:a*b, sy16, sy16)
        assert_raises_fpe('underflow', lambda a, b:a/b, sx16, bx16)
        assert_raises_fpe('underflow', lambda a, b:a/b, sx16, by16)
        assert_raises_fpe('underflow', lambda a, b:a/b, sy16, bx16)
        assert_raises_fpe('underflow', lambda a, b:a/b, sy16, by16)
        # Results below the smallest normal (2**-14) must signal
        # underflow even when representable as subnormals.
        assert_raises_fpe('underflow', lambda a, b:a/b,
                          float16(2.**-14), float16(2**11))
        assert_raises_fpe('underflow', lambda a, b:a/b,
                          float16(-2.**-14), float16(2**11))
        assert_raises_fpe('underflow', lambda a, b:a/b,
                          float16(2.**-14+2**-24), float16(2))
        assert_raises_fpe('underflow', lambda a, b:a/b,
                          float16(-2.**-14-2**-24), float16(2))
        assert_raises_fpe('underflow', lambda a, b:a/b,
                          float16(2.**-14+2**-23), float16(4))

        # Overflow errors
        assert_raises_fpe('overflow', lambda a, b:a*b, bx16, bx16)
        assert_raises_fpe('overflow', lambda a, b:a*b, bx16, by16)
        assert_raises_fpe('overflow', lambda a, b:a*b, by16, bx16)
        assert_raises_fpe('overflow', lambda a, b:a*b, by16, by16)
        assert_raises_fpe('overflow', lambda a, b:a/b, bx16, sx16)
        assert_raises_fpe('overflow', lambda a, b:a/b, bx16, sy16)
        assert_raises_fpe('overflow', lambda a, b:a/b, by16, sx16)
        assert_raises_fpe('overflow', lambda a, b:a/b, by16, sy16)
        # 65504 is the largest finite float16; pushing past it (by
        # arithmetic, nextafter, or spacing) must signal overflow.
        assert_raises_fpe('overflow', lambda a, b:a+b,
                          float16(65504), float16(17))
        assert_raises_fpe('overflow', lambda a, b:a-b,
                          float16(-65504), float16(17))
        assert_raises_fpe('overflow', np.nextafter, float16(65504), float16(np.inf))
        assert_raises_fpe('overflow', np.nextafter, float16(-65504), float16(-np.inf))
        assert_raises_fpe('overflow', np.spacing, float16(65504))

        # Invalid value errors
        assert_raises_fpe('invalid', np.divide, float16(np.inf), float16(np.inf))
        assert_raises_fpe('invalid', np.spacing, float16(np.inf))
        assert_raises_fpe('invalid', np.spacing, float16(np.nan))

        # These should not raise
        float16(65472)+float16(32)
        float16(2**-13)/float16(2)
        float16(2**-14)/float16(2**10)
        np.spacing(float16(-65504))
        np.nextafter(float16(65504), float16(-np.inf))
        np.nextafter(float16(-65504), float16(np.inf))
        np.nextafter(float16(np.inf), float16(0))
        np.nextafter(float16(-np.inf), float16(0))
        np.nextafter(float16(0), float16(np.nan))
        np.nextafter(float16(np.nan), float16(0))
        float16(2**-14)/float16(2**10)
        float16(-2**-14)/float16(2**10)
        float16(2**-14+2**-23)/float16(2)
        float16(-2**-14-2**-23)/float16(2)
+ def test_half_array_interface(self):
563
+ """Test that half is compatible with __array_interface__"""
564
+ class Dummy:
565
+ pass
566
+
567
+ a = np.ones((1,), dtype=float16)
568
+ b = Dummy()
569
+ b.__array_interface__ = a.__array_interface__
570
+ c = np.array(b)
571
+ assert_(c.dtype == float16)
572
+ assert_equal(a, c)