ZTWHHH commited on
Commit
760095a
·
verified ·
1 Parent(s): 4cc9d1e

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/__pycache__/__init__.cpython-310.pyc +0 -0
  2. omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/__pycache__/test_common.cpython-310.pyc +0 -0
  3. omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/__pycache__/test_concat.cpython-310.pyc +0 -0
  4. omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/__pycache__/test_dtypes.cpython-310.pyc +0 -0
  5. omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/__pycache__/test_generic.cpython-310.pyc +0 -0
  6. omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/__pycache__/test_inference.cpython-310.pyc +0 -0
  7. omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/__pycache__/test_missing.cpython-310.pyc +0 -0
  8. omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/cast/__pycache__/__init__.cpython-310.pyc +0 -0
  9. omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/cast/__pycache__/test_can_hold_element.cpython-310.pyc +0 -0
  10. omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/cast/__pycache__/test_construct_from_scalar.cpython-310.pyc +0 -0
  11. omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/cast/__pycache__/test_construct_ndarray.cpython-310.pyc +0 -0
  12. omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/cast/__pycache__/test_construct_object_arr.cpython-310.pyc +0 -0
  13. omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/cast/__pycache__/test_dict_compat.cpython-310.pyc +0 -0
  14. omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/cast/__pycache__/test_downcast.cpython-310.pyc +0 -0
  15. omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/cast/__pycache__/test_find_common_type.cpython-310.pyc +0 -0
  16. omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/cast/__pycache__/test_infer_datetimelike.cpython-310.pyc +0 -0
  17. omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/cast/__pycache__/test_infer_dtype.cpython-310.pyc +0 -0
  18. omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/cast/__pycache__/test_maybe_box_native.cpython-310.pyc +0 -0
  19. omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/cast/__pycache__/test_promote.cpython-310.pyc +0 -0
  20. omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/cast/test_can_hold_element.py +79 -0
  21. omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/cast/test_construct_from_scalar.py +55 -0
  22. omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/cast/test_construct_ndarray.py +36 -0
  23. omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/cast/test_construct_object_arr.py +20 -0
  24. omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/cast/test_dict_compat.py +14 -0
  25. omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/cast/test_find_common_type.py +175 -0
  26. omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/cast/test_infer_dtype.py +216 -0
  27. omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/cast/test_maybe_box_native.py +40 -0
  28. omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/cast/test_promote.py +530 -0
  29. omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/test_inference.py +2047 -0
  30. omnilmm/lib/python3.10/site-packages/pandas/tests/scalar/__init__.py +0 -0
  31. omnilmm/lib/python3.10/site-packages/pandas/tests/scalar/__pycache__/__init__.cpython-310.pyc +0 -0
  32. omnilmm/lib/python3.10/site-packages/pandas/tests/scalar/__pycache__/test_na_scalar.cpython-310.pyc +0 -0
  33. omnilmm/lib/python3.10/site-packages/pandas/tests/scalar/__pycache__/test_nat.cpython-310.pyc +0 -0
  34. omnilmm/lib/python3.10/site-packages/pandas/tests/scalar/interval/__init__.py +0 -0
  35. omnilmm/lib/python3.10/site-packages/pandas/tests/scalar/interval/__pycache__/__init__.cpython-310.pyc +0 -0
  36. omnilmm/lib/python3.10/site-packages/pandas/tests/scalar/interval/__pycache__/test_arithmetic.cpython-310.pyc +0 -0
  37. omnilmm/lib/python3.10/site-packages/pandas/tests/scalar/interval/__pycache__/test_constructors.cpython-310.pyc +0 -0
  38. omnilmm/lib/python3.10/site-packages/pandas/tests/scalar/interval/__pycache__/test_contains.cpython-310.pyc +0 -0
  39. omnilmm/lib/python3.10/site-packages/pandas/tests/scalar/interval/__pycache__/test_formats.cpython-310.pyc +0 -0
  40. omnilmm/lib/python3.10/site-packages/pandas/tests/scalar/interval/__pycache__/test_interval.cpython-310.pyc +0 -0
  41. omnilmm/lib/python3.10/site-packages/pandas/tests/scalar/interval/__pycache__/test_overlaps.cpython-310.pyc +0 -0
  42. omnilmm/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_arithmetic.py +192 -0
  43. omnilmm/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_constructors.py +51 -0
  44. omnilmm/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_contains.py +73 -0
  45. omnilmm/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_formats.py +11 -0
  46. omnilmm/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_interval.py +87 -0
  47. omnilmm/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_overlaps.py +67 -0
  48. omnilmm/lib/python3.10/site-packages/pandas/tests/scalar/test_na_scalar.py +316 -0
  49. omnilmm/lib/python3.10/site-packages/pandas/tests/scalar/test_nat.py +709 -0
  50. omnilmm/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/test_constructors.py +698 -0
omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (172 Bytes). View file
 
omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/__pycache__/test_common.cpython-310.pyc ADDED
Binary file (23.9 kB). View file
 
omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/__pycache__/test_concat.cpython-310.pyc ADDED
Binary file (2.08 kB). View file
 
omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/__pycache__/test_dtypes.cpython-310.pyc ADDED
Binary file (40.1 kB). View file
 
omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/__pycache__/test_generic.cpython-310.pyc ADDED
Binary file (4.62 kB). View file
 
omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/__pycache__/test_inference.cpython-310.pyc ADDED
Binary file (58.4 kB). View file
 
omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/__pycache__/test_missing.cpython-310.pyc ADDED
Binary file (23.3 kB). View file
 
omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/cast/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (177 Bytes). View file
 
omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/cast/__pycache__/test_can_hold_element.cpython-310.pyc ADDED
Binary file (1.8 kB). View file
 
omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/cast/__pycache__/test_construct_from_scalar.cpython-310.pyc ADDED
Binary file (1.84 kB). View file
 
omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/cast/__pycache__/test_construct_ndarray.cpython-310.pyc ADDED
Binary file (1.6 kB). View file
 
omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/cast/__pycache__/test_construct_object_arr.cpython-310.pyc ADDED
Binary file (963 Bytes). View file
 
omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/cast/__pycache__/test_dict_compat.cpython-310.pyc ADDED
Binary file (666 Bytes). View file
 
omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/cast/__pycache__/test_downcast.cpython-310.pyc ADDED
Binary file (2.56 kB). View file
 
omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/cast/__pycache__/test_find_common_type.cpython-310.pyc ADDED
Binary file (4.11 kB). View file
 
omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/cast/__pycache__/test_infer_datetimelike.cpython-310.pyc ADDED
Binary file (882 Bytes). View file
 
omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/cast/__pycache__/test_infer_dtype.cpython-310.pyc ADDED
Binary file (5.67 kB). View file
 
omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/cast/__pycache__/test_maybe_box_native.cpython-310.pyc ADDED
Binary file (1.08 kB). View file
 
omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/cast/__pycache__/test_promote.cpython-310.pyc ADDED
Binary file (10.6 kB). View file
 
omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/cast/test_can_hold_element.py ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+
3
+ from pandas.core.dtypes.cast import can_hold_element
4
+
5
+
6
+ def test_can_hold_element_range(any_int_numpy_dtype):
7
+ # GH#44261
8
+ dtype = np.dtype(any_int_numpy_dtype)
9
+ arr = np.array([], dtype=dtype)
10
+
11
+ rng = range(2, 127)
12
+ assert can_hold_element(arr, rng)
13
+
14
+ # negatives -> can't be held by uint dtypes
15
+ rng = range(-2, 127)
16
+ if dtype.kind == "i":
17
+ assert can_hold_element(arr, rng)
18
+ else:
19
+ assert not can_hold_element(arr, rng)
20
+
21
+ rng = range(2, 255)
22
+ if dtype == "int8":
23
+ assert not can_hold_element(arr, rng)
24
+ else:
25
+ assert can_hold_element(arr, rng)
26
+
27
+ rng = range(-255, 65537)
28
+ if dtype.kind == "u":
29
+ assert not can_hold_element(arr, rng)
30
+ elif dtype.itemsize < 4:
31
+ assert not can_hold_element(arr, rng)
32
+ else:
33
+ assert can_hold_element(arr, rng)
34
+
35
+ # empty
36
+ rng = range(-(10**10), -(10**10))
37
+ assert len(rng) == 0
38
+ # assert can_hold_element(arr, rng)
39
+
40
+ rng = range(10**10, 10**10)
41
+ assert len(rng) == 0
42
+ assert can_hold_element(arr, rng)
43
+
44
+
45
+ def test_can_hold_element_int_values_float_ndarray():
46
+ arr = np.array([], dtype=np.int64)
47
+
48
+ element = np.array([1.0, 2.0])
49
+ assert can_hold_element(arr, element)
50
+
51
+ assert not can_hold_element(arr, element + 0.5)
52
+
53
+ # integer but not losslessly castable to int64
54
+ element = np.array([3, 2**65], dtype=np.float64)
55
+ assert not can_hold_element(arr, element)
56
+
57
+
58
+ def test_can_hold_element_int8_int():
59
+ arr = np.array([], dtype=np.int8)
60
+
61
+ element = 2
62
+ assert can_hold_element(arr, element)
63
+ assert can_hold_element(arr, np.int8(element))
64
+ assert can_hold_element(arr, np.uint8(element))
65
+ assert can_hold_element(arr, np.int16(element))
66
+ assert can_hold_element(arr, np.uint16(element))
67
+ assert can_hold_element(arr, np.int32(element))
68
+ assert can_hold_element(arr, np.uint32(element))
69
+ assert can_hold_element(arr, np.int64(element))
70
+ assert can_hold_element(arr, np.uint64(element))
71
+
72
+ element = 2**9
73
+ assert not can_hold_element(arr, element)
74
+ assert not can_hold_element(arr, np.int16(element))
75
+ assert not can_hold_element(arr, np.uint16(element))
76
+ assert not can_hold_element(arr, np.int32(element))
77
+ assert not can_hold_element(arr, np.uint32(element))
78
+ assert not can_hold_element(arr, np.int64(element))
79
+ assert not can_hold_element(arr, np.uint64(element))
omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/cast/test_construct_from_scalar.py ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pytest
3
+
4
+ from pandas.core.dtypes.cast import construct_1d_arraylike_from_scalar
5
+ from pandas.core.dtypes.dtypes import CategoricalDtype
6
+
7
+ from pandas import (
8
+ Categorical,
9
+ Timedelta,
10
+ )
11
+ import pandas._testing as tm
12
+
13
+
14
+ def test_cast_1d_array_like_from_scalar_categorical():
15
+ # see gh-19565
16
+ #
17
+ # Categorical result from scalar did not maintain
18
+ # categories and ordering of the passed dtype.
19
+ cats = ["a", "b", "c"]
20
+ cat_type = CategoricalDtype(categories=cats, ordered=False)
21
+ expected = Categorical(["a", "a"], categories=cats)
22
+
23
+ result = construct_1d_arraylike_from_scalar("a", len(expected), cat_type)
24
+ tm.assert_categorical_equal(result, expected)
25
+
26
+
27
+ def test_cast_1d_array_like_from_timestamp(fixed_now_ts):
28
+ # check we dont lose nanoseconds
29
+ ts = fixed_now_ts + Timedelta(1)
30
+ res = construct_1d_arraylike_from_scalar(ts, 2, np.dtype("M8[ns]"))
31
+ assert res[0] == ts
32
+
33
+
34
+ def test_cast_1d_array_like_from_timedelta():
35
+ # check we dont lose nanoseconds
36
+ td = Timedelta(1)
37
+ res = construct_1d_arraylike_from_scalar(td, 2, np.dtype("m8[ns]"))
38
+ assert res[0] == td
39
+
40
+
41
+ def test_cast_1d_array_like_mismatched_datetimelike():
42
+ td = np.timedelta64("NaT", "ns")
43
+ dt = np.datetime64("NaT", "ns")
44
+
45
+ with pytest.raises(TypeError, match="Cannot cast"):
46
+ construct_1d_arraylike_from_scalar(td, 2, dt.dtype)
47
+
48
+ with pytest.raises(TypeError, match="Cannot cast"):
49
+ construct_1d_arraylike_from_scalar(np.timedelta64(4, "ns"), 2, dt.dtype)
50
+
51
+ with pytest.raises(TypeError, match="Cannot cast"):
52
+ construct_1d_arraylike_from_scalar(dt, 2, td.dtype)
53
+
54
+ with pytest.raises(TypeError, match="Cannot cast"):
55
+ construct_1d_arraylike_from_scalar(np.datetime64(4, "ns"), 2, td.dtype)
omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/cast/test_construct_ndarray.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pytest
3
+
4
+ import pandas as pd
5
+ import pandas._testing as tm
6
+ from pandas.core.construction import sanitize_array
7
+
8
+
9
+ @pytest.mark.parametrize(
10
+ "values, dtype, expected",
11
+ [
12
+ ([1, 2, 3], None, np.array([1, 2, 3], dtype=np.int64)),
13
+ (np.array([1, 2, 3]), None, np.array([1, 2, 3])),
14
+ (["1", "2", None], None, np.array(["1", "2", None])),
15
+ (["1", "2", None], np.dtype("str"), np.array(["1", "2", None])),
16
+ ([1, 2, None], np.dtype("str"), np.array(["1", "2", None])),
17
+ ],
18
+ )
19
+ def test_construct_1d_ndarray_preserving_na(
20
+ values, dtype, expected, using_infer_string
21
+ ):
22
+ result = sanitize_array(values, index=None, dtype=dtype)
23
+ if using_infer_string and expected.dtype == object and dtype is None:
24
+ tm.assert_extension_array_equal(result, pd.array(expected))
25
+ else:
26
+ tm.assert_numpy_array_equal(result, expected)
27
+
28
+
29
+ @pytest.mark.parametrize("dtype", ["m8[ns]", "M8[ns]"])
30
+ def test_construct_1d_ndarray_preserving_na_datetimelike(dtype):
31
+ arr = np.arange(5, dtype=np.int64).view(dtype)
32
+ expected = np.array(list(arr), dtype=object)
33
+ assert all(isinstance(x, type(arr[0])) for x in expected)
34
+
35
+ result = sanitize_array(arr, index=None, dtype=np.dtype(object))
36
+ tm.assert_numpy_array_equal(result, expected)
omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/cast/test_construct_object_arr.py ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+
3
+ from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
4
+
5
+
6
+ @pytest.mark.parametrize("datum1", [1, 2.0, "3", (4, 5), [6, 7], None])
7
+ @pytest.mark.parametrize("datum2", [8, 9.0, "10", (11, 12), [13, 14], None])
8
+ def test_cast_1d_array(datum1, datum2):
9
+ data = [datum1, datum2]
10
+ result = construct_1d_object_array_from_listlike(data)
11
+
12
+ # Direct comparison fails: https://github.com/numpy/numpy/issues/10218
13
+ assert result.dtype == "object"
14
+ assert list(result) == data
15
+
16
+
17
+ @pytest.mark.parametrize("val", [1, 2.0, None])
18
+ def test_cast_1d_array_invalid_scalar(val):
19
+ with pytest.raises(TypeError, match="has no len()"):
20
+ construct_1d_object_array_from_listlike(val)
omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/cast/test_dict_compat.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+
3
+ from pandas.core.dtypes.cast import dict_compat
4
+
5
+ from pandas import Timestamp
6
+
7
+
8
+ def test_dict_compat():
9
+ data_datetime64 = {np.datetime64("1990-03-15"): 1, np.datetime64("2015-03-15"): 2}
10
+ data_unchanged = {1: 2, 3: 4, 5: 6}
11
+ expected = {Timestamp("1990-3-15"): 1, Timestamp("2015-03-15"): 2}
12
+ assert dict_compat(data_datetime64) == expected
13
+ assert dict_compat(expected) == expected
14
+ assert dict_compat(data_unchanged) == data_unchanged
omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/cast/test_find_common_type.py ADDED
@@ -0,0 +1,175 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pytest
3
+
4
+ from pandas.core.dtypes.cast import find_common_type
5
+ from pandas.core.dtypes.common import pandas_dtype
6
+ from pandas.core.dtypes.dtypes import (
7
+ CategoricalDtype,
8
+ DatetimeTZDtype,
9
+ IntervalDtype,
10
+ PeriodDtype,
11
+ )
12
+
13
+ from pandas import (
14
+ Categorical,
15
+ Index,
16
+ )
17
+
18
+
19
+ @pytest.mark.parametrize(
20
+ "source_dtypes,expected_common_dtype",
21
+ [
22
+ ((np.int64,), np.int64),
23
+ ((np.uint64,), np.uint64),
24
+ ((np.float32,), np.float32),
25
+ ((object,), object),
26
+ # Into ints.
27
+ ((np.int16, np.int64), np.int64),
28
+ ((np.int32, np.uint32), np.int64),
29
+ ((np.uint16, np.uint64), np.uint64),
30
+ # Into floats.
31
+ ((np.float16, np.float32), np.float32),
32
+ ((np.float16, np.int16), np.float32),
33
+ ((np.float32, np.int16), np.float32),
34
+ ((np.uint64, np.int64), np.float64),
35
+ ((np.int16, np.float64), np.float64),
36
+ ((np.float16, np.int64), np.float64),
37
+ # Into others.
38
+ ((np.complex128, np.int32), np.complex128),
39
+ ((object, np.float32), object),
40
+ ((object, np.int16), object),
41
+ # Bool with int.
42
+ ((np.dtype("bool"), np.int64), object),
43
+ ((np.dtype("bool"), np.int32), object),
44
+ ((np.dtype("bool"), np.int16), object),
45
+ ((np.dtype("bool"), np.int8), object),
46
+ ((np.dtype("bool"), np.uint64), object),
47
+ ((np.dtype("bool"), np.uint32), object),
48
+ ((np.dtype("bool"), np.uint16), object),
49
+ ((np.dtype("bool"), np.uint8), object),
50
+ # Bool with float.
51
+ ((np.dtype("bool"), np.float64), object),
52
+ ((np.dtype("bool"), np.float32), object),
53
+ (
54
+ (np.dtype("datetime64[ns]"), np.dtype("datetime64[ns]")),
55
+ np.dtype("datetime64[ns]"),
56
+ ),
57
+ (
58
+ (np.dtype("timedelta64[ns]"), np.dtype("timedelta64[ns]")),
59
+ np.dtype("timedelta64[ns]"),
60
+ ),
61
+ (
62
+ (np.dtype("datetime64[ns]"), np.dtype("datetime64[ms]")),
63
+ np.dtype("datetime64[ns]"),
64
+ ),
65
+ (
66
+ (np.dtype("timedelta64[ms]"), np.dtype("timedelta64[ns]")),
67
+ np.dtype("timedelta64[ns]"),
68
+ ),
69
+ ((np.dtype("datetime64[ns]"), np.dtype("timedelta64[ns]")), object),
70
+ ((np.dtype("datetime64[ns]"), np.int64), object),
71
+ ],
72
+ )
73
+ def test_numpy_dtypes(source_dtypes, expected_common_dtype):
74
+ source_dtypes = [pandas_dtype(x) for x in source_dtypes]
75
+ assert find_common_type(source_dtypes) == expected_common_dtype
76
+
77
+
78
+ def test_raises_empty_input():
79
+ with pytest.raises(ValueError, match="no types given"):
80
+ find_common_type([])
81
+
82
+
83
+ @pytest.mark.parametrize(
84
+ "dtypes,exp_type",
85
+ [
86
+ ([CategoricalDtype()], "category"),
87
+ ([object, CategoricalDtype()], object),
88
+ ([CategoricalDtype(), CategoricalDtype()], "category"),
89
+ ],
90
+ )
91
+ def test_categorical_dtype(dtypes, exp_type):
92
+ assert find_common_type(dtypes) == exp_type
93
+
94
+
95
+ def test_datetimetz_dtype_match():
96
+ dtype = DatetimeTZDtype(unit="ns", tz="US/Eastern")
97
+ assert find_common_type([dtype, dtype]) == "datetime64[ns, US/Eastern]"
98
+
99
+
100
+ @pytest.mark.parametrize(
101
+ "dtype2",
102
+ [
103
+ DatetimeTZDtype(unit="ns", tz="Asia/Tokyo"),
104
+ np.dtype("datetime64[ns]"),
105
+ object,
106
+ np.int64,
107
+ ],
108
+ )
109
+ def test_datetimetz_dtype_mismatch(dtype2):
110
+ dtype = DatetimeTZDtype(unit="ns", tz="US/Eastern")
111
+ assert find_common_type([dtype, dtype2]) == object
112
+ assert find_common_type([dtype2, dtype]) == object
113
+
114
+
115
+ def test_period_dtype_match():
116
+ dtype = PeriodDtype(freq="D")
117
+ assert find_common_type([dtype, dtype]) == "period[D]"
118
+
119
+
120
+ @pytest.mark.parametrize(
121
+ "dtype2",
122
+ [
123
+ DatetimeTZDtype(unit="ns", tz="Asia/Tokyo"),
124
+ PeriodDtype(freq="2D"),
125
+ PeriodDtype(freq="h"),
126
+ np.dtype("datetime64[ns]"),
127
+ object,
128
+ np.int64,
129
+ ],
130
+ )
131
+ def test_period_dtype_mismatch(dtype2):
132
+ dtype = PeriodDtype(freq="D")
133
+ assert find_common_type([dtype, dtype2]) == object
134
+ assert find_common_type([dtype2, dtype]) == object
135
+
136
+
137
+ interval_dtypes = [
138
+ IntervalDtype(np.int64, "right"),
139
+ IntervalDtype(np.float64, "right"),
140
+ IntervalDtype(np.uint64, "right"),
141
+ IntervalDtype(DatetimeTZDtype(unit="ns", tz="US/Eastern"), "right"),
142
+ IntervalDtype("M8[ns]", "right"),
143
+ IntervalDtype("m8[ns]", "right"),
144
+ ]
145
+
146
+
147
+ @pytest.mark.parametrize("left", interval_dtypes)
148
+ @pytest.mark.parametrize("right", interval_dtypes)
149
+ def test_interval_dtype(left, right):
150
+ result = find_common_type([left, right])
151
+
152
+ if left is right:
153
+ assert result is left
154
+
155
+ elif left.subtype.kind in ["i", "u", "f"]:
156
+ # i.e. numeric
157
+ if right.subtype.kind in ["i", "u", "f"]:
158
+ # both numeric -> common numeric subtype
159
+ expected = IntervalDtype(np.float64, "right")
160
+ assert result == expected
161
+ else:
162
+ assert result == object
163
+
164
+ else:
165
+ assert result == object
166
+
167
+
168
+ @pytest.mark.parametrize("dtype", interval_dtypes)
169
+ def test_interval_dtype_with_categorical(dtype):
170
+ obj = Index([], dtype=dtype)
171
+
172
+ cat = Categorical([], categories=obj)
173
+
174
+ result = find_common_type([dtype, cat.dtype])
175
+ assert result == dtype
omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/cast/test_infer_dtype.py ADDED
@@ -0,0 +1,216 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from datetime import (
2
+ date,
3
+ datetime,
4
+ timedelta,
5
+ )
6
+
7
+ import numpy as np
8
+ import pytest
9
+
10
+ from pandas.core.dtypes.cast import (
11
+ infer_dtype_from,
12
+ infer_dtype_from_array,
13
+ infer_dtype_from_scalar,
14
+ )
15
+ from pandas.core.dtypes.common import is_dtype_equal
16
+
17
+ from pandas import (
18
+ Categorical,
19
+ Interval,
20
+ Period,
21
+ Series,
22
+ Timedelta,
23
+ Timestamp,
24
+ date_range,
25
+ )
26
+
27
+
28
+ def test_infer_dtype_from_int_scalar(any_int_numpy_dtype):
29
+ # Test that infer_dtype_from_scalar is
30
+ # returning correct dtype for int and float.
31
+ data = np.dtype(any_int_numpy_dtype).type(12)
32
+ dtype, val = infer_dtype_from_scalar(data)
33
+ assert dtype == type(data)
34
+
35
+
36
+ def test_infer_dtype_from_float_scalar(float_numpy_dtype):
37
+ float_numpy_dtype = np.dtype(float_numpy_dtype).type
38
+ data = float_numpy_dtype(12)
39
+
40
+ dtype, val = infer_dtype_from_scalar(data)
41
+ assert dtype == float_numpy_dtype
42
+
43
+
44
+ @pytest.mark.parametrize(
45
+ "data,exp_dtype", [(12, np.int64), (np.float64(12), np.float64)]
46
+ )
47
+ def test_infer_dtype_from_python_scalar(data, exp_dtype):
48
+ dtype, val = infer_dtype_from_scalar(data)
49
+ assert dtype == exp_dtype
50
+
51
+
52
+ @pytest.mark.parametrize("bool_val", [True, False])
53
+ def test_infer_dtype_from_boolean(bool_val):
54
+ dtype, val = infer_dtype_from_scalar(bool_val)
55
+ assert dtype == np.bool_
56
+
57
+
58
+ def test_infer_dtype_from_complex(complex_dtype):
59
+ data = np.dtype(complex_dtype).type(1)
60
+ dtype, val = infer_dtype_from_scalar(data)
61
+ assert dtype == np.complex128
62
+
63
+
64
+ def test_infer_dtype_from_datetime():
65
+ dt64 = np.datetime64(1, "ns")
66
+ dtype, val = infer_dtype_from_scalar(dt64)
67
+ assert dtype == "M8[ns]"
68
+
69
+ ts = Timestamp(1)
70
+ dtype, val = infer_dtype_from_scalar(ts)
71
+ assert dtype == "M8[ns]"
72
+
73
+ dt = datetime(2000, 1, 1, 0, 0)
74
+ dtype, val = infer_dtype_from_scalar(dt)
75
+ assert dtype == "M8[us]"
76
+
77
+
78
+ def test_infer_dtype_from_timedelta():
79
+ td64 = np.timedelta64(1, "ns")
80
+ dtype, val = infer_dtype_from_scalar(td64)
81
+ assert dtype == "m8[ns]"
82
+
83
+ pytd = timedelta(1)
84
+ dtype, val = infer_dtype_from_scalar(pytd)
85
+ assert dtype == "m8[us]"
86
+
87
+ td = Timedelta(1)
88
+ dtype, val = infer_dtype_from_scalar(td)
89
+ assert dtype == "m8[ns]"
90
+
91
+
92
+ @pytest.mark.parametrize("freq", ["M", "D"])
93
+ def test_infer_dtype_from_period(freq):
94
+ p = Period("2011-01-01", freq=freq)
95
+ dtype, val = infer_dtype_from_scalar(p)
96
+
97
+ exp_dtype = f"period[{freq}]"
98
+
99
+ assert dtype == exp_dtype
100
+ assert val == p
101
+
102
+
103
+ def test_infer_dtype_misc():
104
+ dt = date(2000, 1, 1)
105
+ dtype, val = infer_dtype_from_scalar(dt)
106
+ assert dtype == np.object_
107
+
108
+ ts = Timestamp(1, tz="US/Eastern")
109
+ dtype, val = infer_dtype_from_scalar(ts)
110
+ assert dtype == "datetime64[ns, US/Eastern]"
111
+
112
+
113
+ @pytest.mark.parametrize("tz", ["UTC", "US/Eastern", "Asia/Tokyo"])
114
+ def test_infer_from_scalar_tz(tz):
115
+ dt = Timestamp(1, tz=tz)
116
+ dtype, val = infer_dtype_from_scalar(dt)
117
+
118
+ exp_dtype = f"datetime64[ns, {tz}]"
119
+
120
+ assert dtype == exp_dtype
121
+ assert val == dt
122
+
123
+
124
+ @pytest.mark.parametrize(
125
+ "left, right, subtype",
126
+ [
127
+ (0, 1, "int64"),
128
+ (0.0, 1.0, "float64"),
129
+ (Timestamp(0), Timestamp(1), "datetime64[ns]"),
130
+ (Timestamp(0, tz="UTC"), Timestamp(1, tz="UTC"), "datetime64[ns, UTC]"),
131
+ (Timedelta(0), Timedelta(1), "timedelta64[ns]"),
132
+ ],
133
+ )
134
+ def test_infer_from_interval(left, right, subtype, closed):
135
+ # GH 30337
136
+ interval = Interval(left, right, closed)
137
+ result_dtype, result_value = infer_dtype_from_scalar(interval)
138
+ expected_dtype = f"interval[{subtype}, {closed}]"
139
+ assert result_dtype == expected_dtype
140
+ assert result_value == interval
141
+
142
+
143
+ def test_infer_dtype_from_scalar_errors():
144
+ msg = "invalid ndarray passed to infer_dtype_from_scalar"
145
+
146
+ with pytest.raises(ValueError, match=msg):
147
+ infer_dtype_from_scalar(np.array([1]))
148
+
149
+
150
+ @pytest.mark.parametrize(
151
+ "value, expected",
152
+ [
153
+ ("foo", np.object_),
154
+ (b"foo", np.object_),
155
+ (1, np.int64),
156
+ (1.5, np.float64),
157
+ (np.datetime64("2016-01-01"), np.dtype("M8[s]")),
158
+ (Timestamp("20160101"), np.dtype("M8[s]")),
159
+ (Timestamp("20160101", tz="UTC"), "datetime64[s, UTC]"),
160
+ ],
161
+ )
162
+ def test_infer_dtype_from_scalar(value, expected, using_infer_string):
163
+ dtype, _ = infer_dtype_from_scalar(value)
164
+ if using_infer_string and value == "foo":
165
+ expected = "string"
166
+ assert is_dtype_equal(dtype, expected)
167
+
168
+ with pytest.raises(TypeError, match="must be list-like"):
169
+ infer_dtype_from_array(value)
170
+
171
+
172
+ @pytest.mark.parametrize(
173
+ "arr, expected",
174
+ [
175
+ ([1], np.dtype(int)),
176
+ (np.array([1], dtype=np.int64), np.int64),
177
+ ([np.nan, 1, ""], np.object_),
178
+ (np.array([[1.0, 2.0]]), np.float64),
179
+ (Categorical(list("aabc")), "category"),
180
+ (Categorical([1, 2, 3]), "category"),
181
+ (date_range("20160101", periods=3), np.dtype("=M8[ns]")),
182
+ (
183
+ date_range("20160101", periods=3, tz="US/Eastern"),
184
+ "datetime64[ns, US/Eastern]",
185
+ ),
186
+ (Series([1.0, 2, 3]), np.float64),
187
+ (Series(list("abc")), np.object_),
188
+ (
189
+ Series(date_range("20160101", periods=3, tz="US/Eastern")),
190
+ "datetime64[ns, US/Eastern]",
191
+ ),
192
+ ],
193
+ )
194
+ def test_infer_dtype_from_array(arr, expected, using_infer_string):
195
+ dtype, _ = infer_dtype_from_array(arr)
196
+ if (
197
+ using_infer_string
198
+ and isinstance(arr, Series)
199
+ and arr.tolist() == ["a", "b", "c"]
200
+ ):
201
+ expected = "string"
202
+ assert is_dtype_equal(dtype, expected)
203
+
204
+
205
+ @pytest.mark.parametrize("cls", [np.datetime64, np.timedelta64])
206
+ def test_infer_dtype_from_scalar_zerodim_datetimelike(cls):
207
+ # ndarray.item() can incorrectly return int instead of td64/dt64
208
+ val = cls(1234, "ns")
209
+ arr = np.array(val)
210
+
211
+ dtype, res = infer_dtype_from_scalar(arr)
212
+ assert dtype.type is cls
213
+ assert isinstance(res, cls)
214
+
215
+ dtype, res = infer_dtype_from(arr)
216
+ assert dtype.type is cls
omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/cast/test_maybe_box_native.py ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from datetime import datetime
2
+
3
+ import numpy as np
4
+ import pytest
5
+
6
+ from pandas.core.dtypes.cast import maybe_box_native
7
+
8
+ from pandas import (
9
+ Interval,
10
+ Period,
11
+ Timedelta,
12
+ Timestamp,
13
+ )
14
+
15
+
16
+ @pytest.mark.parametrize(
17
+ "obj,expected_dtype",
18
+ [
19
+ (b"\x00\x10", bytes),
20
+ (int(4), int),
21
+ (np.uint(4), int),
22
+ (np.int32(-4), int),
23
+ (np.uint8(4), int),
24
+ (float(454.98), float),
25
+ (np.float16(0.4), float),
26
+ (np.float64(1.4), float),
27
+ (np.bool_(False), bool),
28
+ (datetime(2005, 2, 25), datetime),
29
+ (np.datetime64("2005-02-25"), Timestamp),
30
+ (Timestamp("2005-02-25"), Timestamp),
31
+ (np.timedelta64(1, "D"), Timedelta),
32
+ (Timedelta(1, "D"), Timedelta),
33
+ (Interval(0, 1), Interval),
34
+ (Period("4Q2005"), Period),
35
+ ],
36
+ )
37
+ def test_maybe_box_native(obj, expected_dtype):
38
+ boxed_obj = maybe_box_native(obj)
39
+ result_dtype = type(boxed_obj)
40
+ assert result_dtype is expected_dtype
omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/cast/test_promote.py ADDED
@@ -0,0 +1,530 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ These test the method maybe_promote from core/dtypes/cast.py
3
+ """
4
+
5
+ import datetime
6
+ from decimal import Decimal
7
+
8
+ import numpy as np
9
+ import pytest
10
+
11
+ from pandas._libs.tslibs import NaT
12
+
13
+ from pandas.core.dtypes.cast import maybe_promote
14
+ from pandas.core.dtypes.common import is_scalar
15
+ from pandas.core.dtypes.dtypes import DatetimeTZDtype
16
+ from pandas.core.dtypes.missing import isna
17
+
18
+ import pandas as pd
19
+
20
+
21
+ def _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar=None):
22
+ """
23
+ Auxiliary function to unify testing of scalar/array promotion.
24
+
25
+ Parameters
26
+ ----------
27
+ dtype : dtype
28
+ The value to pass on as the first argument to maybe_promote.
29
+ fill_value : scalar
30
+ The value to pass on as the second argument to maybe_promote as
31
+ a scalar.
32
+ expected_dtype : dtype
33
+ The expected dtype returned by maybe_promote (by design this is the
34
+ same regardless of whether fill_value was passed as a scalar or in an
35
+ array!).
36
+ exp_val_for_scalar : scalar
37
+ The expected value for the (potentially upcast) fill_value returned by
38
+ maybe_promote.
39
+ """
40
+ assert is_scalar(fill_value)
41
+
42
+ # here, we pass on fill_value as a scalar directly; the expected value
43
+ # returned from maybe_promote is fill_value, potentially upcast to the
44
+ # returned dtype.
45
+ result_dtype, result_fill_value = maybe_promote(dtype, fill_value)
46
+ expected_fill_value = exp_val_for_scalar
47
+
48
+ assert result_dtype == expected_dtype
49
+ _assert_match(result_fill_value, expected_fill_value)
50
+
51
+
52
+ def _assert_match(result_fill_value, expected_fill_value):
53
+ # GH#23982/25425 require the same type in addition to equality/NA-ness
54
+ res_type = type(result_fill_value)
55
+ ex_type = type(expected_fill_value)
56
+
57
+ if hasattr(result_fill_value, "dtype"):
58
+ # Compare types in a way that is robust to platform-specific
59
+ # idiosyncrasies where e.g. sometimes we get "ulonglong" as an alias
60
+ # for "uint64" or "intc" as an alias for "int32"
61
+ assert result_fill_value.dtype.kind == expected_fill_value.dtype.kind
62
+ assert result_fill_value.dtype.itemsize == expected_fill_value.dtype.itemsize
63
+ else:
64
+ # On some builds, type comparison fails, e.g. np.int32 != np.int32
65
+ assert res_type == ex_type or res_type.__name__ == ex_type.__name__
66
+
67
+ match_value = result_fill_value == expected_fill_value
68
+ if match_value is pd.NA:
69
+ match_value = False
70
+
71
+ # Note: type check above ensures that we have the _same_ NA value
72
+ # for missing values, None == None (which is checked
73
+ # through match_value above), but np.nan != np.nan and pd.NaT != pd.NaT
74
+ match_missing = isna(result_fill_value) and isna(expected_fill_value)
75
+
76
+ assert match_value or match_missing
77
+
78
+
79
+ @pytest.mark.parametrize(
80
+ "dtype, fill_value, expected_dtype",
81
+ [
82
+ # size 8
83
+ ("int8", 1, "int8"),
84
+ ("int8", np.iinfo("int8").max + 1, "int16"),
85
+ ("int8", np.iinfo("int16").max + 1, "int32"),
86
+ ("int8", np.iinfo("int32").max + 1, "int64"),
87
+ ("int8", np.iinfo("int64").max + 1, "object"),
88
+ ("int8", -1, "int8"),
89
+ ("int8", np.iinfo("int8").min - 1, "int16"),
90
+ ("int8", np.iinfo("int16").min - 1, "int32"),
91
+ ("int8", np.iinfo("int32").min - 1, "int64"),
92
+ ("int8", np.iinfo("int64").min - 1, "object"),
93
+ # keep signed-ness as long as possible
94
+ ("uint8", 1, "uint8"),
95
+ ("uint8", np.iinfo("int8").max + 1, "uint8"),
96
+ ("uint8", np.iinfo("uint8").max + 1, "uint16"),
97
+ ("uint8", np.iinfo("int16").max + 1, "uint16"),
98
+ ("uint8", np.iinfo("uint16").max + 1, "uint32"),
99
+ ("uint8", np.iinfo("int32").max + 1, "uint32"),
100
+ ("uint8", np.iinfo("uint32").max + 1, "uint64"),
101
+ ("uint8", np.iinfo("int64").max + 1, "uint64"),
102
+ ("uint8", np.iinfo("uint64").max + 1, "object"),
103
+ # max of uint8 cannot be contained in int8
104
+ ("uint8", -1, "int16"),
105
+ ("uint8", np.iinfo("int8").min - 1, "int16"),
106
+ ("uint8", np.iinfo("int16").min - 1, "int32"),
107
+ ("uint8", np.iinfo("int32").min - 1, "int64"),
108
+ ("uint8", np.iinfo("int64").min - 1, "object"),
109
+ # size 16
110
+ ("int16", 1, "int16"),
111
+ ("int16", np.iinfo("int8").max + 1, "int16"),
112
+ ("int16", np.iinfo("int16").max + 1, "int32"),
113
+ ("int16", np.iinfo("int32").max + 1, "int64"),
114
+ ("int16", np.iinfo("int64").max + 1, "object"),
115
+ ("int16", -1, "int16"),
116
+ ("int16", np.iinfo("int8").min - 1, "int16"),
117
+ ("int16", np.iinfo("int16").min - 1, "int32"),
118
+ ("int16", np.iinfo("int32").min - 1, "int64"),
119
+ ("int16", np.iinfo("int64").min - 1, "object"),
120
+ ("uint16", 1, "uint16"),
121
+ ("uint16", np.iinfo("int8").max + 1, "uint16"),
122
+ ("uint16", np.iinfo("uint8").max + 1, "uint16"),
123
+ ("uint16", np.iinfo("int16").max + 1, "uint16"),
124
+ ("uint16", np.iinfo("uint16").max + 1, "uint32"),
125
+ ("uint16", np.iinfo("int32").max + 1, "uint32"),
126
+ ("uint16", np.iinfo("uint32").max + 1, "uint64"),
127
+ ("uint16", np.iinfo("int64").max + 1, "uint64"),
128
+ ("uint16", np.iinfo("uint64").max + 1, "object"),
129
+ ("uint16", -1, "int32"),
130
+ ("uint16", np.iinfo("int8").min - 1, "int32"),
131
+ ("uint16", np.iinfo("int16").min - 1, "int32"),
132
+ ("uint16", np.iinfo("int32").min - 1, "int64"),
133
+ ("uint16", np.iinfo("int64").min - 1, "object"),
134
+ # size 32
135
+ ("int32", 1, "int32"),
136
+ ("int32", np.iinfo("int8").max + 1, "int32"),
137
+ ("int32", np.iinfo("int16").max + 1, "int32"),
138
+ ("int32", np.iinfo("int32").max + 1, "int64"),
139
+ ("int32", np.iinfo("int64").max + 1, "object"),
140
+ ("int32", -1, "int32"),
141
+ ("int32", np.iinfo("int8").min - 1, "int32"),
142
+ ("int32", np.iinfo("int16").min - 1, "int32"),
143
+ ("int32", np.iinfo("int32").min - 1, "int64"),
144
+ ("int32", np.iinfo("int64").min - 1, "object"),
145
+ ("uint32", 1, "uint32"),
146
+ ("uint32", np.iinfo("int8").max + 1, "uint32"),
147
+ ("uint32", np.iinfo("uint8").max + 1, "uint32"),
148
+ ("uint32", np.iinfo("int16").max + 1, "uint32"),
149
+ ("uint32", np.iinfo("uint16").max + 1, "uint32"),
150
+ ("uint32", np.iinfo("int32").max + 1, "uint32"),
151
+ ("uint32", np.iinfo("uint32").max + 1, "uint64"),
152
+ ("uint32", np.iinfo("int64").max + 1, "uint64"),
153
+ ("uint32", np.iinfo("uint64").max + 1, "object"),
154
+ ("uint32", -1, "int64"),
155
+ ("uint32", np.iinfo("int8").min - 1, "int64"),
156
+ ("uint32", np.iinfo("int16").min - 1, "int64"),
157
+ ("uint32", np.iinfo("int32").min - 1, "int64"),
158
+ ("uint32", np.iinfo("int64").min - 1, "object"),
159
+ # size 64
160
+ ("int64", 1, "int64"),
161
+ ("int64", np.iinfo("int8").max + 1, "int64"),
162
+ ("int64", np.iinfo("int16").max + 1, "int64"),
163
+ ("int64", np.iinfo("int32").max + 1, "int64"),
164
+ ("int64", np.iinfo("int64").max + 1, "object"),
165
+ ("int64", -1, "int64"),
166
+ ("int64", np.iinfo("int8").min - 1, "int64"),
167
+ ("int64", np.iinfo("int16").min - 1, "int64"),
168
+ ("int64", np.iinfo("int32").min - 1, "int64"),
169
+ ("int64", np.iinfo("int64").min - 1, "object"),
170
+ ("uint64", 1, "uint64"),
171
+ ("uint64", np.iinfo("int8").max + 1, "uint64"),
172
+ ("uint64", np.iinfo("uint8").max + 1, "uint64"),
173
+ ("uint64", np.iinfo("int16").max + 1, "uint64"),
174
+ ("uint64", np.iinfo("uint16").max + 1, "uint64"),
175
+ ("uint64", np.iinfo("int32").max + 1, "uint64"),
176
+ ("uint64", np.iinfo("uint32").max + 1, "uint64"),
177
+ ("uint64", np.iinfo("int64").max + 1, "uint64"),
178
+ ("uint64", np.iinfo("uint64").max + 1, "object"),
179
+ ("uint64", -1, "object"),
180
+ ("uint64", np.iinfo("int8").min - 1, "object"),
181
+ ("uint64", np.iinfo("int16").min - 1, "object"),
182
+ ("uint64", np.iinfo("int32").min - 1, "object"),
183
+ ("uint64", np.iinfo("int64").min - 1, "object"),
184
+ ],
185
+ )
186
+ def test_maybe_promote_int_with_int(dtype, fill_value, expected_dtype):
187
+ dtype = np.dtype(dtype)
188
+ expected_dtype = np.dtype(expected_dtype)
189
+
190
+ # output is not a generic int, but corresponds to expected_dtype
191
+ exp_val_for_scalar = np.array([fill_value], dtype=expected_dtype)[0]
192
+
193
+ _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
194
+
195
+
196
+ def test_maybe_promote_int_with_float(any_int_numpy_dtype, float_numpy_dtype):
197
+ dtype = np.dtype(any_int_numpy_dtype)
198
+ fill_dtype = np.dtype(float_numpy_dtype)
199
+
200
+ # create array of given dtype; casts "1" to correct dtype
201
+ fill_value = np.array([1], dtype=fill_dtype)[0]
202
+
203
+ # filling int with float always upcasts to float64
204
+ expected_dtype = np.float64
205
+ # fill_value can be different float type
206
+ exp_val_for_scalar = np.float64(fill_value)
207
+
208
+ _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
209
+
210
+
211
+ def test_maybe_promote_float_with_int(float_numpy_dtype, any_int_numpy_dtype):
212
+ dtype = np.dtype(float_numpy_dtype)
213
+ fill_dtype = np.dtype(any_int_numpy_dtype)
214
+
215
+ # create array of given dtype; casts "1" to correct dtype
216
+ fill_value = np.array([1], dtype=fill_dtype)[0]
217
+
218
+ # filling float with int always keeps float dtype
219
+ # because: np.finfo('float32').max > np.iinfo('uint64').max
220
+ expected_dtype = dtype
221
+ # output is not a generic float, but corresponds to expected_dtype
222
+ exp_val_for_scalar = np.array([fill_value], dtype=expected_dtype)[0]
223
+
224
+ _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
225
+
226
+
227
+ @pytest.mark.parametrize(
228
+ "dtype, fill_value, expected_dtype",
229
+ [
230
+ # float filled with float
231
+ ("float32", 1, "float32"),
232
+ ("float32", float(np.finfo("float32").max) * 1.1, "float64"),
233
+ ("float64", 1, "float64"),
234
+ ("float64", float(np.finfo("float32").max) * 1.1, "float64"),
235
+ # complex filled with float
236
+ ("complex64", 1, "complex64"),
237
+ ("complex64", float(np.finfo("float32").max) * 1.1, "complex128"),
238
+ ("complex128", 1, "complex128"),
239
+ ("complex128", float(np.finfo("float32").max) * 1.1, "complex128"),
240
+ # float filled with complex
241
+ ("float32", 1 + 1j, "complex64"),
242
+ ("float32", float(np.finfo("float32").max) * (1.1 + 1j), "complex128"),
243
+ ("float64", 1 + 1j, "complex128"),
244
+ ("float64", float(np.finfo("float32").max) * (1.1 + 1j), "complex128"),
245
+ # complex filled with complex
246
+ ("complex64", 1 + 1j, "complex64"),
247
+ ("complex64", float(np.finfo("float32").max) * (1.1 + 1j), "complex128"),
248
+ ("complex128", 1 + 1j, "complex128"),
249
+ ("complex128", float(np.finfo("float32").max) * (1.1 + 1j), "complex128"),
250
+ ],
251
+ )
252
+ def test_maybe_promote_float_with_float(dtype, fill_value, expected_dtype):
253
+ dtype = np.dtype(dtype)
254
+ expected_dtype = np.dtype(expected_dtype)
255
+
256
+ # output is not a generic float, but corresponds to expected_dtype
257
+ exp_val_for_scalar = np.array([fill_value], dtype=expected_dtype)[0]
258
+
259
+ _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
260
+
261
+
262
+ def test_maybe_promote_bool_with_any(any_numpy_dtype):
263
+ dtype = np.dtype(bool)
264
+ fill_dtype = np.dtype(any_numpy_dtype)
265
+
266
+ # create array of given dtype; casts "1" to correct dtype
267
+ fill_value = np.array([1], dtype=fill_dtype)[0]
268
+
269
+ # filling bool with anything but bool casts to object
270
+ expected_dtype = np.dtype(object) if fill_dtype != bool else fill_dtype
271
+ exp_val_for_scalar = fill_value
272
+
273
+ _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
274
+
275
+
276
+ def test_maybe_promote_any_with_bool(any_numpy_dtype):
277
+ dtype = np.dtype(any_numpy_dtype)
278
+ fill_value = True
279
+
280
+ # filling anything but bool with bool casts to object
281
+ expected_dtype = np.dtype(object) if dtype != bool else dtype
282
+ # output is not a generic bool, but corresponds to expected_dtype
283
+ exp_val_for_scalar = np.array([fill_value], dtype=expected_dtype)[0]
284
+
285
+ _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
286
+
287
+
288
+ def test_maybe_promote_bytes_with_any(bytes_dtype, any_numpy_dtype):
289
+ dtype = np.dtype(bytes_dtype)
290
+ fill_dtype = np.dtype(any_numpy_dtype)
291
+
292
+ # create array of given dtype; casts "1" to correct dtype
293
+ fill_value = np.array([1], dtype=fill_dtype)[0]
294
+
295
+ # we never use bytes dtype internally, always promote to object
296
+ expected_dtype = np.dtype(np.object_)
297
+ exp_val_for_scalar = fill_value
298
+
299
+ _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
300
+
301
+
302
+ def test_maybe_promote_any_with_bytes(any_numpy_dtype):
303
+ dtype = np.dtype(any_numpy_dtype)
304
+
305
+ # create array of given dtype
306
+ fill_value = b"abc"
307
+
308
+ # we never use bytes dtype internally, always promote to object
309
+ expected_dtype = np.dtype(np.object_)
310
+ # output is not a generic bytes, but corresponds to expected_dtype
311
+ exp_val_for_scalar = np.array([fill_value], dtype=expected_dtype)[0]
312
+
313
+ _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
314
+
315
+
316
+ def test_maybe_promote_datetime64_with_any(datetime64_dtype, any_numpy_dtype):
317
+ dtype = np.dtype(datetime64_dtype)
318
+ fill_dtype = np.dtype(any_numpy_dtype)
319
+
320
+ # create array of given dtype; casts "1" to correct dtype
321
+ fill_value = np.array([1], dtype=fill_dtype)[0]
322
+
323
+ # filling datetime with anything but datetime casts to object
324
+ if fill_dtype.kind == "M":
325
+ expected_dtype = dtype
326
+ # for datetime dtypes, scalar values get cast to to_datetime64
327
+ exp_val_for_scalar = pd.Timestamp(fill_value).to_datetime64()
328
+ else:
329
+ expected_dtype = np.dtype(object)
330
+ exp_val_for_scalar = fill_value
331
+
332
+ _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
333
+
334
+
335
+ @pytest.mark.parametrize(
336
+ "fill_value",
337
+ [
338
+ pd.Timestamp("now"),
339
+ np.datetime64("now"),
340
+ datetime.datetime.now(),
341
+ datetime.date.today(),
342
+ ],
343
+ ids=["pd.Timestamp", "np.datetime64", "datetime.datetime", "datetime.date"],
344
+ )
345
+ def test_maybe_promote_any_with_datetime64(any_numpy_dtype, fill_value):
346
+ dtype = np.dtype(any_numpy_dtype)
347
+
348
+ # filling datetime with anything but datetime casts to object
349
+ if dtype.kind == "M":
350
+ expected_dtype = dtype
351
+ # for datetime dtypes, scalar values get cast to pd.Timestamp.value
352
+ exp_val_for_scalar = pd.Timestamp(fill_value).to_datetime64()
353
+ else:
354
+ expected_dtype = np.dtype(object)
355
+ exp_val_for_scalar = fill_value
356
+
357
+ if type(fill_value) is datetime.date and dtype.kind == "M":
358
+ # Casting date to dt64 is deprecated, in 2.0 enforced to cast to object
359
+ expected_dtype = np.dtype(object)
360
+ exp_val_for_scalar = fill_value
361
+
362
+ _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
363
+
364
+
365
+ @pytest.mark.parametrize(
366
+ "fill_value",
367
+ [
368
+ pd.Timestamp(2023, 1, 1),
369
+ np.datetime64("2023-01-01"),
370
+ datetime.datetime(2023, 1, 1),
371
+ datetime.date(2023, 1, 1),
372
+ ],
373
+ ids=["pd.Timestamp", "np.datetime64", "datetime.datetime", "datetime.date"],
374
+ )
375
+ def test_maybe_promote_any_numpy_dtype_with_datetimetz(
376
+ any_numpy_dtype, tz_aware_fixture, fill_value
377
+ ):
378
+ dtype = np.dtype(any_numpy_dtype)
379
+ fill_dtype = DatetimeTZDtype(tz=tz_aware_fixture)
380
+
381
+ fill_value = pd.Series([fill_value], dtype=fill_dtype)[0]
382
+
383
+ # filling any numpy dtype with datetimetz casts to object
384
+ expected_dtype = np.dtype(object)
385
+ exp_val_for_scalar = fill_value
386
+
387
+ _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
388
+
389
+
390
+ def test_maybe_promote_timedelta64_with_any(timedelta64_dtype, any_numpy_dtype):
391
+ dtype = np.dtype(timedelta64_dtype)
392
+ fill_dtype = np.dtype(any_numpy_dtype)
393
+
394
+ # create array of given dtype; casts "1" to correct dtype
395
+ fill_value = np.array([1], dtype=fill_dtype)[0]
396
+
397
+ # filling timedelta with anything but timedelta casts to object
398
+ if fill_dtype.kind == "m":
399
+ expected_dtype = dtype
400
+ # for timedelta dtypes, scalar values get cast to pd.Timedelta.value
401
+ exp_val_for_scalar = pd.Timedelta(fill_value).to_timedelta64()
402
+ else:
403
+ expected_dtype = np.dtype(object)
404
+ exp_val_for_scalar = fill_value
405
+
406
+ _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
407
+
408
+
409
+ @pytest.mark.parametrize(
410
+ "fill_value",
411
+ [pd.Timedelta(days=1), np.timedelta64(24, "h"), datetime.timedelta(1)],
412
+ ids=["pd.Timedelta", "np.timedelta64", "datetime.timedelta"],
413
+ )
414
+ def test_maybe_promote_any_with_timedelta64(any_numpy_dtype, fill_value):
415
+ dtype = np.dtype(any_numpy_dtype)
416
+
417
+ # filling anything but timedelta with timedelta casts to object
418
+ if dtype.kind == "m":
419
+ expected_dtype = dtype
420
+ # for timedelta dtypes, scalar values get cast to pd.Timedelta.value
421
+ exp_val_for_scalar = pd.Timedelta(fill_value).to_timedelta64()
422
+ else:
423
+ expected_dtype = np.dtype(object)
424
+ exp_val_for_scalar = fill_value
425
+
426
+ _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
427
+
428
+
429
+ def test_maybe_promote_string_with_any(string_dtype, any_numpy_dtype):
430
+ dtype = np.dtype(string_dtype)
431
+ fill_dtype = np.dtype(any_numpy_dtype)
432
+
433
+ # create array of given dtype; casts "1" to correct dtype
434
+ fill_value = np.array([1], dtype=fill_dtype)[0]
435
+
436
+ # filling string with anything casts to object
437
+ expected_dtype = np.dtype(object)
438
+ exp_val_for_scalar = fill_value
439
+
440
+ _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
441
+
442
+
443
+ def test_maybe_promote_any_with_string(any_numpy_dtype):
444
+ dtype = np.dtype(any_numpy_dtype)
445
+
446
+ # create array of given dtype
447
+ fill_value = "abc"
448
+
449
+ # filling anything with a string casts to object
450
+ expected_dtype = np.dtype(object)
451
+ exp_val_for_scalar = fill_value
452
+
453
+ _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
454
+
455
+
456
+ def test_maybe_promote_object_with_any(object_dtype, any_numpy_dtype):
457
+ dtype = np.dtype(object_dtype)
458
+ fill_dtype = np.dtype(any_numpy_dtype)
459
+
460
+ # create array of given dtype; casts "1" to correct dtype
461
+ fill_value = np.array([1], dtype=fill_dtype)[0]
462
+
463
+ # filling object with anything stays object
464
+ expected_dtype = np.dtype(object)
465
+ exp_val_for_scalar = fill_value
466
+
467
+ _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
468
+
469
+
470
+ def test_maybe_promote_any_with_object(any_numpy_dtype):
471
+ dtype = np.dtype(any_numpy_dtype)
472
+
473
+ # create array of object dtype from a scalar value (i.e. passing
474
+ # dtypes.common.is_scalar), which can however not be cast to int/float etc.
475
+ fill_value = pd.DateOffset(1)
476
+
477
+ # filling object with anything stays object
478
+ expected_dtype = np.dtype(object)
479
+ exp_val_for_scalar = fill_value
480
+
481
+ _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
482
+
483
+
484
+ def test_maybe_promote_any_numpy_dtype_with_na(any_numpy_dtype, nulls_fixture):
485
+ fill_value = nulls_fixture
486
+ dtype = np.dtype(any_numpy_dtype)
487
+
488
+ if isinstance(fill_value, Decimal):
489
+ # Subject to change, but ATM (When Decimal(NAN) is being added to nulls_fixture)
490
+ # this is the existing behavior in maybe_promote,
491
+ # hinges on is_valid_na_for_dtype
492
+ if dtype.kind in "iufc":
493
+ if dtype.kind in "iu":
494
+ expected_dtype = np.dtype(np.float64)
495
+ else:
496
+ expected_dtype = dtype
497
+ exp_val_for_scalar = np.nan
498
+ else:
499
+ expected_dtype = np.dtype(object)
500
+ exp_val_for_scalar = fill_value
501
+ elif dtype.kind in "iu" and fill_value is not NaT:
502
+ # integer + other missing value (np.nan / None) casts to float
503
+ expected_dtype = np.float64
504
+ exp_val_for_scalar = np.nan
505
+ elif dtype == object and fill_value is NaT:
506
+ # inserting into object does not cast the value
507
+ # but *does* cast None to np.nan
508
+ expected_dtype = np.dtype(object)
509
+ exp_val_for_scalar = fill_value
510
+ elif dtype.kind in "mM":
511
+ # datetime / timedelta cast all missing values to dtyped-NaT
512
+ expected_dtype = dtype
513
+ exp_val_for_scalar = dtype.type("NaT", "ns")
514
+ elif fill_value is NaT:
515
+ # NaT upcasts everything that's not datetime/timedelta to object
516
+ expected_dtype = np.dtype(object)
517
+ exp_val_for_scalar = NaT
518
+ elif dtype.kind in "fc":
519
+ # float / complex + missing value (!= NaT) stays the same
520
+ expected_dtype = dtype
521
+ exp_val_for_scalar = np.nan
522
+ else:
523
+ # all other cases cast to object, and use np.nan as missing value
524
+ expected_dtype = np.dtype(object)
525
+ if fill_value is pd.NA:
526
+ exp_val_for_scalar = pd.NA
527
+ else:
528
+ exp_val_for_scalar = np.nan
529
+
530
+ _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)
omnilmm/lib/python3.10/site-packages/pandas/tests/dtypes/test_inference.py ADDED
@@ -0,0 +1,2047 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ These the test the public routines exposed in types/common.py
3
+ related to inference and not otherwise tested in types/test_common.py
4
+
5
+ """
6
+ import collections
7
+ from collections import namedtuple
8
+ from collections.abc import Iterator
9
+ from datetime import (
10
+ date,
11
+ datetime,
12
+ time,
13
+ timedelta,
14
+ )
15
+ from decimal import Decimal
16
+ from fractions import Fraction
17
+ from io import StringIO
18
+ import itertools
19
+ from numbers import Number
20
+ import re
21
+ import sys
22
+ from typing import (
23
+ Generic,
24
+ TypeVar,
25
+ )
26
+
27
+ import numpy as np
28
+ import pytest
29
+ import pytz
30
+
31
+ from pandas._libs import (
32
+ lib,
33
+ missing as libmissing,
34
+ ops as libops,
35
+ )
36
+ from pandas.compat.numpy import np_version_gt2
37
+
38
+ from pandas.core.dtypes import inference
39
+ from pandas.core.dtypes.cast import find_result_type
40
+ from pandas.core.dtypes.common import (
41
+ ensure_int32,
42
+ is_bool,
43
+ is_complex,
44
+ is_datetime64_any_dtype,
45
+ is_datetime64_dtype,
46
+ is_datetime64_ns_dtype,
47
+ is_datetime64tz_dtype,
48
+ is_float,
49
+ is_integer,
50
+ is_number,
51
+ is_scalar,
52
+ is_scipy_sparse,
53
+ is_timedelta64_dtype,
54
+ is_timedelta64_ns_dtype,
55
+ )
56
+
57
+ import pandas as pd
58
+ from pandas import (
59
+ Categorical,
60
+ DataFrame,
61
+ DateOffset,
62
+ DatetimeIndex,
63
+ Index,
64
+ Interval,
65
+ Period,
66
+ PeriodIndex,
67
+ Series,
68
+ Timedelta,
69
+ TimedeltaIndex,
70
+ Timestamp,
71
+ )
72
+ import pandas._testing as tm
73
+ from pandas.core.arrays import (
74
+ BooleanArray,
75
+ FloatingArray,
76
+ IntegerArray,
77
+ )
78
+
79
+
80
+ @pytest.fixture(params=[True, False], ids=str)
81
+ def coerce(request):
82
+ return request.param
83
+
84
+
85
+ class MockNumpyLikeArray:
86
+ """
87
+ A class which is numpy-like (e.g. Pint's Quantity) but not actually numpy
88
+
89
+ The key is that it is not actually a numpy array so
90
+ ``util.is_array(mock_numpy_like_array_instance)`` returns ``False``. Other
91
+ important properties are that the class defines a :meth:`__iter__` method
92
+ (so that ``isinstance(abc.Iterable)`` returns ``True``) and has a
93
+ :meth:`ndim` property, as pandas special-cases 0-dimensional arrays in some
94
+ cases.
95
+
96
+ We expect pandas to behave with respect to such duck arrays exactly as
97
+ with real numpy arrays. In particular, a 0-dimensional duck array is *NOT*
98
+ a scalar (`is_scalar(np.array(1)) == False`), but it is not list-like either.
99
+ """
100
+
101
+ def __init__(self, values) -> None:
102
+ self._values = values
103
+
104
+ def __iter__(self) -> Iterator:
105
+ iter_values = iter(self._values)
106
+
107
+ def it_outer():
108
+ yield from iter_values
109
+
110
+ return it_outer()
111
+
112
+ def __len__(self) -> int:
113
+ return len(self._values)
114
+
115
+ def __array__(self, dtype=None, copy=None):
116
+ return np.asarray(self._values, dtype=dtype)
117
+
118
+ @property
119
+ def ndim(self):
120
+ return self._values.ndim
121
+
122
+ @property
123
+ def dtype(self):
124
+ return self._values.dtype
125
+
126
+ @property
127
+ def size(self):
128
+ return self._values.size
129
+
130
+ @property
131
+ def shape(self):
132
+ return self._values.shape
133
+
134
+
135
+ # collect all objects to be tested for list-like-ness; use tuples of objects,
136
+ # whether they are list-like or not (special casing for sets), and their ID
137
+ ll_params = [
138
+ ([1], True, "list"),
139
+ ([], True, "list-empty"),
140
+ ((1,), True, "tuple"),
141
+ ((), True, "tuple-empty"),
142
+ ({"a": 1}, True, "dict"),
143
+ ({}, True, "dict-empty"),
144
+ ({"a", 1}, "set", "set"),
145
+ (set(), "set", "set-empty"),
146
+ (frozenset({"a", 1}), "set", "frozenset"),
147
+ (frozenset(), "set", "frozenset-empty"),
148
+ (iter([1, 2]), True, "iterator"),
149
+ (iter([]), True, "iterator-empty"),
150
+ ((x for x in [1, 2]), True, "generator"),
151
+ ((_ for _ in []), True, "generator-empty"),
152
+ (Series([1]), True, "Series"),
153
+ (Series([], dtype=object), True, "Series-empty"),
154
+ # Series.str will still raise a TypeError if iterated
155
+ (Series(["a"]).str, True, "StringMethods"),
156
+ (Series([], dtype="O").str, True, "StringMethods-empty"),
157
+ (Index([1]), True, "Index"),
158
+ (Index([]), True, "Index-empty"),
159
+ (DataFrame([[1]]), True, "DataFrame"),
160
+ (DataFrame(), True, "DataFrame-empty"),
161
+ (np.ndarray((2,) * 1), True, "ndarray-1d"),
162
+ (np.array([]), True, "ndarray-1d-empty"),
163
+ (np.ndarray((2,) * 2), True, "ndarray-2d"),
164
+ (np.array([[]]), True, "ndarray-2d-empty"),
165
+ (np.ndarray((2,) * 3), True, "ndarray-3d"),
166
+ (np.array([[[]]]), True, "ndarray-3d-empty"),
167
+ (np.ndarray((2,) * 4), True, "ndarray-4d"),
168
+ (np.array([[[[]]]]), True, "ndarray-4d-empty"),
169
+ (np.array(2), False, "ndarray-0d"),
170
+ (MockNumpyLikeArray(np.ndarray((2,) * 1)), True, "duck-ndarray-1d"),
171
+ (MockNumpyLikeArray(np.array([])), True, "duck-ndarray-1d-empty"),
172
+ (MockNumpyLikeArray(np.ndarray((2,) * 2)), True, "duck-ndarray-2d"),
173
+ (MockNumpyLikeArray(np.array([[]])), True, "duck-ndarray-2d-empty"),
174
+ (MockNumpyLikeArray(np.ndarray((2,) * 3)), True, "duck-ndarray-3d"),
175
+ (MockNumpyLikeArray(np.array([[[]]])), True, "duck-ndarray-3d-empty"),
176
+ (MockNumpyLikeArray(np.ndarray((2,) * 4)), True, "duck-ndarray-4d"),
177
+ (MockNumpyLikeArray(np.array([[[[]]]])), True, "duck-ndarray-4d-empty"),
178
+ (MockNumpyLikeArray(np.array(2)), False, "duck-ndarray-0d"),
179
+ (1, False, "int"),
180
+ (b"123", False, "bytes"),
181
+ (b"", False, "bytes-empty"),
182
+ ("123", False, "string"),
183
+ ("", False, "string-empty"),
184
+ (str, False, "string-type"),
185
+ (object(), False, "object"),
186
+ (np.nan, False, "NaN"),
187
+ (None, False, "None"),
188
+ ]
189
+ objs, expected, ids = zip(*ll_params)
190
+
191
+
192
+ @pytest.fixture(params=zip(objs, expected), ids=ids)
193
+ def maybe_list_like(request):
194
+ return request.param
195
+
196
+
197
+ def test_is_list_like(maybe_list_like):
198
+ obj, expected = maybe_list_like
199
+ expected = True if expected == "set" else expected
200
+ assert inference.is_list_like(obj) == expected
201
+
202
+
203
+ def test_is_list_like_disallow_sets(maybe_list_like):
204
+ obj, expected = maybe_list_like
205
+ expected = False if expected == "set" else expected
206
+ assert inference.is_list_like(obj, allow_sets=False) == expected
207
+
208
+
209
def test_is_list_like_recursion():
    # GH 33721
    # interpreter would crash with SIGABRT
    def list_like():
        inference.is_list_like([])
        list_like()  # recurse until the interpreter's limit trips

    # Save the current limit so we can restore it even if the assertion fails.
    rec_limit = sys.getrecursionlimit()
    try:
        # Limit to avoid stack overflow on Windows CI
        sys.setrecursionlimit(100)
        # A clean RecursionError (not a hard crash) is the expected outcome.
        with tm.external_error_raised(RecursionError):
            list_like()
    finally:
        sys.setrecursionlimit(rec_limit)
224
+
225
+
226
def test_is_list_like_iter_is_none():
    # GH 43373
    # An object whose __iter__ attribute is explicitly None must not be
    # treated as list-like, even though it supports __getitem__.
    class Indexable:
        __iter__ = None

        def __getitem__(self, key):
            return self

    assert not inference.is_list_like(Indexable())
236
+
237
+
238
def test_is_list_like_generic():
    # GH 49649
    # In python 3.11 is_list_like yielded false positives for parametrized
    # Generic class aliases; instances built through the alias must still
    # behave like any DataFrame.
    T = TypeVar("T")

    class GenericFrame(DataFrame, Generic[T]):
        pass

    alias = GenericFrame[int]
    instance = GenericFrame[int]({"x": [1, 2, 3]})

    assert not inference.is_list_like(alias)
    assert isinstance(instance, DataFrame)
    assert inference.is_list_like(instance)
252
+
253
+
254
def test_is_sequence():
    """Tuples and lists are sequences; strings, types and bare __getitem__ are not."""
    is_seq = inference.is_sequence

    for seq in ((1, 2), [1, 2]):
        assert is_seq(seq)
    for non_seq in ("abcd", np.int64):
        assert not is_seq(non_seq)

    class OnlyGetitem:
        def __getitem__(self, key):
            return 1

    assert not is_seq(OnlyGetitem())
266
+
267
+
268
def test_is_array_like():
    # Objects exposing both iteration and a `dtype` attribute qualify.
    array_likes = [
        Series([], dtype=object),
        Series([1, 2]),
        np.array(["a", "b"]),
        Index(["2016-01-01"]),
        np.array([2, 3]),
        MockNumpyLikeArray(np.array([2, 3])),
    ]
    for obj in array_likes:
        assert inference.is_array_like(obj)

    class DtypeList(list):
        dtype = "special"

    # A list subclass gains array-likeness just by carrying a dtype attribute.
    assert inference.is_array_like(DtypeList())

    for obj in ([1, 2, 3], (), "foo", 123):
        assert not inference.is_array_like(obj)
285
+
286
+
287
@pytest.mark.parametrize(
    "inner",
    [
        [],
        [1],
        (1,),
        (1, 2),
        {"a": 1},
        {1, "a"},
        Series([1]),
        Series([], dtype=object),
        Series(["a"]).str,
        (x for x in range(5)),
    ],
)
@pytest.mark.parametrize("outer", [list, Series, np.array, tuple])
def test_is_nested_list_like_passes(inner, outer):
    """A list-like container of list-like elements is itself list-like."""
    result = outer([inner for _ in range(5)])
    assert inference.is_list_like(result)
306
+
307
+
308
@pytest.mark.parametrize(
    "obj",
    [
        "abc",
        [],
        [1],
        (1,),
        ["a"],
        "a",
        {"a"},
        [1, 2, 3],
        Series([1]),
        DataFrame({"A": [1]}),
        ([1, 2] for _ in range(5)),
    ],
)
def test_is_nested_list_like_fails(obj):
    """Flat containers, scalars, and generators are not nested-list-like."""
    assert not inference.is_nested_list_like(obj)
326
+
327
+
328
@pytest.mark.parametrize("ll", [{}, {"A": 1}, Series([1]), collections.defaultdict()])
def test_is_dict_like_passes(ll):
    """Mappings and Series instances are dict-like."""
    assert inference.is_dict_like(ll)
331
+
332
+
333
@pytest.mark.parametrize(
    "ll",
    [
        "1",
        1,
        [1, 2],
        (1, 2),
        range(2),
        Index([1]),
        dict,
        collections.defaultdict,
        Series,
    ],
)
def test_is_dict_like_fails(ll):
    """Scalars, sequences, and dict-like *classes* (not instances) fail."""
    assert not inference.is_dict_like(ll)
349
+
350
+
351
@pytest.mark.parametrize("has_keys", [True, False])
@pytest.mark.parametrize("has_getitem", [True, False])
@pytest.mark.parametrize("has_contains", [True, False])
def test_is_dict_like_duck_type(has_keys, has_getitem, has_contains):
    """Duck-typed dict-likeness requires all of keys, __getitem__ and __contains__."""

    # Build a class whose dict protocol is only partially implemented,
    # controlled by the three parametrized flags.
    class DictLike:
        def __init__(self, d) -> None:
            self.d = d

        if has_keys:

            def keys(self):
                return self.d.keys()

        if has_getitem:

            def __getitem__(self, key):
                return self.d.__getitem__(key)

        if has_contains:

            def __contains__(self, key) -> bool:
                return self.d.__contains__(key)

    d = DictLike({1: 2})
    result = inference.is_dict_like(d)
    # Only the fully-equipped combination qualifies as dict-like.
    expected = has_keys and has_getitem and has_contains

    assert result is expected
379
+
380
+
381
def test_is_file_like():
    """File-likeness requires __iter__ plus at least one of read/write."""
    is_file = inference.is_file_like

    class FileLike:
        pass

    # A real text buffer qualifies.
    assert is_file(StringIO("data"))

    # No read/write and no __iter__: not file-like.
    assert not is_file(FileLike())

    # A write method alone is still not enough without __iter__.
    FileLike.write = lambda self: 0
    assert not is_file(FileLike())

    # gh-16530: having the __iter__ attribute is all "iterator" means here.
    FileLike.__iter__ = lambda self: self

    # write + __iter__: a valid write-only file.
    assert is_file(FileLike())

    # read + __iter__: a valid read-only file.
    del FileLike.write
    FileLike.read = lambda self: 0
    assert is_file(FileLike())

    # Iterable without read/write attributes: not file-like.
    assert not is_file([1, 2, 3])
419
+
420
+
421
# Concrete namedtuple type used by the is_named_tuple tests.
test_tuple = collections.namedtuple("test_tuple", ["a", "b", "c"])
422
+
423
+
424
@pytest.mark.parametrize("ll", [test_tuple(1, 2, 3)])
def test_is_names_tuple_passes(ll):
    """A namedtuple instance is recognized as a named tuple."""
    assert inference.is_named_tuple(ll)
427
+
428
+
429
@pytest.mark.parametrize("ll", [(1, 2, 3), "a", Series({"pi": 3.14})])
def test_is_names_tuple_fails(ll):
    """Plain tuples, strings and Series are not named tuples."""
    assert not inference.is_named_tuple(ll)
432
+
433
+
434
def test_is_hashable():
    """is_hashable must actually try hashing, not just consult the ABC."""

    # all new-style classes are hashable by default
    class Plain:
        pass

    class NoHash:
        __hash__ = None

    class RaisingHash:
        def __hash__(self):
            raise TypeError("Not hashable")

    for obj in (1, 3.14, np.float64(3.14), "a", (), (1,), Plain()):
        assert inference.is_hashable(obj)
    for obj in ([], NoHash()):
        assert not inference.is_hashable(obj)
    # These pass an abc.Hashable isinstance check yet fail when hashed:
    # a tuple containing a list, and a class raising from __hash__.
    for obj in (([],), RaisingHash()):
        assert not inference.is_hashable(obj)

    # numpy.array is no longer collections.abc.Hashable as of
    # https://github.com/numpy/numpy/pull/5326, just test
    # is_hashable()
    assert not inference.is_hashable(np.array([]))
461
+
462
+
463
@pytest.mark.parametrize("ll", [re.compile("ad")])
def test_is_re_passes(ll):
    """A compiled regex pattern is recognized by is_re."""
    assert inference.is_re(ll)
466
+
467
+
468
@pytest.mark.parametrize("ll", ["x", 2, 3, object()])
def test_is_re_fails(ll):
    """Uncompiled strings and arbitrary objects are not regex patterns."""
    assert not inference.is_re(ll)
471
+
472
+
473
@pytest.mark.parametrize(
    "ll", [r"a", "x", r"asdf", re.compile("adsf"), r"\u2233\s*", re.compile(r"")]
)
def test_is_recompilable_passes(ll):
    """Strings and existing patterns that re.compile accepts are re-compilable."""
    assert inference.is_re_compilable(ll)
478
+
479
+
480
@pytest.mark.parametrize("ll", [1, [], object()])
def test_is_recompilable_fails(ll):
    """Non-string, non-pattern objects cannot be compiled into a regex."""
    assert not inference.is_re_compilable(ll)
483
+
484
+
485
class TestInference:
    """Tests for the C-extension inference/conversion routines in
    pandas._libs (lib.infer_dtype, lib.maybe_convert_numeric,
    lib.maybe_convert_objects, libmissing/libops scalar helpers)."""

    @pytest.mark.parametrize(
        "arr",
        [
            np.array(list("abc"), dtype="S1"),
            np.array(list("abc"), dtype="S1").astype(object),
            [b"a", np.nan, b"c"],
        ],
    )
    def test_infer_dtype_bytes(self, arr):
        # Bytes stay "bytes" whether held in an S1 array, object array, or list.
        result = lib.infer_dtype(arr, skipna=True)
        assert result == "bytes"

    @pytest.mark.parametrize(
        "value, expected",
        [
            (float("inf"), True),
            (np.inf, True),
            (-np.inf, False),
            (1, False),
            ("a", False),
        ],
    )
    def test_isposinf_scalar(self, value, expected):
        # GH 11352
        result = libmissing.isposinf_scalar(value)
        assert result is expected

    @pytest.mark.parametrize(
        "value, expected",
        [
            (float("-inf"), True),
            (-np.inf, True),
            (np.inf, False),
            (1, False),
            ("a", False),
        ],
    )
    def test_isneginf_scalar(self, value, expected):
        result = libmissing.isneginf_scalar(value)
        assert result is expected

    @pytest.mark.parametrize(
        "convert_to_masked_nullable, exp",
        [
            (
                True,
                BooleanArray(
                    np.array([True, False], dtype="bool"), np.array([False, True])
                ),
            ),
            (False, np.array([True, np.nan], dtype="object")),
        ],
    )
    def test_maybe_convert_nullable_boolean(self, convert_to_masked_nullable, exp):
        # GH 40687
        arr = np.array([True, np.nan], dtype=object)
        result = libops.maybe_convert_bool(
            arr, set(), convert_to_masked_nullable=convert_to_masked_nullable
        )
        if convert_to_masked_nullable:
            # result is a (values, mask) pair when the masked path is taken
            tm.assert_extension_array_equal(BooleanArray(*result), exp)
        else:
            result = result[0]
            tm.assert_numpy_array_equal(result, exp)

    @pytest.mark.parametrize("convert_to_masked_nullable", [True, False])
    @pytest.mark.parametrize("coerce_numeric", [True, False])
    @pytest.mark.parametrize(
        "infinity", ["inf", "inF", "iNf", "Inf", "iNF", "InF", "INf", "INF"]
    )
    @pytest.mark.parametrize("prefix", ["", "-", "+"])
    def test_maybe_convert_numeric_infinities(
        self, coerce_numeric, infinity, prefix, convert_to_masked_nullable
    ):
        # see gh-13274
        # Every capitalization of "inf", with optional sign, must parse.
        result, _ = lib.maybe_convert_numeric(
            np.array([prefix + infinity], dtype=object),
            na_values={"", "NULL", "nan"},
            coerce_numeric=coerce_numeric,
            convert_to_masked_nullable=convert_to_masked_nullable,
        )
        expected = np.array([np.inf if prefix in ["", "+"] else -np.inf])
        tm.assert_numpy_array_equal(result, expected)

    @pytest.mark.parametrize("convert_to_masked_nullable", [True, False])
    def test_maybe_convert_numeric_infinities_raises(self, convert_to_masked_nullable):
        msg = "Unable to parse string"
        with pytest.raises(ValueError, match=msg):
            lib.maybe_convert_numeric(
                np.array(["foo_inf"], dtype=object),
                na_values={"", "NULL", "nan"},
                coerce_numeric=False,
                convert_to_masked_nullable=convert_to_masked_nullable,
            )

    @pytest.mark.parametrize("convert_to_masked_nullable", [True, False])
    def test_maybe_convert_numeric_post_floatify_nan(
        self, coerce, convert_to_masked_nullable
    ):
        # see gh-13314
        # -999 sentinel values become NaN after the float conversion.
        data = np.array(["1.200", "-999.000", "4.500"], dtype=object)
        expected = np.array([1.2, np.nan, 4.5], dtype=np.float64)
        nan_values = {-999, -999.0}

        out = lib.maybe_convert_numeric(
            data,
            nan_values,
            coerce,
            convert_to_masked_nullable=convert_to_masked_nullable,
        )
        if convert_to_masked_nullable:
            expected = FloatingArray(expected, np.isnan(expected))
            tm.assert_extension_array_equal(expected, FloatingArray(*out))
        else:
            out = out[0]
            tm.assert_numpy_array_equal(out, expected)

    def test_convert_infs(self):
        # String infinities convert to float64 in either sign.
        arr = np.array(["inf", "inf", "inf"], dtype="O")
        result, _ = lib.maybe_convert_numeric(arr, set(), False)
        assert result.dtype == np.float64

        arr = np.array(["-inf", "-inf", "-inf"], dtype="O")
        result, _ = lib.maybe_convert_numeric(arr, set(), False)
        assert result.dtype == np.float64

    def test_scientific_no_exponent(self):
        # See PR 12215
        # Scientific notation missing the exponent digits coerces to NaN.
        arr = np.array(["42E", "2E", "99e", "6e"], dtype="O")
        result, _ = lib.maybe_convert_numeric(arr, set(), False, True)
        assert np.all(np.isnan(result))

    def test_convert_non_hashable(self):
        # GH13324
        # make sure that we are handing non-hashables
        arr = np.array([[10.0, 2], 1.0, "apple"], dtype=object)
        result, _ = lib.maybe_convert_numeric(arr, set(), False, True)
        tm.assert_numpy_array_equal(result, np.array([np.nan, 1.0, np.nan]))

    def test_convert_numeric_uint64(self):
        # Values above int64 max, whether int, str or np.uint64, become uint64.
        arr = np.array([2**63], dtype=object)
        exp = np.array([2**63], dtype=np.uint64)
        tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set())[0], exp)

        arr = np.array([str(2**63)], dtype=object)
        exp = np.array([2**63], dtype=np.uint64)
        tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set())[0], exp)

        arr = np.array([np.uint64(2**63)], dtype=object)
        exp = np.array([2**63], dtype=np.uint64)
        tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set())[0], exp)

    @pytest.mark.parametrize(
        "arr",
        [
            np.array([2**63, np.nan], dtype=object),
            np.array([str(2**63), np.nan], dtype=object),
            np.array([np.nan, 2**63], dtype=object),
            np.array([np.nan, str(2**63)], dtype=object),
        ],
    )
    def test_convert_numeric_uint64_nan(self, coerce, arr):
        # uint64 + NaN can only become float when coercion is requested.
        expected = arr.astype(float) if coerce else arr.copy()
        result, _ = lib.maybe_convert_numeric(arr, set(), coerce_numeric=coerce)
        tm.assert_almost_equal(result, expected)

    @pytest.mark.parametrize("convert_to_masked_nullable", [True, False])
    def test_convert_numeric_uint64_nan_values(
        self, coerce, convert_to_masked_nullable
    ):
        arr = np.array([2**63, 2**63 + 1], dtype=object)
        na_values = {2**63}

        expected = (
            np.array([np.nan, 2**63 + 1], dtype=float) if coerce else arr.copy()
        )
        result = lib.maybe_convert_numeric(
            arr,
            na_values,
            coerce_numeric=coerce,
            convert_to_masked_nullable=convert_to_masked_nullable,
        )
        if convert_to_masked_nullable and coerce:
            # masked path keeps uint64 storage and flags the NA via the mask
            expected = IntegerArray(
                np.array([0, 2**63 + 1], dtype="u8"),
                np.array([True, False], dtype="bool"),
            )
            result = IntegerArray(*result)
        else:
            result = result[0]  # discard mask
        tm.assert_almost_equal(result, expected)

    @pytest.mark.parametrize(
        "case",
        [
            np.array([2**63, -1], dtype=object),
            np.array([str(2**63), -1], dtype=object),
            np.array([str(2**63), str(-1)], dtype=object),
            np.array([-1, 2**63], dtype=object),
            np.array([-1, str(2**63)], dtype=object),
            np.array([str(-1), str(2**63)], dtype=object),
        ],
    )
    @pytest.mark.parametrize("convert_to_masked_nullable", [True, False])
    def test_convert_numeric_int64_uint64(
        self, case, coerce, convert_to_masked_nullable
    ):
        # Mixing uint64-range and negative values: float if coerced, else object.
        expected = case.astype(float) if coerce else case.copy()
        result, _ = lib.maybe_convert_numeric(
            case,
            set(),
            coerce_numeric=coerce,
            convert_to_masked_nullable=convert_to_masked_nullable,
        )

        tm.assert_almost_equal(result, expected)

    @pytest.mark.parametrize("convert_to_masked_nullable", [True, False])
    def test_convert_numeric_string_uint64(self, convert_to_masked_nullable):
        # GH32394
        # The literal string "uint64" is not a number; coercion yields NaN.
        result = lib.maybe_convert_numeric(
            np.array(["uint64"], dtype=object),
            set(),
            coerce_numeric=True,
            convert_to_masked_nullable=convert_to_masked_nullable,
        )
        if convert_to_masked_nullable:
            result = FloatingArray(*result)
        else:
            result = result[0]
        assert np.isnan(result)

    @pytest.mark.parametrize("value", [-(2**63) - 1, 2**64])
    def test_convert_int_overflow(self, value):
        # see gh-18584
        # Values outside both int64 and uint64 ranges stay as object.
        arr = np.array([value], dtype=object)
        result = lib.maybe_convert_objects(arr)
        tm.assert_numpy_array_equal(arr, result)

    @pytest.mark.parametrize("val", [None, np.nan, float("nan")])
    @pytest.mark.parametrize("dtype", ["M8[ns]", "m8[ns]"])
    def test_maybe_convert_objects_nat_inference(self, val, dtype):
        # All-NaT input adopts the dtype_if_all_nat hint regardless of ordering.
        dtype = np.dtype(dtype)
        vals = np.array([pd.NaT, val], dtype=object)
        result = lib.maybe_convert_objects(
            vals,
            convert_non_numeric=True,
            dtype_if_all_nat=dtype,
        )
        assert result.dtype == dtype
        assert np.isnat(result).all()

        result = lib.maybe_convert_objects(
            vals[::-1],
            convert_non_numeric=True,
            dtype_if_all_nat=dtype,
        )
        assert result.dtype == dtype
        assert np.isnat(result).all()

    @pytest.mark.parametrize(
        "value, expected_dtype",
        [
            # see gh-4471
            ([2**63], np.uint64),
            # NumPy bug: can't compare uint64 to int64, as that
            # results in both casting to float64, so we should
            # make sure that this function is robust against it
            ([np.uint64(2**63)], np.uint64),
            ([2, -1], np.int64),
            ([2**63, -1], object),
            # GH#47294
            ([np.uint8(1)], np.uint8),
            ([np.uint16(1)], np.uint16),
            ([np.uint32(1)], np.uint32),
            ([np.uint64(1)], np.uint64),
            ([np.uint8(2), np.uint16(1)], np.uint16),
            ([np.uint32(2), np.uint16(1)], np.uint32),
            ([np.uint32(2), -1], object),
            ([np.uint32(2), 1], np.uint64),
            ([np.uint32(2), np.int32(1)], object),
        ],
    )
    def test_maybe_convert_objects_uint(self, value, expected_dtype):
        arr = np.array(value, dtype=object)
        exp = np.array(value, dtype=expected_dtype)
        tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)

    def test_maybe_convert_objects_datetime(self):
        # GH27438
        # Mixed datetime64/timedelta64 stays object...
        arr = np.array(
            [np.datetime64("2000-01-01"), np.timedelta64(1, "s")], dtype=object
        )
        exp = arr.copy()
        out = lib.maybe_convert_objects(arr, convert_non_numeric=True)
        tm.assert_numpy_array_equal(out, exp)

        # ...but NaT alongside timedelta64 converts to m8[ns].
        arr = np.array([pd.NaT, np.timedelta64(1, "s")], dtype=object)
        exp = np.array([np.timedelta64("NaT"), np.timedelta64(1, "s")], dtype="m8[ns]")
        out = lib.maybe_convert_objects(arr, convert_non_numeric=True)
        tm.assert_numpy_array_equal(out, exp)

        # with convert_non_numeric=True, the nan is a valid NA value for td64
        arr = np.array([np.timedelta64(1, "s"), np.nan], dtype=object)
        exp = exp[::-1]
        out = lib.maybe_convert_objects(arr, convert_non_numeric=True)
        tm.assert_numpy_array_equal(out, exp)

    def test_maybe_convert_objects_dtype_if_all_nat(self):
        arr = np.array([pd.NaT, pd.NaT], dtype=object)
        out = lib.maybe_convert_objects(arr, convert_non_numeric=True)
        # no dtype_if_all_nat passed -> we dont guess
        tm.assert_numpy_array_equal(out, arr)

        out = lib.maybe_convert_objects(
            arr,
            convert_non_numeric=True,
            dtype_if_all_nat=np.dtype("timedelta64[ns]"),
        )
        exp = np.array(["NaT", "NaT"], dtype="timedelta64[ns]")
        tm.assert_numpy_array_equal(out, exp)

        out = lib.maybe_convert_objects(
            arr,
            convert_non_numeric=True,
            dtype_if_all_nat=np.dtype("datetime64[ns]"),
        )
        exp = np.array(["NaT", "NaT"], dtype="datetime64[ns]")
        tm.assert_numpy_array_equal(out, exp)

    def test_maybe_convert_objects_dtype_if_all_nat_invalid(self):
        # we accept datetime64[ns], timedelta64[ns], and EADtype
        arr = np.array([pd.NaT, pd.NaT], dtype=object)

        with pytest.raises(ValueError, match="int64"):
            lib.maybe_convert_objects(
                arr,
                convert_non_numeric=True,
                dtype_if_all_nat=np.dtype("int64"),
            )

    @pytest.mark.parametrize("dtype", ["datetime64[ns]", "timedelta64[ns]"])
    def test_maybe_convert_objects_datetime_overflow_safe(self, dtype):
        stamp = datetime(2363, 10, 4)  # Enterprise-D launch date
        if dtype == "timedelta64[ns]":
            stamp = stamp - datetime(1970, 1, 1)
        arr = np.array([stamp], dtype=object)

        out = lib.maybe_convert_objects(arr, convert_non_numeric=True)
        # no OutOfBoundsDatetime/OutOfBoundsTimedeltas
        tm.assert_numpy_array_equal(out, arr)

    def test_maybe_convert_objects_mixed_datetimes(self):
        # Inference must be order-independent across all NA/datetime mixes.
        ts = Timestamp("now")
        vals = [ts, ts.to_pydatetime(), ts.to_datetime64(), pd.NaT, np.nan, None]

        for data in itertools.permutations(vals):
            data = np.array(list(data), dtype=object)
            expected = DatetimeIndex(data)._data._ndarray
            result = lib.maybe_convert_objects(data, convert_non_numeric=True)
            tm.assert_numpy_array_equal(result, expected)

    def test_maybe_convert_objects_timedelta64_nat(self):
        obj = np.timedelta64("NaT", "ns")
        arr = np.array([obj], dtype=object)
        assert arr[0] is obj

        result = lib.maybe_convert_objects(arr, convert_non_numeric=True)

        expected = np.array([obj], dtype="m8[ns]")
        tm.assert_numpy_array_equal(result, expected)

    @pytest.mark.parametrize(
        "exp",
        [
            IntegerArray(np.array([2, 0], dtype="i8"), np.array([False, True])),
            IntegerArray(np.array([2, 0], dtype="int64"), np.array([False, True])),
        ],
    )
    def test_maybe_convert_objects_nullable_integer(self, exp):
        # GH27335
        arr = np.array([2, np.nan], dtype=object)
        result = lib.maybe_convert_objects(arr, convert_to_nullable_dtype=True)

        tm.assert_extension_array_equal(result, exp)

    @pytest.mark.parametrize(
        "dtype, val", [("int64", 1), ("uint64", np.iinfo(np.int64).max + 1)]
    )
    def test_maybe_convert_objects_nullable_none(self, dtype, val):
        # GH#50043
        arr = np.array([val, None, 3], dtype="object")
        result = lib.maybe_convert_objects(arr, convert_to_nullable_dtype=True)
        expected = IntegerArray(
            np.array([val, 0, 3], dtype=dtype), np.array([False, True, False])
        )
        tm.assert_extension_array_equal(result, expected)

    @pytest.mark.parametrize(
        "convert_to_masked_nullable, exp",
        [
            (True, IntegerArray(np.array([2, 0], dtype="i8"), np.array([False, True]))),
            (False, np.array([2, np.nan], dtype="float64")),
        ],
    )
    def test_maybe_convert_numeric_nullable_integer(
        self, convert_to_masked_nullable, exp
    ):
        # GH 40687
        arr = np.array([2, np.nan], dtype=object)
        result = lib.maybe_convert_numeric(
            arr, set(), convert_to_masked_nullable=convert_to_masked_nullable
        )
        if convert_to_masked_nullable:
            result = IntegerArray(*result)
            tm.assert_extension_array_equal(result, exp)
        else:
            result = result[0]
            tm.assert_numpy_array_equal(result, exp)

    @pytest.mark.parametrize(
        "convert_to_masked_nullable, exp",
        [
            (
                True,
                FloatingArray(
                    np.array([2.0, 0.0], dtype="float64"), np.array([False, True])
                ),
            ),
            (False, np.array([2.0, np.nan], dtype="float64")),
        ],
    )
    def test_maybe_convert_numeric_floating_array(
        self, convert_to_masked_nullable, exp
    ):
        # GH 40687
        arr = np.array([2.0, np.nan], dtype=object)
        result = lib.maybe_convert_numeric(
            arr, set(), convert_to_masked_nullable=convert_to_masked_nullable
        )
        if convert_to_masked_nullable:
            tm.assert_extension_array_equal(FloatingArray(*result), exp)
        else:
            result = result[0]
            tm.assert_numpy_array_equal(result, exp)

    def test_maybe_convert_objects_bool_nan(self):
        # GH32146
        # bool + NaN stays object under safe conversion.
        ind = Index([True, False, np.nan], dtype=object)
        exp = np.array([True, False, np.nan], dtype=object)
        out = lib.maybe_convert_objects(ind.values, safe=1)
        tm.assert_numpy_array_equal(out, exp)

    def test_maybe_convert_objects_nullable_boolean(self):
        # GH50047
        arr = np.array([True, False], dtype=object)
        exp = np.array([True, False])
        out = lib.maybe_convert_objects(arr, convert_to_nullable_dtype=True)
        tm.assert_numpy_array_equal(out, exp)

        # pd.NaT is not a valid boolean NA here, so the array stays object.
        arr = np.array([True, False, pd.NaT], dtype=object)
        exp = np.array([True, False, pd.NaT], dtype=object)
        out = lib.maybe_convert_objects(arr, convert_to_nullable_dtype=True)
        tm.assert_numpy_array_equal(out, exp)

    @pytest.mark.parametrize("val", [None, np.nan])
    def test_maybe_convert_objects_nullable_boolean_na(self, val):
        # GH50047
        arr = np.array([True, False, val], dtype=object)
        exp = BooleanArray(
            np.array([True, False, False]), np.array([False, False, True])
        )
        out = lib.maybe_convert_objects(arr, convert_to_nullable_dtype=True)
        tm.assert_extension_array_equal(out, exp)

    @pytest.mark.parametrize(
        "data0",
        [
            True,
            1,
            1.0,
            1.0 + 1.0j,
            np.int8(1),
            np.int16(1),
            np.int32(1),
            np.int64(1),
            np.float16(1),
            np.float32(1),
            np.float64(1),
            np.complex64(1),
            np.complex128(1),
        ],
    )
    @pytest.mark.parametrize(
        "data1",
        [
            True,
            1,
            1.0,
            1.0 + 1.0j,
            np.int8(1),
            np.int16(1),
            np.int32(1),
            np.int64(1),
            np.float16(1),
            np.float32(1),
            np.float64(1),
            np.complex64(1),
            np.complex128(1),
        ],
    )
    def test_maybe_convert_objects_itemsize(self, data0, data1):
        # GH 40908
        # The resulting dtype's kind/itemsize follows numpy promotion rules,
        # computed independently below and compared against the conversion.
        data = [data0, data1]
        arr = np.array(data, dtype="object")

        common_kind = np.result_type(type(data0), type(data1)).kind
        kind0 = "python" if not hasattr(data0, "dtype") else data0.dtype.kind
        kind1 = "python" if not hasattr(data1, "dtype") else data1.dtype.kind
        if kind0 != "python" and kind1 != "python":
            kind = common_kind
            itemsize = max(data0.dtype.itemsize, data1.dtype.itemsize)
        elif is_bool(data0) or is_bool(data1):
            kind = "bool" if (is_bool(data0) and is_bool(data1)) else "object"
            itemsize = ""
        elif is_complex(data0) or is_complex(data1):
            kind = common_kind
            itemsize = 16
        else:
            kind = common_kind
            itemsize = 8

        expected = np.array(data, dtype=f"{kind}{itemsize}")
        result = lib.maybe_convert_objects(arr)
        tm.assert_numpy_array_equal(result, expected)

    def test_mixed_dtypes_remain_object_array(self):
        # GH14956
        arr = np.array([datetime(2015, 1, 1, tzinfo=pytz.utc), 1], dtype=object)
        result = lib.maybe_convert_objects(arr, convert_non_numeric=True)
        tm.assert_numpy_array_equal(result, arr)

    @pytest.mark.parametrize(
        "idx",
        [
            pd.IntervalIndex.from_breaks(range(5), closed="both"),
            pd.period_range("2016-01-01", periods=3, freq="D"),
        ],
    )
    def test_maybe_convert_objects_ea(self, idx):
        # Interval/Period objects round-trip into their extension arrays.
        result = lib.maybe_convert_objects(
            np.array(idx, dtype=object),
            convert_non_numeric=True,
        )
        tm.assert_extension_array_equal(result, idx._data)
1041
+
1042
+
1043
+ class TestTypeInference:
1044
+ # Dummy class used for testing with Python objects
1045
+ class Dummy:
1046
+ pass
1047
+
1048
+ def test_inferred_dtype_fixture(self, any_skipna_inferred_dtype):
1049
+ # see pandas/conftest.py
1050
+ inferred_dtype, values = any_skipna_inferred_dtype
1051
+
1052
+ # make sure the inferred dtype of the fixture is as requested
1053
+ assert inferred_dtype == lib.infer_dtype(values, skipna=True)
1054
+
1055
+ @pytest.mark.parametrize("skipna", [True, False])
1056
+ def test_length_zero(self, skipna):
1057
+ result = lib.infer_dtype(np.array([], dtype="i4"), skipna=skipna)
1058
+ assert result == "integer"
1059
+
1060
+ result = lib.infer_dtype([], skipna=skipna)
1061
+ assert result == "empty"
1062
+
1063
+ # GH 18004
1064
+ arr = np.array([np.array([], dtype=object), np.array([], dtype=object)])
1065
+ result = lib.infer_dtype(arr, skipna=skipna)
1066
+ assert result == "empty"
1067
+
1068
+ def test_integers(self):
1069
+ arr = np.array([1, 2, 3, np.int64(4), np.int32(5)], dtype="O")
1070
+ result = lib.infer_dtype(arr, skipna=True)
1071
+ assert result == "integer"
1072
+
1073
+ arr = np.array([1, 2, 3, np.int64(4), np.int32(5), "foo"], dtype="O")
1074
+ result = lib.infer_dtype(arr, skipna=True)
1075
+ assert result == "mixed-integer"
1076
+
1077
+ arr = np.array([1, 2, 3, 4, 5], dtype="i4")
1078
+ result = lib.infer_dtype(arr, skipna=True)
1079
+ assert result == "integer"
1080
+
1081
+ @pytest.mark.parametrize(
1082
+ "arr, skipna",
1083
+ [
1084
+ (np.array([1, 2, np.nan, np.nan, 3], dtype="O"), False),
1085
+ (np.array([1, 2, np.nan, np.nan, 3], dtype="O"), True),
1086
+ (np.array([1, 2, 3, np.int64(4), np.int32(5), np.nan], dtype="O"), False),
1087
+ (np.array([1, 2, 3, np.int64(4), np.int32(5), np.nan], dtype="O"), True),
1088
+ ],
1089
+ )
1090
+ def test_integer_na(self, arr, skipna):
1091
+ # GH 27392
1092
+ result = lib.infer_dtype(arr, skipna=skipna)
1093
+ expected = "integer" if skipna else "integer-na"
1094
+ assert result == expected
1095
+
1096
+ def test_infer_dtype_skipna_default(self):
1097
+ # infer_dtype `skipna` default deprecated in GH#24050,
1098
+ # changed to True in GH#29876
1099
+ arr = np.array([1, 2, 3, np.nan], dtype=object)
1100
+
1101
+ result = lib.infer_dtype(arr)
1102
+ assert result == "integer"
1103
+
1104
+ def test_bools(self):
1105
+ arr = np.array([True, False, True, True, True], dtype="O")
1106
+ result = lib.infer_dtype(arr, skipna=True)
1107
+ assert result == "boolean"
1108
+
1109
+ arr = np.array([np.bool_(True), np.bool_(False)], dtype="O")
1110
+ result = lib.infer_dtype(arr, skipna=True)
1111
+ assert result == "boolean"
1112
+
1113
+ arr = np.array([True, False, True, "foo"], dtype="O")
1114
+ result = lib.infer_dtype(arr, skipna=True)
1115
+ assert result == "mixed"
1116
+
1117
+ arr = np.array([True, False, True], dtype=bool)
1118
+ result = lib.infer_dtype(arr, skipna=True)
1119
+ assert result == "boolean"
1120
+
1121
+ arr = np.array([True, np.nan, False], dtype="O")
1122
+ result = lib.infer_dtype(arr, skipna=True)
1123
+ assert result == "boolean"
1124
+
1125
+ result = lib.infer_dtype(arr, skipna=False)
1126
+ assert result == "mixed"
1127
+
1128
+ def test_floats(self):
1129
+ arr = np.array([1.0, 2.0, 3.0, np.float64(4), np.float32(5)], dtype="O")
1130
+ result = lib.infer_dtype(arr, skipna=True)
1131
+ assert result == "floating"
1132
+
1133
+ arr = np.array([1, 2, 3, np.float64(4), np.float32(5), "foo"], dtype="O")
1134
+ result = lib.infer_dtype(arr, skipna=True)
1135
+ assert result == "mixed-integer"
1136
+
1137
+ arr = np.array([1, 2, 3, 4, 5], dtype="f4")
1138
+ result = lib.infer_dtype(arr, skipna=True)
1139
+ assert result == "floating"
1140
+
1141
+ arr = np.array([1, 2, 3, 4, 5], dtype="f8")
1142
+ result = lib.infer_dtype(arr, skipna=True)
1143
+ assert result == "floating"
1144
+
1145
+ def test_decimals(self):
1146
+ # GH15690
1147
+ arr = np.array([Decimal(1), Decimal(2), Decimal(3)])
1148
+ result = lib.infer_dtype(arr, skipna=True)
1149
+ assert result == "decimal"
1150
+
1151
+ arr = np.array([1.0, 2.0, Decimal(3)])
1152
+ result = lib.infer_dtype(arr, skipna=True)
1153
+ assert result == "mixed"
1154
+
1155
+ result = lib.infer_dtype(arr[::-1], skipna=True)
1156
+ assert result == "mixed"
1157
+
1158
+ arr = np.array([Decimal(1), Decimal("NaN"), Decimal(3)])
1159
+ result = lib.infer_dtype(arr, skipna=True)
1160
+ assert result == "decimal"
1161
+
1162
+ arr = np.array([Decimal(1), np.nan, Decimal(3)], dtype="O")
1163
+ result = lib.infer_dtype(arr, skipna=True)
1164
+ assert result == "decimal"
1165
+
1166
+ # complex is compatible with nan, so skipna has no effect
1167
+ @pytest.mark.parametrize("skipna", [True, False])
1168
+ def test_complex(self, skipna):
1169
+ # gets cast to complex on array construction
1170
+ arr = np.array([1.0, 2.0, 1 + 1j])
1171
+ result = lib.infer_dtype(arr, skipna=skipna)
1172
+ assert result == "complex"
1173
+
1174
+ arr = np.array([1.0, 2.0, 1 + 1j], dtype="O")
1175
+ result = lib.infer_dtype(arr, skipna=skipna)
1176
+ assert result == "mixed"
1177
+
1178
+ result = lib.infer_dtype(arr[::-1], skipna=skipna)
1179
+ assert result == "mixed"
1180
+
1181
+ # gets cast to complex on array construction
1182
+ arr = np.array([1, np.nan, 1 + 1j])
1183
+ result = lib.infer_dtype(arr, skipna=skipna)
1184
+ assert result == "complex"
1185
+
1186
+ arr = np.array([1.0, np.nan, 1 + 1j], dtype="O")
1187
+ result = lib.infer_dtype(arr, skipna=skipna)
1188
+ assert result == "mixed"
1189
+
1190
+ # complex with nans stays complex
1191
+ arr = np.array([1 + 1j, np.nan, 3 + 3j], dtype="O")
1192
+ result = lib.infer_dtype(arr, skipna=skipna)
1193
+ assert result == "complex"
1194
+
1195
+ # test smaller complex dtype; will pass through _try_infer_map fastpath
1196
+ arr = np.array([1 + 1j, np.nan, 3 + 3j], dtype=np.complex64)
1197
+ result = lib.infer_dtype(arr, skipna=skipna)
1198
+ assert result == "complex"
1199
+
1200
+ def test_string(self):
1201
+ pass
1202
+
1203
+ def test_unicode(self):
1204
+ arr = ["a", np.nan, "c"]
1205
+ result = lib.infer_dtype(arr, skipna=False)
1206
+ # This currently returns "mixed", but it's not clear that's optimal.
1207
+ # This could also return "string" or "mixed-string"
1208
+ assert result == "mixed"
1209
+
1210
+ # even though we use skipna, we are only skipping those NAs that are
1211
+ # considered matching by is_string_array
1212
+ arr = ["a", np.nan, "c"]
1213
+ result = lib.infer_dtype(arr, skipna=True)
1214
+ assert result == "string"
1215
+
1216
+ arr = ["a", pd.NA, "c"]
1217
+ result = lib.infer_dtype(arr, skipna=True)
1218
+ assert result == "string"
1219
+
1220
+ arr = ["a", pd.NaT, "c"]
1221
+ result = lib.infer_dtype(arr, skipna=True)
1222
+ assert result == "mixed"
1223
+
1224
+ arr = ["a", "c"]
1225
+ result = lib.infer_dtype(arr, skipna=False)
1226
+ assert result == "string"
1227
+
1228
+ @pytest.mark.parametrize(
1229
+ "dtype, missing, skipna, expected",
1230
+ [
1231
+ (float, np.nan, False, "floating"),
1232
+ (float, np.nan, True, "floating"),
1233
+ (object, np.nan, False, "floating"),
1234
+ (object, np.nan, True, "empty"),
1235
+ (object, None, False, "mixed"),
1236
+ (object, None, True, "empty"),
1237
+ ],
1238
+ )
1239
+ @pytest.mark.parametrize("box", [Series, np.array])
1240
+ def test_object_empty(self, box, missing, dtype, skipna, expected):
1241
+ # GH 23421
1242
+ arr = box([missing, missing], dtype=dtype)
1243
+
1244
+ result = lib.infer_dtype(arr, skipna=skipna)
1245
+ assert result == expected
1246
+
1247
+ def test_datetime(self):
1248
+ dates = [datetime(2012, 1, x) for x in range(1, 20)]
1249
+ index = Index(dates)
1250
+ assert index.inferred_type == "datetime64"
1251
+
1252
+ def test_infer_dtype_datetime64(self):
1253
+ arr = np.array(
1254
+ [np.datetime64("2011-01-01"), np.datetime64("2011-01-01")], dtype=object
1255
+ )
1256
+ assert lib.infer_dtype(arr, skipna=True) == "datetime64"
1257
+
1258
+ @pytest.mark.parametrize("na_value", [pd.NaT, np.nan])
1259
+ def test_infer_dtype_datetime64_with_na(self, na_value):
1260
+ # starts with nan
1261
+ arr = np.array([na_value, np.datetime64("2011-01-02")])
1262
+ assert lib.infer_dtype(arr, skipna=True) == "datetime64"
1263
+
1264
+ arr = np.array([na_value, np.datetime64("2011-01-02"), na_value])
1265
+ assert lib.infer_dtype(arr, skipna=True) == "datetime64"
1266
+
1267
+ @pytest.mark.parametrize(
1268
+ "arr",
1269
+ [
1270
+ np.array(
1271
+ [np.timedelta64("nat"), np.datetime64("2011-01-02")], dtype=object
1272
+ ),
1273
+ np.array(
1274
+ [np.datetime64("2011-01-02"), np.timedelta64("nat")], dtype=object
1275
+ ),
1276
+ np.array([np.datetime64("2011-01-01"), Timestamp("2011-01-02")]),
1277
+ np.array([Timestamp("2011-01-02"), np.datetime64("2011-01-01")]),
1278
+ np.array([np.nan, Timestamp("2011-01-02"), 1.1]),
1279
+ np.array([np.nan, "2011-01-01", Timestamp("2011-01-02")], dtype=object),
1280
+ np.array([np.datetime64("nat"), np.timedelta64(1, "D")], dtype=object),
1281
+ np.array([np.timedelta64(1, "D"), np.datetime64("nat")], dtype=object),
1282
+ ],
1283
+ )
1284
+ def test_infer_datetimelike_dtype_mixed(self, arr):
1285
+ assert lib.infer_dtype(arr, skipna=False) == "mixed"
1286
+
1287
+ def test_infer_dtype_mixed_integer(self):
1288
+ arr = np.array([np.nan, Timestamp("2011-01-02"), 1])
1289
+ assert lib.infer_dtype(arr, skipna=True) == "mixed-integer"
1290
+
1291
+ @pytest.mark.parametrize(
1292
+ "arr",
1293
+ [
1294
+ np.array([Timestamp("2011-01-01"), Timestamp("2011-01-02")]),
1295
+ np.array([datetime(2011, 1, 1), datetime(2012, 2, 1)]),
1296
+ np.array([datetime(2011, 1, 1), Timestamp("2011-01-02")]),
1297
+ ],
1298
+ )
1299
+ def test_infer_dtype_datetime(self, arr):
1300
+ assert lib.infer_dtype(arr, skipna=True) == "datetime"
1301
+
1302
+ @pytest.mark.parametrize("na_value", [pd.NaT, np.nan])
1303
+ @pytest.mark.parametrize(
1304
+ "time_stamp", [Timestamp("2011-01-01"), datetime(2011, 1, 1)]
1305
+ )
1306
+ def test_infer_dtype_datetime_with_na(self, na_value, time_stamp):
1307
+ # starts with nan
1308
+ arr = np.array([na_value, time_stamp])
1309
+ assert lib.infer_dtype(arr, skipna=True) == "datetime"
1310
+
1311
+ arr = np.array([na_value, time_stamp, na_value])
1312
+ assert lib.infer_dtype(arr, skipna=True) == "datetime"
1313
+
1314
+ @pytest.mark.parametrize(
1315
+ "arr",
1316
+ [
1317
+ np.array([Timedelta("1 days"), Timedelta("2 days")]),
1318
+ np.array([np.timedelta64(1, "D"), np.timedelta64(2, "D")], dtype=object),
1319
+ np.array([timedelta(1), timedelta(2)]),
1320
+ ],
1321
+ )
1322
+ def test_infer_dtype_timedelta(self, arr):
1323
+ assert lib.infer_dtype(arr, skipna=True) == "timedelta"
1324
+
1325
+ @pytest.mark.parametrize("na_value", [pd.NaT, np.nan])
1326
+ @pytest.mark.parametrize(
1327
+ "delta", [Timedelta("1 days"), np.timedelta64(1, "D"), timedelta(1)]
1328
+ )
1329
+ def test_infer_dtype_timedelta_with_na(self, na_value, delta):
1330
+ # starts with nan
1331
+ arr = np.array([na_value, delta])
1332
+ assert lib.infer_dtype(arr, skipna=True) == "timedelta"
1333
+
1334
+ arr = np.array([na_value, delta, na_value])
1335
+ assert lib.infer_dtype(arr, skipna=True) == "timedelta"
1336
+
1337
+ def test_infer_dtype_period(self):
1338
+ # GH 13664
1339
+ arr = np.array([Period("2011-01", freq="D"), Period("2011-02", freq="D")])
1340
+ assert lib.infer_dtype(arr, skipna=True) == "period"
1341
+
1342
+ # non-homogeneous freqs -> mixed
1343
+ arr = np.array([Period("2011-01", freq="D"), Period("2011-02", freq="M")])
1344
+ assert lib.infer_dtype(arr, skipna=True) == "mixed"
1345
+
1346
+ @pytest.mark.parametrize("klass", [pd.array, Series, Index])
1347
+ @pytest.mark.parametrize("skipna", [True, False])
1348
+ def test_infer_dtype_period_array(self, klass, skipna):
1349
+ # https://github.com/pandas-dev/pandas/issues/23553
1350
+ values = klass(
1351
+ [
1352
+ Period("2011-01-01", freq="D"),
1353
+ Period("2011-01-02", freq="D"),
1354
+ pd.NaT,
1355
+ ]
1356
+ )
1357
+ assert lib.infer_dtype(values, skipna=skipna) == "period"
1358
+
1359
+ # periods but mixed freq
1360
+ values = klass(
1361
+ [
1362
+ Period("2011-01-01", freq="D"),
1363
+ Period("2011-01-02", freq="M"),
1364
+ pd.NaT,
1365
+ ]
1366
+ )
1367
+ # with pd.array this becomes NumpyExtensionArray which ends up
1368
+ # as "unknown-array"
1369
+ exp = "unknown-array" if klass is pd.array else "mixed"
1370
+ assert lib.infer_dtype(values, skipna=skipna) == exp
1371
+
1372
+ def test_infer_dtype_period_mixed(self):
1373
+ arr = np.array(
1374
+ [Period("2011-01", freq="M"), np.datetime64("nat")], dtype=object
1375
+ )
1376
+ assert lib.infer_dtype(arr, skipna=False) == "mixed"
1377
+
1378
+ arr = np.array(
1379
+ [np.datetime64("nat"), Period("2011-01", freq="M")], dtype=object
1380
+ )
1381
+ assert lib.infer_dtype(arr, skipna=False) == "mixed"
1382
+
1383
+ @pytest.mark.parametrize("na_value", [pd.NaT, np.nan])
1384
+ def test_infer_dtype_period_with_na(self, na_value):
1385
+ # starts with nan
1386
+ arr = np.array([na_value, Period("2011-01", freq="D")])
1387
+ assert lib.infer_dtype(arr, skipna=True) == "period"
1388
+
1389
+ arr = np.array([na_value, Period("2011-01", freq="D"), na_value])
1390
+ assert lib.infer_dtype(arr, skipna=True) == "period"
1391
+
1392
+ def test_infer_dtype_all_nan_nat_like(self):
1393
+ arr = np.array([np.nan, np.nan])
1394
+ assert lib.infer_dtype(arr, skipna=True) == "floating"
1395
+
1396
+ # nan and None mix are result in mixed
1397
+ arr = np.array([np.nan, np.nan, None])
1398
+ assert lib.infer_dtype(arr, skipna=True) == "empty"
1399
+ assert lib.infer_dtype(arr, skipna=False) == "mixed"
1400
+
1401
+ arr = np.array([None, np.nan, np.nan])
1402
+ assert lib.infer_dtype(arr, skipna=True) == "empty"
1403
+ assert lib.infer_dtype(arr, skipna=False) == "mixed"
1404
+
1405
+ # pd.NaT
1406
+ arr = np.array([pd.NaT])
1407
+ assert lib.infer_dtype(arr, skipna=False) == "datetime"
1408
+
1409
+ arr = np.array([pd.NaT, np.nan])
1410
+ assert lib.infer_dtype(arr, skipna=False) == "datetime"
1411
+
1412
+ arr = np.array([np.nan, pd.NaT])
1413
+ assert lib.infer_dtype(arr, skipna=False) == "datetime"
1414
+
1415
+ arr = np.array([np.nan, pd.NaT, np.nan])
1416
+ assert lib.infer_dtype(arr, skipna=False) == "datetime"
1417
+
1418
+ arr = np.array([None, pd.NaT, None])
1419
+ assert lib.infer_dtype(arr, skipna=False) == "datetime"
1420
+
1421
+ # np.datetime64(nat)
1422
+ arr = np.array([np.datetime64("nat")])
1423
+ assert lib.infer_dtype(arr, skipna=False) == "datetime64"
1424
+
1425
+ for n in [np.nan, pd.NaT, None]:
1426
+ arr = np.array([n, np.datetime64("nat"), n])
1427
+ assert lib.infer_dtype(arr, skipna=False) == "datetime64"
1428
+
1429
+ arr = np.array([pd.NaT, n, np.datetime64("nat"), n])
1430
+ assert lib.infer_dtype(arr, skipna=False) == "datetime64"
1431
+
1432
+ arr = np.array([np.timedelta64("nat")], dtype=object)
1433
+ assert lib.infer_dtype(arr, skipna=False) == "timedelta"
1434
+
1435
+ for n in [np.nan, pd.NaT, None]:
1436
+ arr = np.array([n, np.timedelta64("nat"), n])
1437
+ assert lib.infer_dtype(arr, skipna=False) == "timedelta"
1438
+
1439
+ arr = np.array([pd.NaT, n, np.timedelta64("nat"), n])
1440
+ assert lib.infer_dtype(arr, skipna=False) == "timedelta"
1441
+
1442
+ # datetime / timedelta mixed
1443
+ arr = np.array([pd.NaT, np.datetime64("nat"), np.timedelta64("nat"), np.nan])
1444
+ assert lib.infer_dtype(arr, skipna=False) == "mixed"
1445
+
1446
+ arr = np.array([np.timedelta64("nat"), np.datetime64("nat")], dtype=object)
1447
+ assert lib.infer_dtype(arr, skipna=False) == "mixed"
1448
+
1449
+ def test_is_datetimelike_array_all_nan_nat_like(self):
1450
+ arr = np.array([np.nan, pd.NaT, np.datetime64("nat")])
1451
+ assert lib.is_datetime_array(arr)
1452
+ assert lib.is_datetime64_array(arr)
1453
+ assert not lib.is_timedelta_or_timedelta64_array(arr)
1454
+
1455
+ arr = np.array([np.nan, pd.NaT, np.timedelta64("nat")])
1456
+ assert not lib.is_datetime_array(arr)
1457
+ assert not lib.is_datetime64_array(arr)
1458
+ assert lib.is_timedelta_or_timedelta64_array(arr)
1459
+
1460
+ arr = np.array([np.nan, pd.NaT, np.datetime64("nat"), np.timedelta64("nat")])
1461
+ assert not lib.is_datetime_array(arr)
1462
+ assert not lib.is_datetime64_array(arr)
1463
+ assert not lib.is_timedelta_or_timedelta64_array(arr)
1464
+
1465
+ arr = np.array([np.nan, pd.NaT])
1466
+ assert lib.is_datetime_array(arr)
1467
+ assert lib.is_datetime64_array(arr)
1468
+ assert lib.is_timedelta_or_timedelta64_array(arr)
1469
+
1470
+ arr = np.array([np.nan, np.nan], dtype=object)
1471
+ assert not lib.is_datetime_array(arr)
1472
+ assert not lib.is_datetime64_array(arr)
1473
+ assert not lib.is_timedelta_or_timedelta64_array(arr)
1474
+
1475
+ assert lib.is_datetime_with_singletz_array(
1476
+ np.array(
1477
+ [
1478
+ Timestamp("20130101", tz="US/Eastern"),
1479
+ Timestamp("20130102", tz="US/Eastern"),
1480
+ ],
1481
+ dtype=object,
1482
+ )
1483
+ )
1484
+ assert not lib.is_datetime_with_singletz_array(
1485
+ np.array(
1486
+ [
1487
+ Timestamp("20130101", tz="US/Eastern"),
1488
+ Timestamp("20130102", tz="CET"),
1489
+ ],
1490
+ dtype=object,
1491
+ )
1492
+ )
1493
+
1494
+ @pytest.mark.parametrize(
1495
+ "func",
1496
+ [
1497
+ "is_datetime_array",
1498
+ "is_datetime64_array",
1499
+ "is_bool_array",
1500
+ "is_timedelta_or_timedelta64_array",
1501
+ "is_date_array",
1502
+ "is_time_array",
1503
+ "is_interval_array",
1504
+ ],
1505
+ )
1506
+ def test_other_dtypes_for_array(self, func):
1507
+ func = getattr(lib, func)
1508
+ arr = np.array(["foo", "bar"])
1509
+ assert not func(arr)
1510
+ assert not func(arr.reshape(2, 1))
1511
+
1512
+ arr = np.array([1, 2])
1513
+ assert not func(arr)
1514
+ assert not func(arr.reshape(2, 1))
1515
+
1516
+ def test_date(self):
1517
+ dates = [date(2012, 1, day) for day in range(1, 20)]
1518
+ index = Index(dates)
1519
+ assert index.inferred_type == "date"
1520
+
1521
+ dates = [date(2012, 1, day) for day in range(1, 20)] + [np.nan]
1522
+ result = lib.infer_dtype(dates, skipna=False)
1523
+ assert result == "mixed"
1524
+
1525
+ result = lib.infer_dtype(dates, skipna=True)
1526
+ assert result == "date"
1527
+
1528
+ @pytest.mark.parametrize(
1529
+ "values",
1530
+ [
1531
+ [date(2020, 1, 1), Timestamp("2020-01-01")],
1532
+ [Timestamp("2020-01-01"), date(2020, 1, 1)],
1533
+ [date(2020, 1, 1), pd.NaT],
1534
+ [pd.NaT, date(2020, 1, 1)],
1535
+ ],
1536
+ )
1537
+ @pytest.mark.parametrize("skipna", [True, False])
1538
+ def test_infer_dtype_date_order_invariant(self, values, skipna):
1539
+ # https://github.com/pandas-dev/pandas/issues/33741
1540
+ result = lib.infer_dtype(values, skipna=skipna)
1541
+ assert result == "date"
1542
+
1543
+ def test_is_numeric_array(self):
1544
+ assert lib.is_float_array(np.array([1, 2.0]))
1545
+ assert lib.is_float_array(np.array([1, 2.0, np.nan]))
1546
+ assert not lib.is_float_array(np.array([1, 2]))
1547
+
1548
+ assert lib.is_integer_array(np.array([1, 2]))
1549
+ assert not lib.is_integer_array(np.array([1, 2.0]))
1550
+
1551
+ def test_is_string_array(self):
1552
+ # We should only be accepting pd.NA, np.nan,
1553
+ # other floating point nans e.g. float('nan')]
1554
+ # when skipna is True.
1555
+ assert lib.is_string_array(np.array(["foo", "bar"]))
1556
+ assert not lib.is_string_array(
1557
+ np.array(["foo", "bar", pd.NA], dtype=object), skipna=False
1558
+ )
1559
+ assert lib.is_string_array(
1560
+ np.array(["foo", "bar", pd.NA], dtype=object), skipna=True
1561
+ )
1562
+ # we allow NaN/None in the StringArray constructor, so its allowed here
1563
+ assert lib.is_string_array(
1564
+ np.array(["foo", "bar", None], dtype=object), skipna=True
1565
+ )
1566
+ assert lib.is_string_array(
1567
+ np.array(["foo", "bar", np.nan], dtype=object), skipna=True
1568
+ )
1569
+ # But not e.g. datetimelike or Decimal NAs
1570
+ assert not lib.is_string_array(
1571
+ np.array(["foo", "bar", pd.NaT], dtype=object), skipna=True
1572
+ )
1573
+ assert not lib.is_string_array(
1574
+ np.array(["foo", "bar", np.datetime64("NaT")], dtype=object), skipna=True
1575
+ )
1576
+ assert not lib.is_string_array(
1577
+ np.array(["foo", "bar", Decimal("NaN")], dtype=object), skipna=True
1578
+ )
1579
+
1580
+ assert not lib.is_string_array(
1581
+ np.array(["foo", "bar", None], dtype=object), skipna=False
1582
+ )
1583
+ assert not lib.is_string_array(
1584
+ np.array(["foo", "bar", np.nan], dtype=object), skipna=False
1585
+ )
1586
+ assert not lib.is_string_array(np.array([1, 2]))
1587
+
1588
+ def test_to_object_array_tuples(self):
1589
+ r = (5, 6)
1590
+ values = [r]
1591
+ lib.to_object_array_tuples(values)
1592
+
1593
+ # make sure record array works
1594
+ record = namedtuple("record", "x y")
1595
+ r = record(5, 6)
1596
+ values = [r]
1597
+ lib.to_object_array_tuples(values)
1598
+
1599
+ def test_object(self):
1600
+ # GH 7431
1601
+ # cannot infer more than this as only a single element
1602
+ arr = np.array([None], dtype="O")
1603
+ result = lib.infer_dtype(arr, skipna=False)
1604
+ assert result == "mixed"
1605
+ result = lib.infer_dtype(arr, skipna=True)
1606
+ assert result == "empty"
1607
+
1608
+ def test_to_object_array_width(self):
1609
+ # see gh-13320
1610
+ rows = [[1, 2, 3], [4, 5, 6]]
1611
+
1612
+ expected = np.array(rows, dtype=object)
1613
+ out = lib.to_object_array(rows)
1614
+ tm.assert_numpy_array_equal(out, expected)
1615
+
1616
+ expected = np.array(rows, dtype=object)
1617
+ out = lib.to_object_array(rows, min_width=1)
1618
+ tm.assert_numpy_array_equal(out, expected)
1619
+
1620
+ expected = np.array(
1621
+ [[1, 2, 3, None, None], [4, 5, 6, None, None]], dtype=object
1622
+ )
1623
+ out = lib.to_object_array(rows, min_width=5)
1624
+ tm.assert_numpy_array_equal(out, expected)
1625
+
1626
+ def test_is_period(self):
1627
+ # GH#55264
1628
+ msg = "is_period is deprecated and will be removed in a future version"
1629
+ with tm.assert_produces_warning(FutureWarning, match=msg):
1630
+ assert lib.is_period(Period("2011-01", freq="M"))
1631
+ assert not lib.is_period(PeriodIndex(["2011-01"], freq="M"))
1632
+ assert not lib.is_period(Timestamp("2011-01"))
1633
+ assert not lib.is_period(1)
1634
+ assert not lib.is_period(np.nan)
1635
+
1636
+ def test_is_interval(self):
1637
+ # GH#55264
1638
+ msg = "is_interval is deprecated and will be removed in a future version"
1639
+ item = Interval(1, 2)
1640
+ with tm.assert_produces_warning(FutureWarning, match=msg):
1641
+ assert lib.is_interval(item)
1642
+ assert not lib.is_interval(pd.IntervalIndex([item]))
1643
+ assert not lib.is_interval(pd.IntervalIndex([item])._engine)
1644
+
1645
+ def test_categorical(self):
1646
+ # GH 8974
1647
+ arr = Categorical(list("abc"))
1648
+ result = lib.infer_dtype(arr, skipna=True)
1649
+ assert result == "categorical"
1650
+
1651
+ result = lib.infer_dtype(Series(arr), skipna=True)
1652
+ assert result == "categorical"
1653
+
1654
+ arr = Categorical(list("abc"), categories=["cegfab"], ordered=True)
1655
+ result = lib.infer_dtype(arr, skipna=True)
1656
+ assert result == "categorical"
1657
+
1658
+ result = lib.infer_dtype(Series(arr), skipna=True)
1659
+ assert result == "categorical"
1660
+
1661
+ @pytest.mark.parametrize("asobject", [True, False])
1662
+ def test_interval(self, asobject):
1663
+ idx = pd.IntervalIndex.from_breaks(range(5), closed="both")
1664
+ if asobject:
1665
+ idx = idx.astype(object)
1666
+
1667
+ inferred = lib.infer_dtype(idx, skipna=False)
1668
+ assert inferred == "interval"
1669
+
1670
+ inferred = lib.infer_dtype(idx._data, skipna=False)
1671
+ assert inferred == "interval"
1672
+
1673
+ inferred = lib.infer_dtype(Series(idx, dtype=idx.dtype), skipna=False)
1674
+ assert inferred == "interval"
1675
+
1676
+ @pytest.mark.parametrize("value", [Timestamp(0), Timedelta(0), 0, 0.0])
1677
+ def test_interval_mismatched_closed(self, value):
1678
+ first = Interval(value, value, closed="left")
1679
+ second = Interval(value, value, closed="right")
1680
+
1681
+ # if closed match, we should infer "interval"
1682
+ arr = np.array([first, first], dtype=object)
1683
+ assert lib.infer_dtype(arr, skipna=False) == "interval"
1684
+
1685
+ # if closed dont match, we should _not_ get "interval"
1686
+ arr2 = np.array([first, second], dtype=object)
1687
+ assert lib.infer_dtype(arr2, skipna=False) == "mixed"
1688
+
1689
+ def test_interval_mismatched_subtype(self):
1690
+ first = Interval(0, 1, closed="left")
1691
+ second = Interval(Timestamp(0), Timestamp(1), closed="left")
1692
+ third = Interval(Timedelta(0), Timedelta(1), closed="left")
1693
+
1694
+ arr = np.array([first, second])
1695
+ assert lib.infer_dtype(arr, skipna=False) == "mixed"
1696
+
1697
+ arr = np.array([second, third])
1698
+ assert lib.infer_dtype(arr, skipna=False) == "mixed"
1699
+
1700
+ arr = np.array([first, third])
1701
+ assert lib.infer_dtype(arr, skipna=False) == "mixed"
1702
+
1703
+ # float vs int subdtype are compatible
1704
+ flt_interval = Interval(1.5, 2.5, closed="left")
1705
+ arr = np.array([first, flt_interval], dtype=object)
1706
+ assert lib.infer_dtype(arr, skipna=False) == "interval"
1707
+
1708
+ @pytest.mark.parametrize("klass", [pd.array, Series])
1709
+ @pytest.mark.parametrize("skipna", [True, False])
1710
+ @pytest.mark.parametrize("data", [["a", "b", "c"], ["a", "b", pd.NA]])
1711
+ def test_string_dtype(self, data, skipna, klass, nullable_string_dtype):
1712
+ # StringArray
1713
+ val = klass(data, dtype=nullable_string_dtype)
1714
+ inferred = lib.infer_dtype(val, skipna=skipna)
1715
+ assert inferred == "string"
1716
+
1717
+ @pytest.mark.parametrize("klass", [pd.array, Series])
1718
+ @pytest.mark.parametrize("skipna", [True, False])
1719
+ @pytest.mark.parametrize("data", [[True, False, True], [True, False, pd.NA]])
1720
+ def test_boolean_dtype(self, data, skipna, klass):
1721
+ # BooleanArray
1722
+ val = klass(data, dtype="boolean")
1723
+ inferred = lib.infer_dtype(val, skipna=skipna)
1724
+ assert inferred == "boolean"
1725
+
1726
+
1727
+ class TestNumberScalar:
1728
+ def test_is_number(self):
1729
+ assert is_number(True)
1730
+ assert is_number(1)
1731
+ assert is_number(1.1)
1732
+ assert is_number(1 + 3j)
1733
+ assert is_number(np.int64(1))
1734
+ assert is_number(np.float64(1.1))
1735
+ assert is_number(np.complex128(1 + 3j))
1736
+ assert is_number(np.nan)
1737
+
1738
+ assert not is_number(None)
1739
+ assert not is_number("x")
1740
+ assert not is_number(datetime(2011, 1, 1))
1741
+ assert not is_number(np.datetime64("2011-01-01"))
1742
+ assert not is_number(Timestamp("2011-01-01"))
1743
+ assert not is_number(Timestamp("2011-01-01", tz="US/Eastern"))
1744
+ assert not is_number(timedelta(1000))
1745
+ assert not is_number(Timedelta("1 days"))
1746
+
1747
+ # questionable
1748
+ assert not is_number(np.bool_(False))
1749
+ assert is_number(np.timedelta64(1, "D"))
1750
+
1751
+ def test_is_bool(self):
1752
+ assert is_bool(True)
1753
+ assert is_bool(False)
1754
+ assert is_bool(np.bool_(False))
1755
+
1756
+ assert not is_bool(1)
1757
+ assert not is_bool(1.1)
1758
+ assert not is_bool(1 + 3j)
1759
+ assert not is_bool(np.int64(1))
1760
+ assert not is_bool(np.float64(1.1))
1761
+ assert not is_bool(np.complex128(1 + 3j))
1762
+ assert not is_bool(np.nan)
1763
+ assert not is_bool(None)
1764
+ assert not is_bool("x")
1765
+ assert not is_bool(datetime(2011, 1, 1))
1766
+ assert not is_bool(np.datetime64("2011-01-01"))
1767
+ assert not is_bool(Timestamp("2011-01-01"))
1768
+ assert not is_bool(Timestamp("2011-01-01", tz="US/Eastern"))
1769
+ assert not is_bool(timedelta(1000))
1770
+ assert not is_bool(np.timedelta64(1, "D"))
1771
+ assert not is_bool(Timedelta("1 days"))
1772
+
1773
+ def test_is_integer(self):
1774
+ assert is_integer(1)
1775
+ assert is_integer(np.int64(1))
1776
+
1777
+ assert not is_integer(True)
1778
+ assert not is_integer(1.1)
1779
+ assert not is_integer(1 + 3j)
1780
+ assert not is_integer(False)
1781
+ assert not is_integer(np.bool_(False))
1782
+ assert not is_integer(np.float64(1.1))
1783
+ assert not is_integer(np.complex128(1 + 3j))
1784
+ assert not is_integer(np.nan)
1785
+ assert not is_integer(None)
1786
+ assert not is_integer("x")
1787
+ assert not is_integer(datetime(2011, 1, 1))
1788
+ assert not is_integer(np.datetime64("2011-01-01"))
1789
+ assert not is_integer(Timestamp("2011-01-01"))
1790
+ assert not is_integer(Timestamp("2011-01-01", tz="US/Eastern"))
1791
+ assert not is_integer(timedelta(1000))
1792
+ assert not is_integer(Timedelta("1 days"))
1793
+ assert not is_integer(np.timedelta64(1, "D"))
1794
+
1795
+ def test_is_float(self):
1796
+ assert is_float(1.1)
1797
+ assert is_float(np.float64(1.1))
1798
+ assert is_float(np.nan)
1799
+
1800
+ assert not is_float(True)
1801
+ assert not is_float(1)
1802
+ assert not is_float(1 + 3j)
1803
+ assert not is_float(False)
1804
+ assert not is_float(np.bool_(False))
1805
+ assert not is_float(np.int64(1))
1806
+ assert not is_float(np.complex128(1 + 3j))
1807
+ assert not is_float(None)
1808
+ assert not is_float("x")
1809
+ assert not is_float(datetime(2011, 1, 1))
1810
+ assert not is_float(np.datetime64("2011-01-01"))
1811
+ assert not is_float(Timestamp("2011-01-01"))
1812
+ assert not is_float(Timestamp("2011-01-01", tz="US/Eastern"))
1813
+ assert not is_float(timedelta(1000))
1814
+ assert not is_float(np.timedelta64(1, "D"))
1815
+ assert not is_float(Timedelta("1 days"))
1816
+
1817
+ def test_is_datetime_dtypes(self):
1818
+ ts = pd.date_range("20130101", periods=3)
1819
+ tsa = pd.date_range("20130101", periods=3, tz="US/Eastern")
1820
+
1821
+ msg = "is_datetime64tz_dtype is deprecated"
1822
+
1823
+ assert is_datetime64_dtype("datetime64")
1824
+ assert is_datetime64_dtype("datetime64[ns]")
1825
+ assert is_datetime64_dtype(ts)
1826
+ assert not is_datetime64_dtype(tsa)
1827
+
1828
+ assert not is_datetime64_ns_dtype("datetime64")
1829
+ assert is_datetime64_ns_dtype("datetime64[ns]")
1830
+ assert is_datetime64_ns_dtype(ts)
1831
+ assert is_datetime64_ns_dtype(tsa)
1832
+
1833
+ assert is_datetime64_any_dtype("datetime64")
1834
+ assert is_datetime64_any_dtype("datetime64[ns]")
1835
+ assert is_datetime64_any_dtype(ts)
1836
+ assert is_datetime64_any_dtype(tsa)
1837
+
1838
+ with tm.assert_produces_warning(DeprecationWarning, match=msg):
1839
+ assert not is_datetime64tz_dtype("datetime64")
1840
+ assert not is_datetime64tz_dtype("datetime64[ns]")
1841
+ assert not is_datetime64tz_dtype(ts)
1842
+ assert is_datetime64tz_dtype(tsa)
1843
+
1844
+ @pytest.mark.parametrize("tz", ["US/Eastern", "UTC"])
1845
+ def test_is_datetime_dtypes_with_tz(self, tz):
1846
+ dtype = f"datetime64[ns, {tz}]"
1847
+ assert not is_datetime64_dtype(dtype)
1848
+
1849
+ msg = "is_datetime64tz_dtype is deprecated"
1850
+ with tm.assert_produces_warning(DeprecationWarning, match=msg):
1851
+ assert is_datetime64tz_dtype(dtype)
1852
+ assert is_datetime64_ns_dtype(dtype)
1853
+ assert is_datetime64_any_dtype(dtype)
1854
+
1855
+ def test_is_timedelta(self):
1856
+ assert is_timedelta64_dtype("timedelta64")
1857
+ assert is_timedelta64_dtype("timedelta64[ns]")
1858
+ assert not is_timedelta64_ns_dtype("timedelta64")
1859
+ assert is_timedelta64_ns_dtype("timedelta64[ns]")
1860
+
1861
+ tdi = TimedeltaIndex([1e14, 2e14], dtype="timedelta64[ns]")
1862
+ assert is_timedelta64_dtype(tdi)
1863
+ assert is_timedelta64_ns_dtype(tdi)
1864
+ assert is_timedelta64_ns_dtype(tdi.astype("timedelta64[ns]"))
1865
+
1866
+ assert not is_timedelta64_ns_dtype(Index([], dtype=np.float64))
1867
+ assert not is_timedelta64_ns_dtype(Index([], dtype=np.int64))
1868
+
1869
+
1870
+ class TestIsScalar:
1871
+ def test_is_scalar_builtin_scalars(self):
1872
+ assert is_scalar(None)
1873
+ assert is_scalar(True)
1874
+ assert is_scalar(False)
1875
+ assert is_scalar(Fraction())
1876
+ assert is_scalar(0.0)
1877
+ assert is_scalar(1)
1878
+ assert is_scalar(complex(2))
1879
+ assert is_scalar(float("NaN"))
1880
+ assert is_scalar(np.nan)
1881
+ assert is_scalar("foobar")
1882
+ assert is_scalar(b"foobar")
1883
+ assert is_scalar(datetime(2014, 1, 1))
1884
+ assert is_scalar(date(2014, 1, 1))
1885
+ assert is_scalar(time(12, 0))
1886
+ assert is_scalar(timedelta(hours=1))
1887
+ assert is_scalar(pd.NaT)
1888
+ assert is_scalar(pd.NA)
1889
+
1890
+ def test_is_scalar_builtin_nonscalars(self):
1891
+ assert not is_scalar({})
1892
+ assert not is_scalar([])
1893
+ assert not is_scalar([1])
1894
+ assert not is_scalar(())
1895
+ assert not is_scalar((1,))
1896
+ assert not is_scalar(slice(None))
1897
+ assert not is_scalar(Ellipsis)
1898
+
1899
+ def test_is_scalar_numpy_array_scalars(self):
1900
+ assert is_scalar(np.int64(1))
1901
+ assert is_scalar(np.float64(1.0))
1902
+ assert is_scalar(np.int32(1))
1903
+ assert is_scalar(np.complex64(2))
1904
+ assert is_scalar(np.object_("foobar"))
1905
+ assert is_scalar(np.str_("foobar"))
1906
+ assert is_scalar(np.bytes_(b"foobar"))
1907
+ assert is_scalar(np.datetime64("2014-01-01"))
1908
+ assert is_scalar(np.timedelta64(1, "h"))
1909
+
1910
+ @pytest.mark.parametrize(
1911
+ "zerodim",
1912
+ [
1913
+ np.array(1),
1914
+ np.array("foobar"),
1915
+ np.array(np.datetime64("2014-01-01")),
1916
+ np.array(np.timedelta64(1, "h")),
1917
+ np.array(np.datetime64("NaT")),
1918
+ ],
1919
+ )
1920
+ def test_is_scalar_numpy_zerodim_arrays(self, zerodim):
1921
+ assert not is_scalar(zerodim)
1922
+ assert is_scalar(lib.item_from_zerodim(zerodim))
1923
+
1924
+ @pytest.mark.parametrize("arr", [np.array([]), np.array([[]])])
1925
+ def test_is_scalar_numpy_arrays(self, arr):
1926
+ assert not is_scalar(arr)
1927
+ assert not is_scalar(MockNumpyLikeArray(arr))
1928
+
1929
+ def test_is_scalar_pandas_scalars(self):
1930
+ assert is_scalar(Timestamp("2014-01-01"))
1931
+ assert is_scalar(Timedelta(hours=1))
1932
+ assert is_scalar(Period("2014-01-01"))
1933
+ assert is_scalar(Interval(left=0, right=1))
1934
+ assert is_scalar(DateOffset(days=1))
1935
+ assert is_scalar(pd.offsets.Minute(3))
1936
+
1937
+ def test_is_scalar_pandas_containers(self):
1938
+ assert not is_scalar(Series(dtype=object))
1939
+ assert not is_scalar(Series([1]))
1940
+ assert not is_scalar(DataFrame())
1941
+ assert not is_scalar(DataFrame([[1]]))
1942
+ assert not is_scalar(Index([]))
1943
+ assert not is_scalar(Index([1]))
1944
+ assert not is_scalar(Categorical([]))
1945
+ assert not is_scalar(DatetimeIndex([])._data)
1946
+ assert not is_scalar(TimedeltaIndex([])._data)
1947
+ assert not is_scalar(DatetimeIndex([])._data.to_period("D"))
1948
+ assert not is_scalar(pd.array([1, 2, 3]))
1949
+
1950
+ def test_is_scalar_number(self):
1951
+ # Number() is not recognied by PyNumber_Check, so by extension
1952
+ # is not recognized by is_scalar, but instances of non-abstract
1953
+ # subclasses are.
1954
+
1955
+ class Numeric(Number):
1956
+ def __init__(self, value) -> None:
1957
+ self.value = value
1958
+
1959
+ def __int__(self) -> int:
1960
+ return self.value
1961
+
1962
+ num = Numeric(1)
1963
+ assert is_scalar(num)
1964
+
1965
+
1966
+ @pytest.mark.parametrize("unit", ["ms", "us", "ns"])
1967
+ def test_datetimeindex_from_empty_datetime64_array(unit):
1968
+ idx = DatetimeIndex(np.array([], dtype=f"datetime64[{unit}]"))
1969
+ assert len(idx) == 0
1970
+
1971
+
1972
+ def test_nan_to_nat_conversions():
1973
+ df = DataFrame(
1974
+ {"A": np.asarray(range(10), dtype="float64"), "B": Timestamp("20010101")}
1975
+ )
1976
+ df.iloc[3:6, :] = np.nan
1977
+ result = df.loc[4, "B"]
1978
+ assert result is pd.NaT
1979
+
1980
+ s = df["B"].copy()
1981
+ s[8:9] = np.nan
1982
+ assert s[8] is pd.NaT
1983
+
1984
+
1985
+ @pytest.mark.filterwarnings("ignore::PendingDeprecationWarning")
1986
+ def test_is_scipy_sparse(spmatrix):
1987
+ pytest.importorskip("scipy")
1988
+ assert is_scipy_sparse(spmatrix([[0, 1]]))
1989
+ assert not is_scipy_sparse(np.array([1]))
1990
+
1991
+
1992
+ def test_ensure_int32():
1993
+ values = np.arange(10, dtype=np.int32)
1994
+ result = ensure_int32(values)
1995
+ assert result.dtype == np.int32
1996
+
1997
+ values = np.arange(10, dtype=np.int64)
1998
+ result = ensure_int32(values)
1999
+ assert result.dtype == np.int32
2000
+
2001
+
2002
+ @pytest.mark.parametrize(
2003
+ "right,result",
2004
+ [
2005
+ (0, np.uint8),
2006
+ (-1, np.int16),
2007
+ (300, np.uint16),
2008
+ # For floats, we just upcast directly to float64 instead of trying to
2009
+ # find a smaller floating dtype
2010
+ (300.0, np.uint16), # for integer floats, we convert them to ints
2011
+ (300.1, np.float64),
2012
+ (np.int16(300), np.int16 if np_version_gt2 else np.uint16),
2013
+ ],
2014
+ )
2015
+ def test_find_result_type_uint_int(right, result):
2016
+ left_dtype = np.dtype("uint8")
2017
+ assert find_result_type(left_dtype, right) == result
2018
+
2019
+
2020
+ @pytest.mark.parametrize(
2021
+ "right,result",
2022
+ [
2023
+ (0, np.int8),
2024
+ (-1, np.int8),
2025
+ (300, np.int16),
2026
+ # For floats, we just upcast directly to float64 instead of trying to
2027
+ # find a smaller floating dtype
2028
+ (300.0, np.int16), # for integer floats, we convert them to ints
2029
+ (300.1, np.float64),
2030
+ (np.int16(300), np.int16),
2031
+ ],
2032
+ )
2033
+ def test_find_result_type_int_int(right, result):
2034
+ left_dtype = np.dtype("int8")
2035
+ assert find_result_type(left_dtype, right) == result
2036
+
2037
+
2038
+ @pytest.mark.parametrize(
2039
+ "right,result",
2040
+ [
2041
+ (300.0, np.float64),
2042
+ (np.float32(300), np.float32),
2043
+ ],
2044
+ )
2045
+ def test_find_result_type_floats(right, result):
2046
+ left_dtype = np.dtype("float16")
2047
+ assert find_result_type(left_dtype, right) == result
omnilmm/lib/python3.10/site-packages/pandas/tests/scalar/__init__.py ADDED
File without changes
omnilmm/lib/python3.10/site-packages/pandas/tests/scalar/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (172 Bytes). View file
 
omnilmm/lib/python3.10/site-packages/pandas/tests/scalar/__pycache__/test_na_scalar.cpython-310.pyc ADDED
Binary file (7.95 kB). View file
 
omnilmm/lib/python3.10/site-packages/pandas/tests/scalar/__pycache__/test_nat.cpython-310.pyc ADDED
Binary file (15.7 kB). View file
 
omnilmm/lib/python3.10/site-packages/pandas/tests/scalar/interval/__init__.py ADDED
File without changes
omnilmm/lib/python3.10/site-packages/pandas/tests/scalar/interval/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (181 Bytes). View file
 
omnilmm/lib/python3.10/site-packages/pandas/tests/scalar/interval/__pycache__/test_arithmetic.cpython-310.pyc ADDED
Binary file (5.73 kB). View file
 
omnilmm/lib/python3.10/site-packages/pandas/tests/scalar/interval/__pycache__/test_constructors.cpython-310.pyc ADDED
Binary file (2.02 kB). View file
 
omnilmm/lib/python3.10/site-packages/pandas/tests/scalar/interval/__pycache__/test_contains.cpython-310.pyc ADDED
Binary file (2.32 kB). View file
 
omnilmm/lib/python3.10/site-packages/pandas/tests/scalar/interval/__pycache__/test_formats.cpython-310.pyc ADDED
Binary file (575 Bytes). View file
 
omnilmm/lib/python3.10/site-packages/pandas/tests/scalar/interval/__pycache__/test_interval.cpython-310.pyc ADDED
Binary file (2.47 kB). View file
 
omnilmm/lib/python3.10/site-packages/pandas/tests/scalar/interval/__pycache__/test_overlaps.cpython-310.pyc ADDED
Binary file (2.62 kB). View file
 
omnilmm/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_arithmetic.py ADDED
@@ -0,0 +1,192 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from datetime import timedelta
2
+
3
+ import numpy as np
4
+ import pytest
5
+
6
+ from pandas import (
7
+ Interval,
8
+ Timedelta,
9
+ Timestamp,
10
+ )
11
+ import pandas._testing as tm
12
+
13
+
14
class TestIntervalArithmetic:
    """Arithmetic between a scalar Interval and numeric/temporal scalars.

    Valid operations shift or scale both endpoints in lockstep; any other
    operand combination raises TypeError (or ValueError from the Interval
    constructor when the shifted endpoints would be invalid).
    """

    def test_interval_add(self, closed):
        iv = Interval(0, 1, closed=closed)
        shifted = Interval(1, 2, closed=closed)

        assert iv + 1 == shifted
        assert 1 + iv == shifted

        inplace = iv
        inplace += 1
        assert inplace == shifted

        msg = r"unsupported operand type\(s\) for \+"
        with pytest.raises(TypeError, match=msg):
            iv + iv
        with pytest.raises(TypeError, match=msg):
            iv + "foo"

    def test_interval_sub(self, closed):
        iv = Interval(0, 1, closed=closed)
        shifted = Interval(-1, 0, closed=closed)

        assert iv - 1 == shifted

        inplace = iv
        inplace -= 1
        assert inplace == shifted

        msg = r"unsupported operand type\(s\) for -"
        with pytest.raises(TypeError, match=msg):
            iv - iv
        with pytest.raises(TypeError, match=msg):
            iv - "foo"

    def test_interval_mult(self, closed):
        iv = Interval(0, 1, closed=closed)
        scaled = Interval(0, 2, closed=closed)

        assert iv * 2 == scaled
        assert 2 * iv == scaled

        inplace = iv
        inplace *= 2
        assert inplace == scaled

        msg = r"unsupported operand type\(s\) for \*"
        with pytest.raises(TypeError, match=msg):
            iv * iv

        msg = r"can\'t multiply sequence by non-int"
        with pytest.raises(TypeError, match=msg):
            iv * "foo"

    def test_interval_div(self, closed):
        iv = Interval(0, 1, closed=closed)
        halved = Interval(0, 0.5, closed=closed)

        assert iv / 2.0 == halved

        inplace = iv
        inplace /= 2.0
        assert inplace == halved

        msg = r"unsupported operand type\(s\) for /"
        with pytest.raises(TypeError, match=msg):
            iv / iv
        with pytest.raises(TypeError, match=msg):
            iv / "foo"

    def test_interval_floordiv(self, closed):
        iv = Interval(1, 2, closed=closed)
        floored = Interval(0, 1, closed=closed)

        assert iv // 2 == floored

        inplace = iv
        inplace //= 2
        assert inplace == floored

        msg = r"unsupported operand type\(s\) for //"
        with pytest.raises(TypeError, match=msg):
            iv // iv
        with pytest.raises(TypeError, match=msg):
            iv // "foo"

    @pytest.mark.parametrize("method", ["__add__", "__sub__"])
    @pytest.mark.parametrize(
        "interval",
        [
            Interval(
                Timestamp("2017-01-01 00:00:00"), Timestamp("2018-01-01 00:00:00")
            ),
            Interval(Timedelta(days=7), Timedelta(days=14)),
        ],
    )
    @pytest.mark.parametrize(
        "delta", [Timedelta(days=7), timedelta(7), np.timedelta64(7, "D")]
    )
    def test_time_interval_add_subtract_timedelta(self, interval, delta, method):
        # https://github.com/pandas-dev/pandas/issues/32023
        result = getattr(interval, method)(delta)
        expected = Interval(
            getattr(interval.left, method)(delta),
            getattr(interval.right, method)(delta),
        )
        assert result == expected

    @pytest.mark.parametrize("interval", [Interval(1, 2), Interval(1.0, 2.0)])
    @pytest.mark.parametrize(
        "delta", [Timedelta(days=7), timedelta(7), np.timedelta64(7, "D")]
    )
    def test_numeric_interval_add_timedelta_raises(self, interval, delta):
        # https://github.com/pandas-dev/pandas/issues/32023
        # the error type/message depends on the timedelta flavor, so accept any
        msg = "|".join(
            [
                "unsupported operand",
                "cannot use operands",
                "Only numeric, Timestamp and Timedelta endpoints are allowed",
            ]
        )
        with pytest.raises((TypeError, ValueError), match=msg):
            interval + delta
        with pytest.raises((TypeError, ValueError), match=msg):
            delta + interval

    @pytest.mark.parametrize("klass", [timedelta, np.timedelta64, Timedelta])
    def test_timedelta_add_timestamp_interval(self, klass):
        # adding a zero-length timedelta leaves the interval unchanged
        zero = klass(0)
        iv = Interval(Timestamp("2020-01-01"), Timestamp("2020-02-01"))

        assert zero + iv == iv
        assert iv + zero == iv
163
+
164
+
165
+ class TestIntervalComparisons:
166
+ def test_interval_equal(self):
167
+ assert Interval(0, 1) == Interval(0, 1, closed="right")
168
+ assert Interval(0, 1) != Interval(0, 1, closed="left")
169
+ assert Interval(0, 1) != 0
170
+
171
+ def test_interval_comparison(self):
172
+ msg = (
173
+ "'<' not supported between instances of "
174
+ "'pandas._libs.interval.Interval' and 'int'"
175
+ )
176
+ with pytest.raises(TypeError, match=msg):
177
+ Interval(0, 1) < 2
178
+
179
+ assert Interval(0, 1) < Interval(1, 2)
180
+ assert Interval(0, 1) < Interval(0, 2)
181
+ assert Interval(0, 1) < Interval(0.5, 1.5)
182
+ assert Interval(0, 1) <= Interval(0, 1)
183
+ assert Interval(0, 1) > Interval(-1, 2)
184
+ assert Interval(0, 1) >= Interval(0, 1)
185
+
186
+ def test_equality_comparison_broadcasts_over_array(self):
187
+ # https://github.com/pandas-dev/pandas/issues/35931
188
+ interval = Interval(0, 1)
189
+ arr = np.array([interval, interval])
190
+ result = interval == arr
191
+ expected = np.array([True, True])
192
+ tm.assert_numpy_array_equal(result, expected)
omnilmm/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_constructors.py ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+
3
+ from pandas import (
4
+ Interval,
5
+ Period,
6
+ Timestamp,
7
+ )
8
+
9
+
10
class TestIntervalConstructors:
    """Input validation performed by the scalar Interval constructor."""

    @pytest.mark.parametrize(
        "left, right",
        [
            ("a", "z"),
            (("a", "b"), ("c", "d")),
            (list("AB"), list("ab")),
            (Interval(0, 1), Interval(1, 2)),
            (Period("2018Q1", freq="Q"), Period("2018Q1", freq="Q")),
        ],
    )
    def test_construct_errors(self, left, right):
        # GH#23013: endpoints must be numeric, Timestamp or Timedelta
        msg = "Only numeric, Timestamp and Timedelta endpoints are allowed"
        with pytest.raises(ValueError, match=msg):
            Interval(left, right)

    def test_constructor_errors(self):
        # unknown closed side
        with pytest.raises(ValueError, match="invalid option for 'closed': foo"):
            Interval(0, 1, closed="foo")

        # endpoints out of order
        with pytest.raises(
            ValueError, match="left side of interval must be <= right side"
        ):
            Interval(1, 0)

    @pytest.mark.parametrize(
        "tz_left, tz_right", [(None, "UTC"), ("UTC", None), ("UTC", "US/Eastern")]
    )
    def test_constructor_errors_tz(self, tz_left, tz_right):
        # GH#18538
        left = Timestamp("2017-01-01", tz=tz_left)
        right = Timestamp("2017-01-02", tz=tz_right)

        if tz_left is None or tz_right is None:
            # naive/aware mix raises from the endpoint comparison itself
            error = TypeError
            msg = "Cannot compare tz-naive and tz-aware timestamps"
        else:
            error = ValueError
            msg = "left and right must have the same time zone"
        with pytest.raises(error, match=msg):
            Interval(left, right)
omnilmm/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_contains.py ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+
3
+ from pandas import (
4
+ Interval,
5
+ Timedelta,
6
+ Timestamp,
7
+ )
8
+
9
+
10
class TestContains:
    """Membership (`in`) semantics for points and nested Intervals."""

    def test_contains(self):
        iv = Interval(0, 1)  # right-closed by default
        assert 0.5 in iv
        assert 1 in iv
        assert 0 not in iv

        both = Interval(0, 1, "both")
        assert 0 in both
        assert 1 in both

        neither = Interval(0, 1, closed="neither")
        assert 0 not in neither
        assert 0.5 in neither
        assert 1 not in neither

    def test_contains_interval(self, inclusive_endpoints_fixture):
        outer = Interval(0, 1, "both")
        inner = Interval(0, 1, inclusive_endpoints_fixture)
        assert outer in outer
        assert inner in inner
        assert inner in outer
        # the closed interval only fits inside `inner` when it is also "both"
        assert outer not in inner or inclusive_endpoints_fixture == "both"

    def test_contains_infinite_length(self):
        finite = Interval(0, 1, "both")
        unbounded = Interval(float("-inf"), float("inf"), "neither")
        assert finite in unbounded
        assert unbounded not in finite

    def test_contains_zero_length(self):
        iv = Interval(0, 1, "both")
        point_outside = Interval(-1, -1, "both")
        point_inside = Interval(0.5, 0.5, "both")
        assert point_outside not in iv
        assert point_inside in iv
        assert point_outside not in point_inside and point_inside not in point_outside
        assert iv not in point_outside and iv not in point_inside

    @pytest.mark.parametrize(
        "type1",
        [
            (0, 1),
            (Timestamp(2000, 1, 1, 0), Timestamp(2000, 1, 1, 1)),
            (Timedelta("0h"), Timedelta("1h")),
        ],
    )
    @pytest.mark.parametrize(
        "type2",
        [
            (0, 1),
            (Timestamp(2000, 1, 1, 0), Timestamp(2000, 1, 1, 1)),
            (Timedelta("0h"), Timedelta("1h")),
        ],
    )
    def test_contains_mixed_types(self, type1, type2):
        iv1 = Interval(*type1)
        iv2 = Interval(*type2)
        if type1 == type2:
            assert iv1 in iv2
        else:
            # heterogeneous endpoint types cannot be ordered
            msg = "^'<=' not supported between instances of"
            with pytest.raises(TypeError, match=msg):
                iv1 in iv2
omnilmm/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_formats.py ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from pandas import Interval
2
+
3
+
4
def test_interval_repr():
    """repr shows constructor form; str shows mathematical bracket notation."""
    default = Interval(0, 1)
    assert repr(default) == "Interval(0, 1, closed='right')"
    assert str(default) == "(0, 1]"

    left_closed = Interval(0, 1, closed="left")
    assert repr(left_closed) == "Interval(0, 1, closed='left')"
    assert str(left_closed) == "[0, 1)"
omnilmm/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_interval.py ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pytest
3
+
4
+ from pandas import (
5
+ Interval,
6
+ Timedelta,
7
+ Timestamp,
8
+ )
9
+
10
+
11
@pytest.fixture
def interval():
    """Default right-closed unit interval shared by the TestInterval cases."""
    return Interval(0, 1)
14
+
15
+
16
class TestInterval:
    """Basic attribute, hashing and length behavior of scalar Intervals."""

    def test_properties(self, interval):
        # default closed side is "right"
        assert interval.closed == "right"
        assert interval.left == 0
        assert interval.right == 1
        assert interval.mid == 0.5

    def test_hash(self, interval):
        # should not raise
        hash(interval)

    @pytest.mark.parametrize(
        "left, right, expected",
        [
            (0, 5, 5),
            (-2, 5.5, 7.5),
            (10, 10, 0),
            (10, np.inf, np.inf),
            (-np.inf, -5, np.inf),
            (-np.inf, np.inf, np.inf),
            (Timedelta("0 days"), Timedelta("5 days"), Timedelta("5 days")),
            (Timedelta("10 days"), Timedelta("10 days"), Timedelta("0 days")),
            (Timedelta("1h10min"), Timedelta("5h5min"), Timedelta("3h55min")),
            (Timedelta("5s"), Timedelta("1h"), Timedelta("59min55s")),
        ],
    )
    def test_length(self, left, right, expected):
        # GH 18789: length is simply right - left
        assert Interval(left, right).length == expected

    @pytest.mark.parametrize(
        "left, right, expected",
        [
            ("2017-01-01", "2017-01-06", "5 days"),
            ("2017-01-01", "2017-01-01 12:00:00", "12 hours"),
            ("2017-01-01 12:00", "2017-01-01 12:00:00", "0 days"),
            ("2017-01-01 12:01", "2017-01-05 17:31:00", "4 days 5 hours 30 min"),
        ],
    )
    @pytest.mark.parametrize("tz", (None, "UTC", "CET", "US/Eastern"))
    def test_length_timestamp(self, tz, left, right, expected):
        # GH 18789: Timestamp endpoints yield a Timedelta length
        iv = Interval(Timestamp(left, tz=tz), Timestamp(right, tz=tz))
        assert iv.length == Timedelta(expected)

    @pytest.mark.parametrize(
        "left, right",
        [
            (0, 1),
            (Timedelta("0 days"), Timedelta("1 day")),
            (Timestamp("2018-01-01"), Timestamp("2018-01-02")),
            (
                Timestamp("2018-01-01", tz="US/Eastern"),
                Timestamp("2018-01-02", tz="US/Eastern"),
            ),
        ],
    )
    def test_is_empty(self, left, right, closed):
        # GH27219
        # non-empty always return False
        assert Interval(left, right, closed).is_empty is False

        # same endpoint is empty except when closed='both' (contains one point)
        degenerate = Interval(left, left, closed)
        result = degenerate.is_empty
        assert result is (closed != "both")
omnilmm/lib/python3.10/site-packages/pandas/tests/scalar/interval/test_overlaps.py ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+
3
+ from pandas import (
4
+ Interval,
5
+ Timedelta,
6
+ Timestamp,
7
+ )
8
+
9
+
10
@pytest.fixture(
    params=[
        (Timedelta("0 days"), Timedelta("1 day")),
        (Timestamp("2018-01-01"), Timedelta("1 day")),
        (0, 1),
    ],
    ids=lambda x: type(x[0]).__name__,
)
def start_shift(request):
    """
    Fixture for generating intervals of types from a start value and a shift
    value that can be added to start to generate an endpoint
    """
    return request.param
24
+
25
+
26
class TestOverlaps:
    """Interval.overlaps across nesting, disjointness and shared endpoints."""

    def test_overlaps_self(self, start_shift, closed):
        start, shift = start_shift
        iv = Interval(start, start + shift, closed)
        assert iv.overlaps(iv)

    def test_overlaps_nested(self, start_shift, closed, other_closed):
        start, shift = start_shift
        outer = Interval(start, start + 3 * shift, other_closed)
        inner = Interval(start + shift, start + 2 * shift, closed)

        # nested intervals should always overlap
        assert outer.overlaps(inner)

    def test_overlaps_disjoint(self, start_shift, closed, other_closed):
        start, shift = start_shift
        first = Interval(start, start + shift, other_closed)
        second = Interval(start + 2 * shift, start + 3 * shift, closed)

        # disjoint intervals should never overlap
        assert not first.overlaps(second)

    def test_overlaps_endpoint(self, start_shift, closed, other_closed):
        start, shift = start_shift
        first = Interval(start, start + shift, other_closed)
        second = Interval(start + shift, start + 2 * shift, closed)

        # overlap if shared endpoint is closed for both (overlap at a point)
        expected = first.closed_right and second.closed_left
        assert first.overlaps(second) == expected

    @pytest.mark.parametrize(
        "other",
        [10, True, "foo", Timedelta("1 day"), Timestamp("2018-01-01")],
        ids=lambda x: type(x).__name__,
    )
    def test_overlaps_invalid_type(self, other):
        iv = Interval(0, 1)
        msg = f"`other` must be an Interval, got {type(other).__name__}"
        with pytest.raises(TypeError, match=msg):
            iv.overlaps(other)
omnilmm/lib/python3.10/site-packages/pandas/tests/scalar/test_na_scalar.py ADDED
@@ -0,0 +1,316 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from datetime import (
2
+ date,
3
+ time,
4
+ timedelta,
5
+ )
6
+ import pickle
7
+
8
+ import numpy as np
9
+ import pytest
10
+
11
+ from pandas._libs.missing import NA
12
+
13
+ from pandas.core.dtypes.common import is_scalar
14
+
15
+ import pandas as pd
16
+ import pandas._testing as tm
17
+
18
+
19
def test_singleton():
    """NA is a true singleton: re-instantiating its type yields the same object."""
    assert NA is NA
    assert type(NA)() is NA
23
+
24
+
25
def test_repr():
    """Both repr and str render as the literal '<NA>'."""
    expected = "<NA>"
    assert repr(NA) == expected
    assert str(NA) == expected
28
+
29
+
30
def test_format():
    # GH-34740: NA honors alignment specs and silently accepts any other spec
    assert format(NA) == "<NA>"
    assert format(NA, ">10") == "      <NA>"
    assert format(NA, "xxx") == "<NA>"  # NA is flexible, accept any format spec

    assert f"{NA}" == "<NA>"
    assert f"{NA:>10}" == "      <NA>"
    assert f"{NA:xxx}" == "<NA>"
39
+
40
+
41
def test_truthiness():
    """Coercing NA to bool is ambiguous and must raise TypeError."""
    msg = "boolean value of NA is ambiguous"

    with pytest.raises(TypeError, match=msg):
        bool(NA)

    with pytest.raises(TypeError, match=msg):
        not NA
49
+
50
+
51
def test_hashable():
    """NA hashes consistently and is usable as a dict key."""
    assert hash(NA) == hash(NA)
    mapping = {NA: "test"}
    assert mapping[NA] == "test"
55
+
56
+
57
@pytest.mark.parametrize(
    "other", [NA, 1, 1.0, "a", b"a", np.int64(1), np.nan], ids=repr
)
def test_arithmetic_ops(all_arithmetic_functions, other):
    """Every arithmetic op with NA propagates NA (pairs for divmod)."""
    op = all_arithmetic_functions
    name = op.__name__

    if name in ("pow", "rpow", "rmod") and isinstance(other, (str, bytes)):
        pytest.skip(reason=f"{name} with NA and {other} not defined.")
    if name in ("divmod", "rdivmod"):
        assert op(NA, other) is (NA, NA)
    else:
        if name == "rpow":
            # avoid special case
            other += 1
        assert op(NA, other) is NA
72
+
73
+
74
@pytest.mark.parametrize(
    "other",
    [
        NA,
        1,
        1.0,
        "a",
        b"a",
        np.int64(1),
        np.nan,
        np.bool_(True),
        time(0),
        date(1, 2, 3),
        timedelta(1),
        pd.NaT,
    ],
)
def test_comparison_ops(comparison_op, other):
    # comparisons against NA propagate NA, regardless of operand order
    assert comparison_op(NA, other) is NA
    assert comparison_op(other, NA) is NA
94
+
95
+
96
@pytest.mark.parametrize(
    "value",
    [
        0,
        0.0,
        -0,
        -0.0,
        False,
        np.bool_(False),
        np.int_(0),
        np.float64(0),
        np.int_(-0),
        np.float64(-0),
    ],
)
@pytest.mark.parametrize("asarray", [True, False])
def test_pow_special(value, asarray):
    """NA ** 0 is 1 for every flavor of zero, scalar or wrapped in an array."""
    if asarray:
        value = np.array([value])
    result = NA**value

    if asarray:
        result = result[0]
    else:
        # this assertion isn't possible for ndarray.
        assert isinstance(result, type(value))
    assert result == 1
123
+
124
+
125
@pytest.mark.parametrize(
    "value", [1, 1.0, True, np.bool_(True), np.int_(1), np.float64(1)]
)
@pytest.mark.parametrize("asarray", [True, False])
def test_rpow_special(value, asarray):
    """1 ** NA is 1 (i.e. the base itself) for every flavor of one."""
    if asarray:
        value = np.array([value])
    result = value**NA

    if asarray:
        result = result[0]
    elif not isinstance(value, (np.float64, np.bool_, np.int_)):
        # this assertion isn't possible with asarray=True
        assert isinstance(result, type(value))

    assert result == value
141
+
142
+
143
@pytest.mark.parametrize("value", [-1, -1.0, np.int_(-1), np.float64(-1)])
@pytest.mark.parametrize("asarray", [True, False])
def test_rpow_minus_one(value, asarray):
    """(-1) ** NA stays missing — no special-casing like 1 ** NA."""
    if asarray:
        value = np.array([value])
    result = value**NA

    if asarray:
        result = result[0]

    assert pd.isna(result)
154
+
155
+
156
def test_unary_ops():
    """Unary +, -, abs() and ~ all propagate the NA singleton."""
    for result in (+NA, -NA, abs(NA), ~NA):
        assert result is NA
161
+
162
+
163
def test_logical_and():
    """Kleene AND: NA & x is False only when x is False, else NA."""
    assert NA & True is NA
    assert True & NA is NA
    assert NA & False is False
    assert False & NA is False
    assert NA & NA is NA

    msg = "unsupported operand type"
    with pytest.raises(TypeError, match=msg):
        NA & 5
173
+
174
+
175
def test_logical_or():
    """Kleene OR: NA | x is True only when x is True, else NA."""
    assert NA | True is True
    assert True | NA is True
    assert NA | False is NA
    assert False | NA is NA
    assert NA | NA is NA

    msg = "unsupported operand type"
    with pytest.raises(TypeError, match=msg):
        NA | 5
185
+
186
+
187
def test_logical_xor():
    """Kleene XOR: any XOR involving NA is NA."""
    assert NA ^ True is NA
    assert True ^ NA is NA
    assert NA ^ False is NA
    assert False ^ NA is NA
    assert NA ^ NA is NA

    msg = "unsupported operand type"
    with pytest.raises(TypeError, match=msg):
        NA ^ 5
197
+
198
+
199
def test_logical_not():
    """Kleene NOT: inverting NA is still NA."""
    assert ~NA is NA
201
+
202
+
203
@pytest.mark.parametrize("shape", [(3,), (3, 3), (1, 2, 3)])
def test_arithmetic_ndarray(shape, all_arithmetic_functions):
    """NA op ndarray broadcasts to an all-NA object array of the same shape."""
    op = all_arithmetic_functions
    arr = np.zeros(shape)
    if op.__name__ == "pow":
        # avoid NA ** 0 == 1 swallowing the propagation
        arr += 5
    result = op(NA, arr)
    tm.assert_numpy_array_equal(result, np.full(arr.shape, NA, dtype=object))
212
+
213
+
214
def test_is_scalar():
    """pandas treats NA as a scalar value."""
    assert is_scalar(NA) is True
216
+
217
+
218
def test_isna():
    """isna/notna recognize the NA singleton."""
    assert pd.isna(NA) is True
    assert pd.notna(NA) is False
221
+
222
+
223
def test_series_isna():
    """Series.isna flags NA stored in an object-dtype Series."""
    ser = pd.Series([1, NA], dtype=object)
    tm.assert_series_equal(ser.isna(), pd.Series([False, True]))
227
+
228
+
229
def test_ufunc():
    """NumPy ufuncs on NA return NA (per output for multi-output ufuncs)."""
    assert np.log(NA) is NA
    assert np.add(NA, 1) is NA

    quot, rem = np.divmod(NA, 1)
    assert quot is NA and rem is NA

    mantissa, exponent = np.frexp(NA)
    assert mantissa is NA and exponent is NA
237
+
238
+
239
def test_ufunc_raises():
    """Unsupported ufunc methods (like .at) raise instead of returning NA."""
    msg = "ufunc method 'at'"
    with pytest.raises(ValueError, match=msg):
        np.log.at(NA, 0)
243
+
244
+
245
def test_binary_input_not_dunder():
    """Non-dunder binary ufuncs propagate NA elementwise over arrays."""
    arr = np.array([1, 2, 3])
    all_na = np.array([NA, NA, NA], dtype=object)

    tm.assert_numpy_array_equal(np.logaddexp(arr, NA), all_na)
    tm.assert_numpy_array_equal(np.logaddexp(NA, arr), all_na)

    # all NA, multiple inputs
    assert np.logaddexp(NA, NA) is NA

    parts = np.modf(NA, NA)
    assert len(parts) == 2
    assert all(x is NA for x in parts)
260
+
261
+
262
def test_divmod_ufunc():
    # binary in, binary out.
    arr = np.array([1, 2, 3])
    all_na = np.array([NA, NA, NA], dtype=object)

    quot_rem = np.divmod(arr, NA)
    assert isinstance(quot_rem, tuple)
    for part in quot_rem:
        tm.assert_numpy_array_equal(part, all_na)
        tm.assert_numpy_array_equal(part, all_na)

    for part in np.divmod(NA, arr):
        tm.assert_numpy_array_equal(part, all_na)
        tm.assert_numpy_array_equal(part, all_na)
277
+
278
+
279
def test_integer_hash_collision_dict():
    # GH 30013: NA and its hash value must act as distinct dict keys
    mapping = {NA: "foo", hash(NA): "bar"}

    assert mapping[NA] == "foo"
    assert mapping[hash(NA)] == "bar"
285
+
286
+
287
def test_integer_hash_collision_set():
    # GH 30013: NA and hash(NA) remain distinct set members
    members = {NA, hash(NA)}

    assert len(members) == 2
    assert NA in members
    assert hash(NA) in members
294
+
295
+
296
def test_pickle_roundtrip():
    # https://github.com/pandas-dev/pandas/issues/31847
    assert pickle.loads(pickle.dumps(NA)) is NA
300
+
301
+
302
def test_pickle_roundtrip_pandas():
    """pandas' own pickle round-trip helper also preserves the singleton."""
    assert tm.round_trip_pickle(NA) is NA
305
+
306
+
307
@pytest.mark.parametrize(
    "values, dtype", [([1, 2, NA], "Int64"), (["A", "B", NA], "string")]
)
@pytest.mark.parametrize("as_frame", [True, False])
def test_pickle_roundtrip_containers(as_frame, values, dtype):
    """Masked/string containers holding NA survive a pickle round-trip."""
    obj = pd.Series(pd.array(values, dtype=dtype))
    if as_frame:
        obj = obj.to_frame(name="A")
    tm.assert_equal(tm.round_trip_pickle(obj), obj)
omnilmm/lib/python3.10/site-packages/pandas/tests/scalar/test_nat.py ADDED
@@ -0,0 +1,709 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from datetime import (
2
+ datetime,
3
+ timedelta,
4
+ )
5
+ import operator
6
+
7
+ import numpy as np
8
+ import pytest
9
+ import pytz
10
+
11
+ from pandas._libs.tslibs import iNaT
12
+ from pandas.compat.numpy import np_version_gte1p24p3
13
+
14
+ from pandas import (
15
+ DatetimeIndex,
16
+ DatetimeTZDtype,
17
+ Index,
18
+ NaT,
19
+ Period,
20
+ Series,
21
+ Timedelta,
22
+ TimedeltaIndex,
23
+ Timestamp,
24
+ isna,
25
+ offsets,
26
+ )
27
+ import pandas._testing as tm
28
+ from pandas.core import roperator
29
+ from pandas.core.arrays import (
30
+ DatetimeArray,
31
+ PeriodArray,
32
+ TimedeltaArray,
33
+ )
34
+
35
+
36
class TestNaTFormatting:
    """Every string rendering of NaT is the literal 'NaT'."""

    def test_repr(self):
        assert repr(NaT) == "NaT"

    def test_str(self):
        assert str(NaT) == "NaT"

    def test_isoformat(self):
        assert NaT.isoformat() == "NaT"
45
+
46
+
47
@pytest.mark.parametrize(
    "nat,idx",
    [
        (Timestamp("NaT"), DatetimeArray),
        (Timedelta("NaT"), TimedeltaArray),
        (Period("NaT", freq="M"), PeriodArray),
    ],
)
def test_nat_fields(nat, idx):
    """Field accessors on NaT-like scalars give nan; boolean ops give False."""
    for field in idx._field_ops:
        # weekday is a property of DTI, but a method
        # on NaT/Timestamp for compat with datetime
        if field == "weekday":
            continue

        assert np.isnan(getattr(NaT, field))
        assert np.isnan(getattr(nat, field))

    for field in idx._bool_ops:
        assert getattr(NaT, field) is False
        assert getattr(nat, field) is False
74
+
75
+
76
def test_nat_vector_field_access():
    """Vectorized field access matches elementwise access, NaT included."""
    idx = DatetimeIndex(["1/1/2000", None, None, "1/4/2000"])

    # weekday is a property of DTI, but a method
    # on NaT/Timestamp for compat with datetime
    skipped = "weekday"

    for field in DatetimeArray._field_ops:
        if field == skipped:
            continue
        result = getattr(idx, field)
        tm.assert_index_equal(result, Index([getattr(x, field) for x in idx]))

    ser = Series(idx)

    for field in DatetimeArray._field_ops:
        if field == skipped:
            continue
        result = getattr(ser.dt, field)
        tm.assert_series_equal(result, Series([getattr(x, field) for x in idx]))

    for field in DatetimeArray._bool_ops:
        result = getattr(ser.dt, field)
        tm.assert_series_equal(result, Series([getattr(x, field) for x in idx]))
105
+
106
+
107
@pytest.mark.parametrize("klass", [Timestamp, Timedelta, Period])
@pytest.mark.parametrize(
    "value", [None, np.nan, iNaT, float("nan"), NaT, "NaT", "nat", "", "NAT"]
)
def test_identity(klass, value):
    # every recognized missing-value spelling constructs the NaT singleton
    assert klass(value) is NaT
113
+
114
+
115
@pytest.mark.parametrize("klass", [Timestamp, Timedelta])
@pytest.mark.parametrize("method", ["round", "floor", "ceil"])
@pytest.mark.parametrize("freq", ["s", "5s", "min", "5min", "h", "5h"])
def test_round_nat(klass, method, freq):
    # see gh-14940: rounding NaT at any frequency is a no-op
    ts = klass("nat")
    assert getattr(ts, method)(freq) is ts
124
+
125
+
126
@pytest.mark.parametrize(
    "method",
    [
        "astimezone",
        "combine",
        "ctime",
        "dst",
        "fromordinal",
        "fromtimestamp",
        "fromisocalendar",
        "isocalendar",
        "strftime",
        "strptime",
        "time",
        "timestamp",
        "timetuple",
        "timetz",
        "toordinal",
        "tzname",
        "utcfromtimestamp",
        "utcnow",
        "utcoffset",
        "utctimetuple",
        "timestamp",
    ],
)
def test_nat_methods_raise(method):
    # see gh-9513, gh-17329: these datetime methods are undefined on NaT
    msg = f"NaTType does not support {method}"
    with pytest.raises(ValueError, match=msg):
        getattr(NaT, method)()
158
+
159
+
160
@pytest.mark.parametrize("method", ["weekday", "isoweekday"])
def test_nat_methods_nan(method):
    # see gh-9513, gh-17329: weekday-style methods yield nan on NaT
    assert np.isnan(getattr(NaT, method)())
164
+
165
+
166
@pytest.mark.parametrize(
    "method", ["date", "now", "replace", "today", "tz_convert", "tz_localize"]
)
def test_nat_methods_nat(method):
    # see gh-8254, gh-9513, gh-17329: these methods round-trip the singleton
    assert getattr(NaT, method)() is NaT
172
+
173
+
174
@pytest.mark.parametrize(
    "get_nat", [lambda x: NaT, lambda x: Timedelta(x), lambda x: Timestamp(x)]
)
def test_nat_iso_format(get_nat):
    # see gh-12300: isoformat is "NaT" regardless of the timespec requested
    nat = get_nat("NaT")
    assert nat.isoformat() == "NaT"
    assert nat.isoformat(timespec="nanoseconds") == "NaT"
181
+
182
+
183
@pytest.mark.parametrize(
    "klass,expected",
    [
        (Timestamp, ["normalize", "to_julian_date", "to_period", "unit"]),
        (
            Timedelta,
            [
                "components",
                "resolution_string",
                "to_pytimedelta",
                "to_timedelta64",
                "unit",
                "view",
            ],
        ),
    ],
)
def test_missing_public_nat_methods(klass, expected):
    # see gh-17327
    #
    # NaT should have *most* of the Timestamp and Timedelta methods.
    # Here, we check which public methods NaT does not have. We
    # ignore any missing private methods.
    nat_names = set(dir(NaT))
    missing = sorted(
        name
        for name in dir(klass)
        if name not in nat_names and not name.startswith("_")
    )
    assert missing == expected
213
+
214
+
215
+ def _get_overlap_public_nat_methods(klass, as_tuple=False):
216
+ """
217
+ Get overlapping public methods between NaT and another class.
218
+
219
+ Parameters
220
+ ----------
221
+ klass : type
222
+ The class to compare with NaT
223
+ as_tuple : bool, default False
224
+ Whether to return a list of tuples of the form (klass, method).
225
+
226
+ Returns
227
+ -------
228
+ overlap : list
229
+ """
230
+ nat_names = dir(NaT)
231
+ klass_names = dir(klass)
232
+
233
+ overlap = [
234
+ x
235
+ for x in nat_names
236
+ if x in klass_names and not x.startswith("_") and callable(getattr(klass, x))
237
+ ]
238
+
239
+ # Timestamp takes precedence over Timedelta in terms of overlap.
240
+ if klass is Timedelta:
241
+ ts_names = dir(Timestamp)
242
+ overlap = [x for x in overlap if x not in ts_names]
243
+
244
+ if as_tuple:
245
+ overlap = [(klass, method) for method in overlap]
246
+
247
+ overlap.sort()
248
+ return overlap
249
+
250
+
251
+ @pytest.mark.parametrize(
252
+ "klass,expected",
253
+ [
254
+ (
255
+ Timestamp,
256
+ [
257
+ "as_unit",
258
+ "astimezone",
259
+ "ceil",
260
+ "combine",
261
+ "ctime",
262
+ "date",
263
+ "day_name",
264
+ "dst",
265
+ "floor",
266
+ "fromisocalendar",
267
+ "fromisoformat",
268
+ "fromordinal",
269
+ "fromtimestamp",
270
+ "isocalendar",
271
+ "isoformat",
272
+ "isoweekday",
273
+ "month_name",
274
+ "now",
275
+ "replace",
276
+ "round",
277
+ "strftime",
278
+ "strptime",
279
+ "time",
280
+ "timestamp",
281
+ "timetuple",
282
+ "timetz",
283
+ "to_datetime64",
284
+ "to_numpy",
285
+ "to_pydatetime",
286
+ "today",
287
+ "toordinal",
288
+ "tz_convert",
289
+ "tz_localize",
290
+ "tzname",
291
+ "utcfromtimestamp",
292
+ "utcnow",
293
+ "utcoffset",
294
+ "utctimetuple",
295
+ "weekday",
296
+ ],
297
+ ),
298
+ (Timedelta, ["total_seconds"]),
299
+ ],
300
+ )
301
+ def test_overlap_public_nat_methods(klass, expected):
302
+ # see gh-17327
303
+ #
304
+ # NaT should have *most* of the Timestamp and Timedelta methods.
305
+ # In case when Timestamp, Timedelta, and NaT are overlap, the overlap
306
+ # is considered to be with Timestamp and NaT, not Timedelta.
307
+ assert _get_overlap_public_nat_methods(klass) == expected
308
+
309
+
310
+ @pytest.mark.parametrize(
311
+ "compare",
312
+ (
313
+ _get_overlap_public_nat_methods(Timestamp, True)
314
+ + _get_overlap_public_nat_methods(Timedelta, True)
315
+ ),
316
+ ids=lambda x: f"{x[0].__name__}.{x[1]}",
317
+ )
318
+ def test_nat_doc_strings(compare):
319
+ # see gh-17327
320
+ #
321
+ # The docstrings for overlapping methods should match.
322
+ klass, method = compare
323
+ klass_doc = getattr(klass, method).__doc__
324
+
325
+ if klass == Timestamp and method == "isoformat":
326
+ pytest.skip(
327
+ "Ignore differences with Timestamp.isoformat() as they're intentional"
328
+ )
329
+
330
+ if method == "to_numpy":
331
+ # GH#44460 can return either dt64 or td64 depending on dtype,
332
+ # different docstring is intentional
333
+ pytest.skip(f"different docstring for {method} is intentional")
334
+
335
+ nat_doc = getattr(NaT, method).__doc__
336
+ assert klass_doc == nat_doc
337
+
338
+
339
+ _ops = {
340
+ "left_plus_right": lambda a, b: a + b,
341
+ "right_plus_left": lambda a, b: b + a,
342
+ "left_minus_right": lambda a, b: a - b,
343
+ "right_minus_left": lambda a, b: b - a,
344
+ "left_times_right": lambda a, b: a * b,
345
+ "right_times_left": lambda a, b: b * a,
346
+ "left_div_right": lambda a, b: a / b,
347
+ "right_div_left": lambda a, b: b / a,
348
+ }
349
+
350
+
351
+ @pytest.mark.parametrize("op_name", list(_ops.keys()))
352
+ @pytest.mark.parametrize(
353
+ "value,val_type",
354
+ [
355
+ (2, "scalar"),
356
+ (1.5, "floating"),
357
+ (np.nan, "floating"),
358
+ ("foo", "str"),
359
+ (timedelta(3600), "timedelta"),
360
+ (Timedelta("5s"), "timedelta"),
361
+ (datetime(2014, 1, 1), "timestamp"),
362
+ (Timestamp("2014-01-01"), "timestamp"),
363
+ (Timestamp("2014-01-01", tz="UTC"), "timestamp"),
364
+ (Timestamp("2014-01-01", tz="US/Eastern"), "timestamp"),
365
+ (pytz.timezone("Asia/Tokyo").localize(datetime(2014, 1, 1)), "timestamp"),
366
+ ],
367
+ )
368
+ def test_nat_arithmetic_scalar(op_name, value, val_type):
369
+ # see gh-6873
370
+ invalid_ops = {
371
+ "scalar": {"right_div_left"},
372
+ "floating": {
373
+ "right_div_left",
374
+ "left_minus_right",
375
+ "right_minus_left",
376
+ "left_plus_right",
377
+ "right_plus_left",
378
+ },
379
+ "str": set(_ops.keys()),
380
+ "timedelta": {"left_times_right", "right_times_left"},
381
+ "timestamp": {
382
+ "left_times_right",
383
+ "right_times_left",
384
+ "left_div_right",
385
+ "right_div_left",
386
+ },
387
+ }
388
+
389
+ op = _ops[op_name]
390
+
391
+ if op_name in invalid_ops.get(val_type, set()):
392
+ if (
393
+ val_type == "timedelta"
394
+ and "times" in op_name
395
+ and isinstance(value, Timedelta)
396
+ ):
397
+ typs = "(Timedelta|NaTType)"
398
+ msg = rf"unsupported operand type\(s\) for \*: '{typs}' and '{typs}'"
399
+ elif val_type == "str":
400
+ # un-specific check here because the message comes from str
401
+ # and varies by method
402
+ msg = "|".join(
403
+ [
404
+ "can only concatenate str",
405
+ "unsupported operand type",
406
+ "can't multiply sequence",
407
+ "Can't convert 'NaTType'",
408
+ "must be str, not NaTType",
409
+ ]
410
+ )
411
+ else:
412
+ msg = "unsupported operand type"
413
+
414
+ with pytest.raises(TypeError, match=msg):
415
+ op(NaT, value)
416
+ else:
417
+ if val_type == "timedelta" and "div" in op_name:
418
+ expected = np.nan
419
+ else:
420
+ expected = NaT
421
+
422
+ assert op(NaT, value) is expected
423
+
424
+
425
+ @pytest.mark.parametrize(
426
+ "val,expected", [(np.nan, NaT), (NaT, np.nan), (np.timedelta64("NaT"), np.nan)]
427
+ )
428
+ def test_nat_rfloordiv_timedelta(val, expected):
429
+ # see gh-#18846
430
+ #
431
+ # See also test_timedelta.TestTimedeltaArithmetic.test_floordiv
432
+ td = Timedelta(hours=3, minutes=4)
433
+ assert td // val is expected
434
+
435
+
436
+ @pytest.mark.parametrize(
437
+ "op_name",
438
+ ["left_plus_right", "right_plus_left", "left_minus_right", "right_minus_left"],
439
+ )
440
+ @pytest.mark.parametrize(
441
+ "value",
442
+ [
443
+ DatetimeIndex(["2011-01-01", "2011-01-02"], name="x"),
444
+ DatetimeIndex(["2011-01-01", "2011-01-02"], tz="US/Eastern", name="x"),
445
+ DatetimeArray._from_sequence(["2011-01-01", "2011-01-02"], dtype="M8[ns]"),
446
+ DatetimeArray._from_sequence(
447
+ ["2011-01-01", "2011-01-02"], dtype=DatetimeTZDtype(tz="US/Pacific")
448
+ ),
449
+ TimedeltaIndex(["1 day", "2 day"], name="x"),
450
+ ],
451
+ )
452
+ def test_nat_arithmetic_index(op_name, value):
453
+ # see gh-11718
454
+ exp_name = "x"
455
+ exp_data = [NaT] * 2
456
+
457
+ if value.dtype.kind == "M" and "plus" in op_name:
458
+ expected = DatetimeIndex(exp_data, tz=value.tz, name=exp_name)
459
+ else:
460
+ expected = TimedeltaIndex(exp_data, name=exp_name)
461
+ expected = expected.as_unit(value.unit)
462
+
463
+ if not isinstance(value, Index):
464
+ expected = expected.array
465
+
466
+ op = _ops[op_name]
467
+ result = op(NaT, value)
468
+ tm.assert_equal(result, expected)
469
+
470
+
471
+ @pytest.mark.parametrize(
472
+ "op_name",
473
+ ["left_plus_right", "right_plus_left", "left_minus_right", "right_minus_left"],
474
+ )
475
+ @pytest.mark.parametrize("box", [TimedeltaIndex, Series, TimedeltaArray._from_sequence])
476
+ def test_nat_arithmetic_td64_vector(op_name, box):
477
+ # see gh-19124
478
+ vec = box(["1 day", "2 day"], dtype="timedelta64[ns]")
479
+ box_nat = box([NaT, NaT], dtype="timedelta64[ns]")
480
+ tm.assert_equal(_ops[op_name](vec, NaT), box_nat)
481
+
482
+
483
+ @pytest.mark.parametrize(
484
+ "dtype,op,out_dtype",
485
+ [
486
+ ("datetime64[ns]", operator.add, "datetime64[ns]"),
487
+ ("datetime64[ns]", roperator.radd, "datetime64[ns]"),
488
+ ("datetime64[ns]", operator.sub, "timedelta64[ns]"),
489
+ ("datetime64[ns]", roperator.rsub, "timedelta64[ns]"),
490
+ ("timedelta64[ns]", operator.add, "datetime64[ns]"),
491
+ ("timedelta64[ns]", roperator.radd, "datetime64[ns]"),
492
+ ("timedelta64[ns]", operator.sub, "datetime64[ns]"),
493
+ ("timedelta64[ns]", roperator.rsub, "timedelta64[ns]"),
494
+ ],
495
+ )
496
+ def test_nat_arithmetic_ndarray(dtype, op, out_dtype):
497
+ other = np.arange(10).astype(dtype)
498
+ result = op(NaT, other)
499
+
500
+ expected = np.empty(other.shape, dtype=out_dtype)
501
+ expected.fill("NaT")
502
+ tm.assert_numpy_array_equal(result, expected)
503
+
504
+
505
+ def test_nat_pinned_docstrings():
506
+ # see gh-17327
507
+ assert NaT.ctime.__doc__ == Timestamp.ctime.__doc__
508
+
509
+
510
+ def test_to_numpy_alias():
511
+ # GH 24653: alias .to_numpy() for scalars
512
+ expected = NaT.to_datetime64()
513
+ result = NaT.to_numpy()
514
+
515
+ assert isna(expected) and isna(result)
516
+
517
+ # GH#44460
518
+ result = NaT.to_numpy("M8[s]")
519
+ assert isinstance(result, np.datetime64)
520
+ assert result.dtype == "M8[s]"
521
+
522
+ result = NaT.to_numpy("m8[ns]")
523
+ assert isinstance(result, np.timedelta64)
524
+ assert result.dtype == "m8[ns]"
525
+
526
+ result = NaT.to_numpy("m8[s]")
527
+ assert isinstance(result, np.timedelta64)
528
+ assert result.dtype == "m8[s]"
529
+
530
+ with pytest.raises(ValueError, match="NaT.to_numpy dtype must be a "):
531
+ NaT.to_numpy(np.int64)
532
+
533
+
534
+ @pytest.mark.parametrize(
535
+ "other",
536
+ [
537
+ Timedelta(0),
538
+ Timedelta(0).to_pytimedelta(),
539
+ pytest.param(
540
+ Timedelta(0).to_timedelta64(),
541
+ marks=pytest.mark.xfail(
542
+ not np_version_gte1p24p3,
543
+ reason="td64 doesn't return NotImplemented, see numpy#17017",
544
+ # When this xfail is fixed, test_nat_comparisons_numpy
545
+ # can be removed.
546
+ ),
547
+ ),
548
+ Timestamp(0),
549
+ Timestamp(0).to_pydatetime(),
550
+ pytest.param(
551
+ Timestamp(0).to_datetime64(),
552
+ marks=pytest.mark.xfail(
553
+ not np_version_gte1p24p3,
554
+ reason="dt64 doesn't return NotImplemented, see numpy#17017",
555
+ ),
556
+ ),
557
+ Timestamp(0).tz_localize("UTC"),
558
+ NaT,
559
+ ],
560
+ )
561
+ def test_nat_comparisons(compare_operators_no_eq_ne, other):
562
+ # GH 26039
563
+ opname = compare_operators_no_eq_ne
564
+
565
+ assert getattr(NaT, opname)(other) is False
566
+
567
+ op = getattr(operator, opname.strip("_"))
568
+ assert op(NaT, other) is False
569
+ assert op(other, NaT) is False
570
+
571
+
572
+ @pytest.mark.parametrize("other", [np.timedelta64(0, "ns"), np.datetime64("now", "ns")])
573
+ def test_nat_comparisons_numpy(other):
574
+ # Once numpy#17017 is fixed and the xfailed cases in test_nat_comparisons
575
+ # pass, this test can be removed
576
+ assert not NaT == other
577
+ assert NaT != other
578
+ assert not NaT < other
579
+ assert not NaT > other
580
+ assert not NaT <= other
581
+ assert not NaT >= other
582
+
583
+
584
+ @pytest.mark.parametrize("other_and_type", [("foo", "str"), (2, "int"), (2.0, "float")])
585
+ @pytest.mark.parametrize(
586
+ "symbol_and_op",
587
+ [("<=", operator.le), ("<", operator.lt), (">=", operator.ge), (">", operator.gt)],
588
+ )
589
+ def test_nat_comparisons_invalid(other_and_type, symbol_and_op):
590
+ # GH#35585
591
+ other, other_type = other_and_type
592
+ symbol, op = symbol_and_op
593
+
594
+ assert not NaT == other
595
+ assert not other == NaT
596
+
597
+ assert NaT != other
598
+ assert other != NaT
599
+
600
+ msg = f"'{symbol}' not supported between instances of 'NaTType' and '{other_type}'"
601
+ with pytest.raises(TypeError, match=msg):
602
+ op(NaT, other)
603
+
604
+ msg = f"'{symbol}' not supported between instances of '{other_type}' and 'NaTType'"
605
+ with pytest.raises(TypeError, match=msg):
606
+ op(other, NaT)
607
+
608
+
609
+ @pytest.mark.parametrize(
610
+ "other",
611
+ [
612
+ np.array(["foo"] * 2, dtype=object),
613
+ np.array([2, 3], dtype="int64"),
614
+ np.array([2.0, 3.5], dtype="float64"),
615
+ ],
616
+ ids=["str", "int", "float"],
617
+ )
618
+ def test_nat_comparisons_invalid_ndarray(other):
619
+ # GH#40722
620
+ expected = np.array([False, False])
621
+ result = NaT == other
622
+ tm.assert_numpy_array_equal(result, expected)
623
+ result = other == NaT
624
+ tm.assert_numpy_array_equal(result, expected)
625
+
626
+ expected = np.array([True, True])
627
+ result = NaT != other
628
+ tm.assert_numpy_array_equal(result, expected)
629
+ result = other != NaT
630
+ tm.assert_numpy_array_equal(result, expected)
631
+
632
+ for symbol, op in [
633
+ ("<=", operator.le),
634
+ ("<", operator.lt),
635
+ (">=", operator.ge),
636
+ (">", operator.gt),
637
+ ]:
638
+ msg = f"'{symbol}' not supported between"
639
+
640
+ with pytest.raises(TypeError, match=msg):
641
+ op(NaT, other)
642
+
643
+ if other.dtype == np.dtype("object"):
644
+ # uses the reverse operator, so symbol changes
645
+ msg = None
646
+ with pytest.raises(TypeError, match=msg):
647
+ op(other, NaT)
648
+
649
+
650
+ def test_compare_date(fixed_now_ts):
651
+ # GH#39151 comparing NaT with date object is deprecated
652
+ # See also: tests.scalar.timestamps.test_comparisons::test_compare_date
653
+
654
+ dt = fixed_now_ts.to_pydatetime().date()
655
+
656
+ msg = "Cannot compare NaT with datetime.date object"
657
+ for left, right in [(NaT, dt), (dt, NaT)]:
658
+ assert not left == right
659
+ assert left != right
660
+
661
+ with pytest.raises(TypeError, match=msg):
662
+ left < right
663
+ with pytest.raises(TypeError, match=msg):
664
+ left <= right
665
+ with pytest.raises(TypeError, match=msg):
666
+ left > right
667
+ with pytest.raises(TypeError, match=msg):
668
+ left >= right
669
+
670
+
671
+ @pytest.mark.parametrize(
672
+ "obj",
673
+ [
674
+ offsets.YearEnd(2),
675
+ offsets.YearBegin(2),
676
+ offsets.MonthBegin(1),
677
+ offsets.MonthEnd(2),
678
+ offsets.MonthEnd(12),
679
+ offsets.Day(2),
680
+ offsets.Day(5),
681
+ offsets.Hour(24),
682
+ offsets.Hour(3),
683
+ offsets.Minute(),
684
+ np.timedelta64(3, "h"),
685
+ np.timedelta64(4, "h"),
686
+ np.timedelta64(3200, "s"),
687
+ np.timedelta64(3600, "s"),
688
+ np.timedelta64(3600 * 24, "s"),
689
+ np.timedelta64(2, "D"),
690
+ np.timedelta64(365, "D"),
691
+ timedelta(-2),
692
+ timedelta(365),
693
+ timedelta(minutes=120),
694
+ timedelta(days=4, minutes=180),
695
+ timedelta(hours=23),
696
+ timedelta(hours=23, minutes=30),
697
+ timedelta(hours=48),
698
+ ],
699
+ )
700
+ def test_nat_addsub_tdlike_scalar(obj):
701
+ assert NaT + obj is NaT
702
+ assert obj + NaT is NaT
703
+ assert NaT - obj is NaT
704
+
705
+
706
+ def test_pickle():
707
+ # GH#4606
708
+ p = tm.round_trip_pickle(NaT)
709
+ assert p is NaT
omnilmm/lib/python3.10/site-packages/pandas/tests/scalar/timedelta/test_constructors.py ADDED
@@ -0,0 +1,698 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from datetime import timedelta
2
+ from itertools import product
3
+
4
+ import numpy as np
5
+ import pytest
6
+
7
+ from pandas._libs.tslibs import OutOfBoundsTimedelta
8
+ from pandas._libs.tslibs.dtypes import NpyDatetimeUnit
9
+
10
+ from pandas import (
11
+ Index,
12
+ NaT,
13
+ Timedelta,
14
+ TimedeltaIndex,
15
+ offsets,
16
+ to_timedelta,
17
+ )
18
+ import pandas._testing as tm
19
+
20
+
21
+ class TestTimedeltaConstructorUnitKeyword:
22
+ @pytest.mark.parametrize("unit", ["Y", "y", "M"])
23
+ def test_unit_m_y_raises(self, unit):
24
+ msg = "Units 'M', 'Y', and 'y' are no longer supported"
25
+
26
+ with pytest.raises(ValueError, match=msg):
27
+ Timedelta(10, unit)
28
+
29
+ with pytest.raises(ValueError, match=msg):
30
+ to_timedelta(10, unit)
31
+
32
+ with pytest.raises(ValueError, match=msg):
33
+ to_timedelta([1, 2], unit)
34
+
35
+ @pytest.mark.parametrize(
36
+ "unit,unit_depr",
37
+ [
38
+ ("h", "H"),
39
+ ("min", "T"),
40
+ ("s", "S"),
41
+ ("ms", "L"),
42
+ ("ns", "N"),
43
+ ("us", "U"),
44
+ ],
45
+ )
46
+ def test_units_H_T_S_L_N_U_deprecated(self, unit, unit_depr):
47
+ # GH#52536
48
+ msg = f"'{unit_depr}' is deprecated and will be removed in a future version."
49
+
50
+ expected = Timedelta(1, unit=unit)
51
+ with tm.assert_produces_warning(FutureWarning, match=msg):
52
+ result = Timedelta(1, unit=unit_depr)
53
+ tm.assert_equal(result, expected)
54
+
55
+ @pytest.mark.parametrize(
56
+ "unit, np_unit",
57
+ [(value, "W") for value in ["W", "w"]]
58
+ + [(value, "D") for value in ["D", "d", "days", "day", "Days", "Day"]]
59
+ + [
60
+ (value, "m")
61
+ for value in [
62
+ "m",
63
+ "minute",
64
+ "min",
65
+ "minutes",
66
+ "Minute",
67
+ "Min",
68
+ "Minutes",
69
+ ]
70
+ ]
71
+ + [
72
+ (value, "s")
73
+ for value in [
74
+ "s",
75
+ "seconds",
76
+ "sec",
77
+ "second",
78
+ "Seconds",
79
+ "Sec",
80
+ "Second",
81
+ ]
82
+ ]
83
+ + [
84
+ (value, "ms")
85
+ for value in [
86
+ "ms",
87
+ "milliseconds",
88
+ "millisecond",
89
+ "milli",
90
+ "millis",
91
+ "MS",
92
+ "Milliseconds",
93
+ "Millisecond",
94
+ "Milli",
95
+ "Millis",
96
+ ]
97
+ ]
98
+ + [
99
+ (value, "us")
100
+ for value in [
101
+ "us",
102
+ "microseconds",
103
+ "microsecond",
104
+ "micro",
105
+ "micros",
106
+ "u",
107
+ "US",
108
+ "Microseconds",
109
+ "Microsecond",
110
+ "Micro",
111
+ "Micros",
112
+ "U",
113
+ ]
114
+ ]
115
+ + [
116
+ (value, "ns")
117
+ for value in [
118
+ "ns",
119
+ "nanoseconds",
120
+ "nanosecond",
121
+ "nano",
122
+ "nanos",
123
+ "n",
124
+ "NS",
125
+ "Nanoseconds",
126
+ "Nanosecond",
127
+ "Nano",
128
+ "Nanos",
129
+ "N",
130
+ ]
131
+ ],
132
+ )
133
+ @pytest.mark.parametrize("wrapper", [np.array, list, Index])
134
+ def test_unit_parser(self, unit, np_unit, wrapper):
135
+ # validate all units, GH 6855, GH 21762
136
+ # array-likes
137
+ expected = TimedeltaIndex(
138
+ [np.timedelta64(i, np_unit) for i in np.arange(5).tolist()],
139
+ dtype="m8[ns]",
140
+ )
141
+ # TODO(2.0): the desired output dtype may have non-nano resolution
142
+ msg = f"'{unit}' is deprecated and will be removed in a future version."
143
+
144
+ if (unit, np_unit) in (("u", "us"), ("U", "us"), ("n", "ns"), ("N", "ns")):
145
+ warn = FutureWarning
146
+ else:
147
+ warn = FutureWarning
148
+ msg = "The 'unit' keyword in TimedeltaIndex construction is deprecated"
149
+ with tm.assert_produces_warning(warn, match=msg):
150
+ result = to_timedelta(wrapper(range(5)), unit=unit)
151
+ tm.assert_index_equal(result, expected)
152
+ result = TimedeltaIndex(wrapper(range(5)), unit=unit)
153
+ tm.assert_index_equal(result, expected)
154
+
155
+ str_repr = [f"{x}{unit}" for x in np.arange(5)]
156
+ result = to_timedelta(wrapper(str_repr))
157
+ tm.assert_index_equal(result, expected)
158
+ result = to_timedelta(wrapper(str_repr))
159
+ tm.assert_index_equal(result, expected)
160
+
161
+ # scalar
162
+ expected = Timedelta(np.timedelta64(2, np_unit).astype("timedelta64[ns]"))
163
+ result = to_timedelta(2, unit=unit)
164
+ assert result == expected
165
+ result = Timedelta(2, unit=unit)
166
+ assert result == expected
167
+
168
+ result = to_timedelta(f"2{unit}")
169
+ assert result == expected
170
+ result = Timedelta(f"2{unit}")
171
+ assert result == expected
172
+
173
+
174
+ def test_construct_from_kwargs_overflow():
175
+ # GH#55503
176
+ msg = "seconds=86400000000000000000, milliseconds=0, microseconds=0, nanoseconds=0"
177
+ with pytest.raises(OutOfBoundsTimedelta, match=msg):
178
+ Timedelta(days=10**6)
179
+ msg = "seconds=60000000000000000000, milliseconds=0, microseconds=0, nanoseconds=0"
180
+ with pytest.raises(OutOfBoundsTimedelta, match=msg):
181
+ Timedelta(minutes=10**9)
182
+
183
+
184
+ def test_construct_with_weeks_unit_overflow():
185
+ # GH#47268 don't silently wrap around
186
+ with pytest.raises(OutOfBoundsTimedelta, match="without overflow"):
187
+ Timedelta(1000000000000000000, unit="W")
188
+
189
+ with pytest.raises(OutOfBoundsTimedelta, match="without overflow"):
190
+ Timedelta(1000000000000000000.0, unit="W")
191
+
192
+
193
+ def test_construct_from_td64_with_unit():
194
+ # ignore the unit, as it may cause silently overflows leading to incorrect
195
+ # results, and in non-overflow cases is irrelevant GH#46827
196
+ obj = np.timedelta64(123456789000000000, "h")
197
+
198
+ with pytest.raises(OutOfBoundsTimedelta, match="123456789000000000 hours"):
199
+ Timedelta(obj, unit="ps")
200
+
201
+ with pytest.raises(OutOfBoundsTimedelta, match="123456789000000000 hours"):
202
+ Timedelta(obj, unit="ns")
203
+
204
+ with pytest.raises(OutOfBoundsTimedelta, match="123456789000000000 hours"):
205
+ Timedelta(obj)
206
+
207
+
208
+ def test_from_td64_retain_resolution():
209
+ # case where we retain millisecond resolution
210
+ obj = np.timedelta64(12345, "ms")
211
+
212
+ td = Timedelta(obj)
213
+ assert td._value == obj.view("i8")
214
+ assert td._creso == NpyDatetimeUnit.NPY_FR_ms.value
215
+
216
+ # Case where we cast to nearest-supported reso
217
+ obj2 = np.timedelta64(1234, "D")
218
+ td2 = Timedelta(obj2)
219
+ assert td2._creso == NpyDatetimeUnit.NPY_FR_s.value
220
+ assert td2 == obj2
221
+ assert td2.days == 1234
222
+
223
+ # Case that _would_ overflow if we didn't support non-nano
224
+ obj3 = np.timedelta64(1000000000000000000, "us")
225
+ td3 = Timedelta(obj3)
226
+ assert td3.total_seconds() == 1000000000000
227
+ assert td3._creso == NpyDatetimeUnit.NPY_FR_us.value
228
+
229
+
230
+ def test_from_pytimedelta_us_reso():
231
+ # pytimedelta has microsecond resolution, so Timedelta(pytd) inherits that
232
+ td = timedelta(days=4, minutes=3)
233
+ result = Timedelta(td)
234
+ assert result.to_pytimedelta() == td
235
+ assert result._creso == NpyDatetimeUnit.NPY_FR_us.value
236
+
237
+
238
+ def test_from_tick_reso():
239
+ tick = offsets.Nano()
240
+ assert Timedelta(tick)._creso == NpyDatetimeUnit.NPY_FR_ns.value
241
+
242
+ tick = offsets.Micro()
243
+ assert Timedelta(tick)._creso == NpyDatetimeUnit.NPY_FR_us.value
244
+
245
+ tick = offsets.Milli()
246
+ assert Timedelta(tick)._creso == NpyDatetimeUnit.NPY_FR_ms.value
247
+
248
+ tick = offsets.Second()
249
+ assert Timedelta(tick)._creso == NpyDatetimeUnit.NPY_FR_s.value
250
+
251
+ # everything above Second gets cast to the closest supported reso: second
252
+ tick = offsets.Minute()
253
+ assert Timedelta(tick)._creso == NpyDatetimeUnit.NPY_FR_s.value
254
+
255
+ tick = offsets.Hour()
256
+ assert Timedelta(tick)._creso == NpyDatetimeUnit.NPY_FR_s.value
257
+
258
+ tick = offsets.Day()
259
+ assert Timedelta(tick)._creso == NpyDatetimeUnit.NPY_FR_s.value
260
+
261
+
262
+ def test_construction():
263
+ expected = np.timedelta64(10, "D").astype("m8[ns]").view("i8")
264
+ assert Timedelta(10, unit="d")._value == expected
265
+ assert Timedelta(10.0, unit="d")._value == expected
266
+ assert Timedelta("10 days")._value == expected
267
+ assert Timedelta(days=10)._value == expected
268
+ assert Timedelta(days=10.0)._value == expected
269
+
270
+ expected += np.timedelta64(10, "s").astype("m8[ns]").view("i8")
271
+ assert Timedelta("10 days 00:00:10")._value == expected
272
+ assert Timedelta(days=10, seconds=10)._value == expected
273
+ assert Timedelta(days=10, milliseconds=10 * 1000)._value == expected
274
+ assert Timedelta(days=10, microseconds=10 * 1000 * 1000)._value == expected
275
+
276
+ # rounding cases
277
+ assert Timedelta(82739999850000)._value == 82739999850000
278
+ assert "0 days 22:58:59.999850" in str(Timedelta(82739999850000))
279
+ assert Timedelta(123072001000000)._value == 123072001000000
280
+ assert "1 days 10:11:12.001" in str(Timedelta(123072001000000))
281
+
282
+ # string conversion with/without leading zero
283
+ # GH#9570
284
+ assert Timedelta("0:00:00") == timedelta(hours=0)
285
+ assert Timedelta("00:00:00") == timedelta(hours=0)
286
+ assert Timedelta("-1:00:00") == -timedelta(hours=1)
287
+ assert Timedelta("-01:00:00") == -timedelta(hours=1)
288
+
289
+ # more strings & abbrevs
290
+ # GH#8190
291
+ assert Timedelta("1 h") == timedelta(hours=1)
292
+ assert Timedelta("1 hour") == timedelta(hours=1)
293
+ assert Timedelta("1 hr") == timedelta(hours=1)
294
+ assert Timedelta("1 hours") == timedelta(hours=1)
295
+ assert Timedelta("-1 hours") == -timedelta(hours=1)
296
+ assert Timedelta("1 m") == timedelta(minutes=1)
297
+ assert Timedelta("1.5 m") == timedelta(seconds=90)
298
+ assert Timedelta("1 minute") == timedelta(minutes=1)
299
+ assert Timedelta("1 minutes") == timedelta(minutes=1)
300
+ assert Timedelta("1 s") == timedelta(seconds=1)
301
+ assert Timedelta("1 second") == timedelta(seconds=1)
302
+ assert Timedelta("1 seconds") == timedelta(seconds=1)
303
+ assert Timedelta("1 ms") == timedelta(milliseconds=1)
304
+ assert Timedelta("1 milli") == timedelta(milliseconds=1)
305
+ assert Timedelta("1 millisecond") == timedelta(milliseconds=1)
306
+ assert Timedelta("1 us") == timedelta(microseconds=1)
307
+ assert Timedelta("1 µs") == timedelta(microseconds=1)
308
+ assert Timedelta("1 micros") == timedelta(microseconds=1)
309
+ assert Timedelta("1 microsecond") == timedelta(microseconds=1)
310
+ assert Timedelta("1.5 microsecond") == Timedelta("00:00:00.000001500")
311
+ assert Timedelta("1 ns") == Timedelta("00:00:00.000000001")
312
+ assert Timedelta("1 nano") == Timedelta("00:00:00.000000001")
313
+ assert Timedelta("1 nanosecond") == Timedelta("00:00:00.000000001")
314
+
315
+ # combos
316
+ assert Timedelta("10 days 1 hour") == timedelta(days=10, hours=1)
317
+ assert Timedelta("10 days 1 h") == timedelta(days=10, hours=1)
318
+ assert Timedelta("10 days 1 h 1m 1s") == timedelta(
319
+ days=10, hours=1, minutes=1, seconds=1
320
+ )
321
+ assert Timedelta("-10 days 1 h 1m 1s") == -timedelta(
322
+ days=10, hours=1, minutes=1, seconds=1
323
+ )
324
+ assert Timedelta("-10 days 1 h 1m 1s") == -timedelta(
325
+ days=10, hours=1, minutes=1, seconds=1
326
+ )
327
+ assert Timedelta("-10 days 1 h 1m 1s 3us") == -timedelta(
328
+ days=10, hours=1, minutes=1, seconds=1, microseconds=3
329
+ )
330
+ assert Timedelta("-10 days 1 h 1.5m 1s 3us") == -timedelta(
331
+ days=10, hours=1, minutes=1, seconds=31, microseconds=3
332
+ )
333
+
334
+ # Currently invalid as it has a - on the hh:mm:dd part
335
+ # (only allowed on the days)
336
+ msg = "only leading negative signs are allowed"
337
+ with pytest.raises(ValueError, match=msg):
338
+ Timedelta("-10 days -1 h 1.5m 1s 3us")
339
+
340
+ # only leading neg signs are allowed
341
+ with pytest.raises(ValueError, match=msg):
342
+ Timedelta("10 days -1 h 1.5m 1s 3us")
343
+
344
+ # no units specified
345
+ msg = "no units specified"
346
+ with pytest.raises(ValueError, match=msg):
347
+ Timedelta("3.1415")
348
+
349
+ # invalid construction
350
+ msg = "cannot construct a Timedelta"
351
+ with pytest.raises(ValueError, match=msg):
352
+ Timedelta()
353
+
354
+ msg = "unit abbreviation w/o a number"
355
+ with pytest.raises(ValueError, match=msg):
356
+ Timedelta("foo")
357
+
358
+ msg = (
359
+ "cannot construct a Timedelta from "
360
+ "the passed arguments, allowed keywords are "
361
+ )
362
+ with pytest.raises(ValueError, match=msg):
363
+ Timedelta(day=10)
364
+
365
+ # floats
366
+ expected = np.timedelta64(10, "s").astype("m8[ns]").view("i8") + np.timedelta64(
367
+ 500, "ms"
368
+ ).astype("m8[ns]").view("i8")
369
+ assert Timedelta(10.5, unit="s")._value == expected
370
+
371
+ # offset
372
+ assert to_timedelta(offsets.Hour(2)) == Timedelta(hours=2)
373
+ assert Timedelta(offsets.Hour(2)) == Timedelta(hours=2)
374
+ assert Timedelta(offsets.Second(2)) == Timedelta(seconds=2)
375
+
376
+ # GH#11995: unicode
377
+ expected = Timedelta("1h")
378
+ result = Timedelta("1h")
379
+ assert result == expected
380
+ assert to_timedelta(offsets.Hour(2)) == Timedelta("0 days, 02:00:00")
381
+
382
+ msg = "unit abbreviation w/o a number"
383
+ with pytest.raises(ValueError, match=msg):
384
+ Timedelta("foo bar")
385
+
386
+
387
+ @pytest.mark.parametrize(
388
+ "item",
389
+ list(
390
+ {
391
+ "days": "D",
392
+ "seconds": "s",
393
+ "microseconds": "us",
394
+ "milliseconds": "ms",
395
+ "minutes": "m",
396
+ "hours": "h",
397
+ "weeks": "W",
398
+ }.items()
399
+ ),
400
+ )
401
+ @pytest.mark.parametrize(
402
+ "npdtype", [np.int64, np.int32, np.int16, np.float64, np.float32, np.float16]
403
+ )
404
+ def test_td_construction_with_np_dtypes(npdtype, item):
405
+ # GH#8757: test construction with np dtypes
406
+ pykwarg, npkwarg = item
407
+ expected = np.timedelta64(1, npkwarg).astype("m8[ns]").view("i8")
408
+ assert Timedelta(**{pykwarg: npdtype(1)})._value == expected
409
+
410
+
411
+ @pytest.mark.parametrize(
412
+ "val",
413
+ [
414
+ "1s",
415
+ "-1s",
416
+ "1us",
417
+ "-1us",
418
+ "1 day",
419
+ "-1 day",
420
+ "-23:59:59.999999",
421
+ "-1 days +23:59:59.999999",
422
+ "-1ns",
423
+ "1ns",
424
+ "-23:59:59.999999999",
425
+ ],
426
+ )
427
+ def test_td_from_repr_roundtrip(val):
428
+ # round-trip both for string and value
429
+ td = Timedelta(val)
430
+ assert Timedelta(td._value) == td
431
+
432
+ assert Timedelta(str(td)) == td
433
+ assert Timedelta(td._repr_base(format="all")) == td
434
+ assert Timedelta(td._repr_base()) == td
435
+
436
+
437
+ def test_overflow_on_construction():
438
+ # GH#3374
439
+ value = Timedelta("1day")._value * 20169940
440
+ msg = "Cannot cast 1742682816000000000000 from ns to 'ns' without overflow"
441
+ with pytest.raises(OutOfBoundsTimedelta, match=msg):
442
+ Timedelta(value)
443
+
444
+ # xref GH#17637
445
+ msg = "Cannot cast 139993 from D to 'ns' without overflow"
446
+ with pytest.raises(OutOfBoundsTimedelta, match=msg):
447
+ Timedelta(7 * 19999, unit="D")
448
+
449
+ # used to overflow before non-ns support
450
+ td = Timedelta(timedelta(days=13 * 19999))
451
+ assert td._creso == NpyDatetimeUnit.NPY_FR_us.value
452
+ assert td.days == 13 * 19999
453
+
454
+
455
@pytest.mark.parametrize(
    "val, unit",
    [
        # each value is just beyond the m8[ns] bound when expressed in its unit
        (15251, "W"),
        (106752, "D"),
        (2562048, "h"),
        (153722868, "m"),
        (9223372037, "s"),
    ],
)
def test_construction_out_of_bounds_td64ns(val, unit):
    # TODO: parametrize over units just above/below the implementation bounds
    #  once GH#38964 is resolved

    # Timedelta.max is just under 106752 days, so each input overflows m8[ns];
    # a naive astype to nanoseconds wraps around to a negative i8 value.
    too_big = np.timedelta64(val, unit)
    assert too_big.astype("m8[ns]").view("i8") < 0

    result = Timedelta(too_big)
    if unit != "M":
        # with unit="M" the conversion to "s" is poorly defined
        # (and numpy issues DeprecationWarning)
        assert result.asm8 == too_big
    assert result.asm8.dtype == "m8[s]"
    msg = r"Cannot cast 1067\d\d days .* to unit='ns' without overflow"
    with pytest.raises(OutOfBoundsTimedelta, match=msg):
        result.as_unit("ns")

    # one step back inside the bounds and construction round-trips
    assert Timedelta(too_big - 1) == too_big - 1

    # same story on the negative side
    too_big *= -1
    assert too_big.astype("m8[ns]").view("i8") > 0

    negative_result = Timedelta(too_big)
    msg = r"Cannot cast -1067\d\d days .* to unit='ns' without overflow"
    with pytest.raises(OutOfBoundsTimedelta, match=msg):
        negative_result.as_unit("ns")

    # one step back inside the bounds and construction round-trips
    assert Timedelta(too_big + 1) == too_big + 1
496
+
497
+
498
@pytest.mark.parametrize(
    "val, unit",
    [
        (15251 * 10**9, "W"),
        (106752 * 10**9, "D"),
        (2562048 * 10**9, "h"),
        (153722868 * 10**9, "m"),
    ],
)
def test_construction_out_of_bounds_td64s(val, unit):
    # These values overflow even at second resolution, so construction
    # itself must raise (the message embeds the offending timedelta64).
    oversized = np.timedelta64(val, unit)
    with pytest.raises(OutOfBoundsTimedelta, match=str(oversized)):
        Timedelta(oversized)

    # just back inside the bounds and construction succeeds
    assert Timedelta(oversized - 10**9) == oversized - 10**9
514
+
515
+
516
@pytest.mark.parametrize(
    "fmt,exp",
    [
        # full precision down to nanoseconds
        (
            "P6DT0H50M3.010010012S",
            Timedelta(
                days=6,
                minutes=50,
                seconds=3,
                milliseconds=10,
                microseconds=10,
                nanoseconds=12,
            ),
        ),
        # negative day component
        (
            "P-6DT0H50M3.010010012S",
            Timedelta(
                days=-6,
                minutes=50,
                seconds=3,
                milliseconds=10,
                microseconds=10,
                nanoseconds=12,
            ),
        ),
        ("P4DT12H30M5S", Timedelta(days=4, hours=12, minutes=30, seconds=5)),
        ("P0DT0H0M0.000000123S", Timedelta(nanoseconds=123)),
        ("P0DT0H0M0.00001S", Timedelta(microseconds=10)),
        ("P0DT0H0M0.001S", Timedelta(milliseconds=1)),
        ("P0DT0H1M0S", Timedelta(minutes=1)),
        # components are allowed to exceed their natural range
        ("P1DT25H61M61S", Timedelta(days=1, hours=25, minutes=61, seconds=61)),
        ("PT1S", Timedelta(seconds=1)),
        ("PT0S", Timedelta(seconds=0)),
        # week designator
        ("P1WT0S", Timedelta(days=7, seconds=0)),
        ("P1D", Timedelta(days=1)),
        ("P1DT1H", Timedelta(days=1, hours=1)),
        ("P1W", Timedelta(days=7)),
        ("PT300S", Timedelta(seconds=300)),
        ("P1DT0H0M00000000000S", Timedelta(days=1)),
        # signs on individual components and on the whole duration
        ("PT-6H3M", Timedelta(hours=-6, minutes=3)),
        ("-PT6H3M", Timedelta(hours=-6, minutes=-3)),
        ("-PT-6H+3M", Timedelta(hours=6, minutes=-3)),
    ],
)
def test_iso_constructor(fmt, exp):
    # ISO 8601 duration strings parse to the equivalent Timedelta
    assert Timedelta(fmt) == exp
562
+
563
+
564
@pytest.mark.parametrize(
    "fmt",
    [
        "PPPPPPPPPPPP",
        "PDTHMS",
        "P0DT999H999M999S",
        "P1DT0H0M0.0000000000000S",
        "P1DT0H0M0.S",
        "P",
        "-P",
    ],
)
def test_iso_constructor_raises(fmt):
    # malformed ISO 8601 duration strings must raise, echoing the input
    expected = f"Invalid ISO 8601 Duration format - {fmt}"
    with pytest.raises(ValueError, match=expected):
        Timedelta(fmt)
580
+
581
+
582
@pytest.mark.parametrize(
    "constructed_td, conversion",
    [
        (Timedelta(nanoseconds=100), "100ns"),
        # every keyword at once; the int is the equivalent ns count
        (
            Timedelta(
                days=1,
                hours=1,
                minutes=1,
                weeks=1,
                seconds=1,
                milliseconds=1,
                microseconds=1,
                nanoseconds=1,
            ),
            694861001001001,
        ),
        # arithmetic at nanosecond resolution stays exact
        (Timedelta(microseconds=1) + Timedelta(nanoseconds=1), "1us1ns"),
        (Timedelta(microseconds=1) - Timedelta(nanoseconds=1), "999ns"),
        (Timedelta(microseconds=1) + 5 * Timedelta(nanoseconds=-2), "990ns"),
    ],
)
def test_td_constructor_on_nanoseconds(constructed_td, conversion):
    # GH#9273: keyword construction matches string/int construction
    assert constructed_td == Timedelta(conversion)
607
+
608
+
609
def test_td_constructor_value_error():
    # a non-numeric nanoseconds keyword raises TypeError with a clear message
    expected = "Invalid type <class 'str'>. Must be int or float."
    with pytest.raises(TypeError, match=expected):
        Timedelta(nanoseconds="abc")
613
+
614
+
615
def test_timedelta_constructor_identity():
    # GH#30543: feeding an existing Timedelta back through the constructor
    # returns the very same object, not a copy.
    original = Timedelta(np.timedelta64(1, "s"))
    assert Timedelta(original) is original
620
+
621
+
622
def test_timedelta_pass_td_and_kwargs_raises():
    # GH#48898: combining a Timedelta input with timedelta keywords must
    # raise rather than silently ignore the keywords.
    base = Timedelta(days=1)
    expected = (
        "Cannot pass both a Timedelta input and timedelta keyword arguments, "
        r"got \['days'\]"
    )
    with pytest.raises(ValueError, match=expected):
        Timedelta(base, days=2)
631
+
632
+
633
@pytest.mark.parametrize(
    "constructor, value, unit, expectation",
    [
        (Timedelta, "10s", "ms", (ValueError, "unit must not be specified")),
        (to_timedelta, "10s", "ms", (ValueError, "unit must not be specified")),
        (to_timedelta, ["1", 2, 3], "s", (ValueError, "unit must not be specified")),
    ],
)
def test_string_with_unit(constructor, value, unit, expectation):
    # a unit keyword combined with string input is ambiguous and must raise
    exc_type, pattern = expectation
    with pytest.raises(exc_type, match=pattern):
        _ = constructor(value, unit=unit)
645
+
646
+
647
@pytest.mark.parametrize(
    "value",
    [
        # all 1- and 2-character combinations of sign/separator symbols
        "".join(elements)
        for repetition in (1, 2)
        for elements in product("+-, ", repeat=repetition)
    ],
)
def test_string_without_numbers(value):
    # GH#39710: a string of symbols with no digits must raise; "--" gets
    # its own dedicated error message.
    if value == "--":
        expected = "only leading negative signs are allowed"
    else:
        expected = "symbols w/o a number"
    with pytest.raises(ValueError, match=expected):
        Timedelta(value)
664
+
665
+
666
def test_timedelta_new_npnat():
    # GH#48898: a numpy NaT timedelta64 of any unit maps to pandas NaT
    np_nat = np.timedelta64("NaT", "h")
    assert Timedelta(np_nat) is NaT
670
+
671
+
672
def test_subclass_respected():
    # GH#49579: constructing through a subclass yields that subclass,
    # not a plain Timedelta.
    class SubTimedelta(Timedelta):
        pass

    result = SubTimedelta("1 minute")
    assert isinstance(result, SubTimedelta)
679
+
680
+
681
def test_non_nano_value():
    # https://github.com/pandas-dev/pandas/issues/49076
    # `.value` always reports nanoseconds, regardless of the stored unit
    result = Timedelta(10, unit="D").as_unit("s").value
    assert result == 864000000000000

    # when the value cannot be represented in nanoseconds, `.value` raises
    # with a message pointing at the `.asm8.view('i8')` workaround
    msg = (
        r"Cannot convert Timedelta to nanoseconds without overflow. "
        r"Use `.asm8.view\('i8'\)` to cast represent Timedelta in its "
        r"own unit \(here, s\).$"
    )
    huge = Timedelta(1_000, "D").as_unit("s") * 1_000
    with pytest.raises(OverflowError, match=msg):
        huge.value
    # the suggested workaround yields the raw count in the stored unit
    result = huge.asm8.view("i8")
    assert result == 86400000000