Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- mgm/lib/python3.10/site-packages/pandas/tests/extension/__init__.py +0 -0
- mgm/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/__init__.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/accumulate.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/base.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/casting.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/constructors.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/dim2.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/dtype.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/getitem.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/groupby.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/index.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/interface.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/io.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/methods.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/missing.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/ops.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/printing.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/reduce.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/reshaping.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/setitem.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/tests/extension/base/accumulate.py +39 -0
- mgm/lib/python3.10/site-packages/pandas/tests/extension/base/base.py +2 -0
- mgm/lib/python3.10/site-packages/pandas/tests/extension/base/casting.py +87 -0
- mgm/lib/python3.10/site-packages/pandas/tests/extension/base/index.py +19 -0
- mgm/lib/python3.10/site-packages/pandas/tests/extension/base/interface.py +137 -0
- mgm/lib/python3.10/site-packages/pandas/tests/extension/base/methods.py +720 -0
- mgm/lib/python3.10/site-packages/pandas/tests/extension/base/reduce.py +153 -0
- mgm/lib/python3.10/site-packages/pandas/tests/extension/base/reshaping.py +379 -0
- mgm/lib/python3.10/site-packages/pandas/tests/extension/test_categorical.py +200 -0
- mgm/lib/python3.10/site-packages/pandas/tests/extension/test_common.py +105 -0
- mgm/lib/python3.10/site-packages/pandas/tests/extension/test_datetime.py +144 -0
- mgm/lib/python3.10/site-packages/pandas/tests/extension/test_sparse.py +503 -0
- mgm/lib/python3.10/site-packages/pandas/tests/frame/__pycache__/__init__.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/tests/frame/__pycache__/test_arrow_interface.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/tests/frame/__pycache__/test_stack_unstack.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/tests/frame/__pycache__/test_subclass.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/tests/io/__init__.py +0 -0
- mgm/lib/python3.10/site-packages/pandas/tests/io/conftest.py +242 -0
- mgm/lib/python3.10/site-packages/pandas/tests/io/generate_legacy_storage_files.py +342 -0
- mgm/lib/python3.10/site-packages/pandas/tests/io/json/__init__.py +0 -0
- mgm/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/__init__.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/conftest.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_compression.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_deprecated_kwargs.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_json_table_schema.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_json_table_schema_ext_dtype.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_normalize.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_pandas.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_readlines.cpython-310.pyc +0 -0
- mgm/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_ujson.cpython-310.pyc +0 -0
mgm/lib/python3.10/site-packages/pandas/tests/extension/__init__.py
ADDED
|
File without changes
|
mgm/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (3.79 kB). View file
|
|
|
mgm/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/accumulate.cpython-310.pyc
ADDED
|
Binary file (1.63 kB). View file
|
|
|
mgm/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/base.cpython-310.pyc
ADDED
|
Binary file (334 Bytes). View file
|
|
|
mgm/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/casting.cpython-310.pyc
ADDED
|
Binary file (3.67 kB). View file
|
|
|
mgm/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/constructors.cpython-310.pyc
ADDED
|
Binary file (5.73 kB). View file
|
|
|
mgm/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/dim2.cpython-310.pyc
ADDED
|
Binary file (9.36 kB). View file
|
|
|
mgm/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/dtype.cpython-310.pyc
ADDED
|
Binary file (5.32 kB). View file
|
|
|
mgm/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/getitem.cpython-310.pyc
ADDED
|
Binary file (14.6 kB). View file
|
|
|
mgm/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/groupby.cpython-310.pyc
ADDED
|
Binary file (5.54 kB). View file
|
|
|
mgm/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/index.cpython-310.pyc
ADDED
|
Binary file (975 Bytes). View file
|
|
|
mgm/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/interface.cpython-310.pyc
ADDED
|
Binary file (4.33 kB). View file
|
|
|
mgm/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/io.cpython-310.pyc
ADDED
|
Binary file (1.37 kB). View file
|
|
|
mgm/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/methods.cpython-310.pyc
ADDED
|
Binary file (23.5 kB). View file
|
|
|
mgm/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/missing.cpython-310.pyc
ADDED
|
Binary file (6.14 kB). View file
|
|
|
mgm/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/ops.cpython-310.pyc
ADDED
|
Binary file (8.93 kB). View file
|
|
|
mgm/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/printing.cpython-310.pyc
ADDED
|
Binary file (1.84 kB). View file
|
|
|
mgm/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/reduce.cpython-310.pyc
ADDED
|
Binary file (4.36 kB). View file
|
|
|
mgm/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/reshaping.cpython-310.pyc
ADDED
|
Binary file (11.6 kB). View file
|
|
|
mgm/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/setitem.cpython-310.pyc
ADDED
|
Binary file (15.9 kB). View file
|
|
|
mgm/lib/python3.10/site-packages/pandas/tests/extension/base/accumulate.py
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pytest
|
| 2 |
+
|
| 3 |
+
import pandas as pd
|
| 4 |
+
import pandas._testing as tm
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class BaseAccumulateTests:
|
| 8 |
+
"""
|
| 9 |
+
Accumulation specific tests. Generally these only
|
| 10 |
+
make sense for numeric/boolean operations.
|
| 11 |
+
"""
|
| 12 |
+
|
| 13 |
+
def _supports_accumulation(self, ser: pd.Series, op_name: str) -> bool:
|
| 14 |
+
# Do we expect this accumulation to be supported for this dtype?
|
| 15 |
+
# We default to assuming "no"; subclass authors should override here.
|
| 16 |
+
return False
|
| 17 |
+
|
| 18 |
+
def check_accumulate(self, ser: pd.Series, op_name: str, skipna: bool):
|
| 19 |
+
try:
|
| 20 |
+
alt = ser.astype("float64")
|
| 21 |
+
except TypeError:
|
| 22 |
+
# e.g. Period can't be cast to float64
|
| 23 |
+
alt = ser.astype(object)
|
| 24 |
+
|
| 25 |
+
result = getattr(ser, op_name)(skipna=skipna)
|
| 26 |
+
expected = getattr(alt, op_name)(skipna=skipna)
|
| 27 |
+
tm.assert_series_equal(result, expected, check_dtype=False)
|
| 28 |
+
|
| 29 |
+
@pytest.mark.parametrize("skipna", [True, False])
|
| 30 |
+
def test_accumulate_series(self, data, all_numeric_accumulations, skipna):
|
| 31 |
+
op_name = all_numeric_accumulations
|
| 32 |
+
ser = pd.Series(data)
|
| 33 |
+
|
| 34 |
+
if self._supports_accumulation(ser, op_name):
|
| 35 |
+
self.check_accumulate(ser, op_name, skipna)
|
| 36 |
+
else:
|
| 37 |
+
with pytest.raises((NotImplementedError, TypeError)):
|
| 38 |
+
# TODO: require TypeError for things that will _never_ work?
|
| 39 |
+
getattr(ser, op_name)(skipna=skipna)
|
mgm/lib/python3.10/site-packages/pandas/tests/extension/base/base.py
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
class BaseExtensionTests:
|
| 2 |
+
pass
|
mgm/lib/python3.10/site-packages/pandas/tests/extension/base/casting.py
ADDED
|
@@ -0,0 +1,87 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import pytest
|
| 3 |
+
|
| 4 |
+
import pandas.util._test_decorators as td
|
| 5 |
+
|
| 6 |
+
import pandas as pd
|
| 7 |
+
import pandas._testing as tm
|
| 8 |
+
from pandas.core.internals.blocks import NumpyBlock
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class BaseCastingTests:
|
| 12 |
+
"""Casting to and from ExtensionDtypes"""
|
| 13 |
+
|
| 14 |
+
def test_astype_object_series(self, all_data):
|
| 15 |
+
ser = pd.Series(all_data, name="A")
|
| 16 |
+
result = ser.astype(object)
|
| 17 |
+
assert result.dtype == np.dtype(object)
|
| 18 |
+
if hasattr(result._mgr, "blocks"):
|
| 19 |
+
blk = result._mgr.blocks[0]
|
| 20 |
+
assert isinstance(blk, NumpyBlock)
|
| 21 |
+
assert blk.is_object
|
| 22 |
+
assert isinstance(result._mgr.array, np.ndarray)
|
| 23 |
+
assert result._mgr.array.dtype == np.dtype(object)
|
| 24 |
+
|
| 25 |
+
def test_astype_object_frame(self, all_data):
|
| 26 |
+
df = pd.DataFrame({"A": all_data})
|
| 27 |
+
|
| 28 |
+
result = df.astype(object)
|
| 29 |
+
if hasattr(result._mgr, "blocks"):
|
| 30 |
+
blk = result._mgr.blocks[0]
|
| 31 |
+
assert isinstance(blk, NumpyBlock), type(blk)
|
| 32 |
+
assert blk.is_object
|
| 33 |
+
assert isinstance(result._mgr.arrays[0], np.ndarray)
|
| 34 |
+
assert result._mgr.arrays[0].dtype == np.dtype(object)
|
| 35 |
+
|
| 36 |
+
# check that we can compare the dtypes
|
| 37 |
+
comp = result.dtypes == df.dtypes
|
| 38 |
+
assert not comp.any()
|
| 39 |
+
|
| 40 |
+
def test_tolist(self, data):
|
| 41 |
+
result = pd.Series(data).tolist()
|
| 42 |
+
expected = list(data)
|
| 43 |
+
assert result == expected
|
| 44 |
+
|
| 45 |
+
def test_astype_str(self, data):
|
| 46 |
+
result = pd.Series(data[:5]).astype(str)
|
| 47 |
+
expected = pd.Series([str(x) for x in data[:5]], dtype=str)
|
| 48 |
+
tm.assert_series_equal(result, expected)
|
| 49 |
+
|
| 50 |
+
@pytest.mark.parametrize(
|
| 51 |
+
"nullable_string_dtype",
|
| 52 |
+
[
|
| 53 |
+
"string[python]",
|
| 54 |
+
pytest.param("string[pyarrow]", marks=td.skip_if_no("pyarrow")),
|
| 55 |
+
],
|
| 56 |
+
)
|
| 57 |
+
def test_astype_string(self, data, nullable_string_dtype):
|
| 58 |
+
# GH-33465, GH#45326 as of 2.0 we decode bytes instead of calling str(obj)
|
| 59 |
+
result = pd.Series(data[:5]).astype(nullable_string_dtype)
|
| 60 |
+
expected = pd.Series(
|
| 61 |
+
[str(x) if not isinstance(x, bytes) else x.decode() for x in data[:5]],
|
| 62 |
+
dtype=nullable_string_dtype,
|
| 63 |
+
)
|
| 64 |
+
tm.assert_series_equal(result, expected)
|
| 65 |
+
|
| 66 |
+
def test_to_numpy(self, data):
|
| 67 |
+
expected = np.asarray(data)
|
| 68 |
+
|
| 69 |
+
result = data.to_numpy()
|
| 70 |
+
tm.assert_equal(result, expected)
|
| 71 |
+
|
| 72 |
+
result = pd.Series(data).to_numpy()
|
| 73 |
+
tm.assert_equal(result, expected)
|
| 74 |
+
|
| 75 |
+
def test_astype_empty_dataframe(self, dtype):
|
| 76 |
+
# https://github.com/pandas-dev/pandas/issues/33113
|
| 77 |
+
df = pd.DataFrame()
|
| 78 |
+
result = df.astype(dtype)
|
| 79 |
+
tm.assert_frame_equal(result, df)
|
| 80 |
+
|
| 81 |
+
@pytest.mark.parametrize("copy", [True, False])
|
| 82 |
+
def test_astype_own_type(self, data, copy):
|
| 83 |
+
# ensure that astype returns the original object for equal dtype and copy=False
|
| 84 |
+
# https://github.com/pandas-dev/pandas/issues/28488
|
| 85 |
+
result = data.astype(data.dtype, copy=copy)
|
| 86 |
+
assert (result is data) is (not copy)
|
| 87 |
+
tm.assert_extension_array_equal(result, data)
|
mgm/lib/python3.10/site-packages/pandas/tests/extension/base/index.py
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Tests for Indexes backed by arbitrary ExtensionArrays.
|
| 3 |
+
"""
|
| 4 |
+
import pandas as pd
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class BaseIndexTests:
|
| 8 |
+
"""Tests for Index object backed by an ExtensionArray"""
|
| 9 |
+
|
| 10 |
+
def test_index_from_array(self, data):
|
| 11 |
+
idx = pd.Index(data)
|
| 12 |
+
assert data.dtype == idx.dtype
|
| 13 |
+
|
| 14 |
+
def test_index_from_listlike_with_dtype(self, data):
|
| 15 |
+
idx = pd.Index(data, dtype=data.dtype)
|
| 16 |
+
assert idx.dtype == data.dtype
|
| 17 |
+
|
| 18 |
+
idx = pd.Index(list(data), dtype=data.dtype)
|
| 19 |
+
assert idx.dtype == data.dtype
|
mgm/lib/python3.10/site-packages/pandas/tests/extension/base/interface.py
ADDED
|
@@ -0,0 +1,137 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import pytest
|
| 3 |
+
|
| 4 |
+
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
|
| 5 |
+
from pandas.core.dtypes.common import is_extension_array_dtype
|
| 6 |
+
from pandas.core.dtypes.dtypes import ExtensionDtype
|
| 7 |
+
|
| 8 |
+
import pandas as pd
|
| 9 |
+
import pandas._testing as tm
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class BaseInterfaceTests:
|
| 13 |
+
"""Tests that the basic interface is satisfied."""
|
| 14 |
+
|
| 15 |
+
# ------------------------------------------------------------------------
|
| 16 |
+
# Interface
|
| 17 |
+
# ------------------------------------------------------------------------
|
| 18 |
+
|
| 19 |
+
def test_len(self, data):
|
| 20 |
+
assert len(data) == 100
|
| 21 |
+
|
| 22 |
+
def test_size(self, data):
|
| 23 |
+
assert data.size == 100
|
| 24 |
+
|
| 25 |
+
def test_ndim(self, data):
|
| 26 |
+
assert data.ndim == 1
|
| 27 |
+
|
| 28 |
+
def test_can_hold_na_valid(self, data):
|
| 29 |
+
# GH-20761
|
| 30 |
+
assert data._can_hold_na is True
|
| 31 |
+
|
| 32 |
+
def test_contains(self, data, data_missing):
|
| 33 |
+
# GH-37867
|
| 34 |
+
# Tests for membership checks. Membership checks for nan-likes is tricky and
|
| 35 |
+
# the settled on rule is: `nan_like in arr` is True if nan_like is
|
| 36 |
+
# arr.dtype.na_value and arr.isna().any() is True. Else the check returns False.
|
| 37 |
+
|
| 38 |
+
na_value = data.dtype.na_value
|
| 39 |
+
# ensure data without missing values
|
| 40 |
+
data = data[~data.isna()]
|
| 41 |
+
|
| 42 |
+
# first elements are non-missing
|
| 43 |
+
assert data[0] in data
|
| 44 |
+
assert data_missing[0] in data_missing
|
| 45 |
+
|
| 46 |
+
# check the presence of na_value
|
| 47 |
+
assert na_value in data_missing
|
| 48 |
+
assert na_value not in data
|
| 49 |
+
|
| 50 |
+
# the data can never contain other nan-likes than na_value
|
| 51 |
+
for na_value_obj in tm.NULL_OBJECTS:
|
| 52 |
+
if na_value_obj is na_value or type(na_value_obj) == type(na_value):
|
| 53 |
+
# type check for e.g. two instances of Decimal("NAN")
|
| 54 |
+
continue
|
| 55 |
+
assert na_value_obj not in data
|
| 56 |
+
assert na_value_obj not in data_missing
|
| 57 |
+
|
| 58 |
+
def test_memory_usage(self, data):
|
| 59 |
+
s = pd.Series(data)
|
| 60 |
+
result = s.memory_usage(index=False)
|
| 61 |
+
assert result == s.nbytes
|
| 62 |
+
|
| 63 |
+
def test_array_interface(self, data):
|
| 64 |
+
result = np.array(data)
|
| 65 |
+
assert result[0] == data[0]
|
| 66 |
+
|
| 67 |
+
result = np.array(data, dtype=object)
|
| 68 |
+
expected = np.array(list(data), dtype=object)
|
| 69 |
+
if expected.ndim > 1:
|
| 70 |
+
# nested data, explicitly construct as 1D
|
| 71 |
+
expected = construct_1d_object_array_from_listlike(list(data))
|
| 72 |
+
tm.assert_numpy_array_equal(result, expected)
|
| 73 |
+
|
| 74 |
+
def test_is_extension_array_dtype(self, data):
|
| 75 |
+
assert is_extension_array_dtype(data)
|
| 76 |
+
assert is_extension_array_dtype(data.dtype)
|
| 77 |
+
assert is_extension_array_dtype(pd.Series(data))
|
| 78 |
+
assert isinstance(data.dtype, ExtensionDtype)
|
| 79 |
+
|
| 80 |
+
def test_no_values_attribute(self, data):
|
| 81 |
+
# GH-20735: EA's with .values attribute give problems with internal
|
| 82 |
+
# code, disallowing this for now until solved
|
| 83 |
+
assert not hasattr(data, "values")
|
| 84 |
+
assert not hasattr(data, "_values")
|
| 85 |
+
|
| 86 |
+
def test_is_numeric_honored(self, data):
|
| 87 |
+
result = pd.Series(data)
|
| 88 |
+
if hasattr(result._mgr, "blocks"):
|
| 89 |
+
assert result._mgr.blocks[0].is_numeric is data.dtype._is_numeric
|
| 90 |
+
|
| 91 |
+
def test_isna_extension_array(self, data_missing):
|
| 92 |
+
# If your `isna` returns an ExtensionArray, you must also implement
|
| 93 |
+
# _reduce. At the *very* least, you must implement any and all
|
| 94 |
+
na = data_missing.isna()
|
| 95 |
+
if is_extension_array_dtype(na):
|
| 96 |
+
assert na._reduce("any")
|
| 97 |
+
assert na.any()
|
| 98 |
+
|
| 99 |
+
assert not na._reduce("all")
|
| 100 |
+
assert not na.all()
|
| 101 |
+
|
| 102 |
+
assert na.dtype._is_boolean
|
| 103 |
+
|
| 104 |
+
def test_copy(self, data):
|
| 105 |
+
# GH#27083 removing deep keyword from EA.copy
|
| 106 |
+
assert data[0] != data[1]
|
| 107 |
+
result = data.copy()
|
| 108 |
+
|
| 109 |
+
if data.dtype._is_immutable:
|
| 110 |
+
pytest.skip(f"test_copy assumes mutability and {data.dtype} is immutable")
|
| 111 |
+
|
| 112 |
+
data[1] = data[0]
|
| 113 |
+
assert result[1] != result[0]
|
| 114 |
+
|
| 115 |
+
def test_view(self, data):
|
| 116 |
+
# view with no dtype should return a shallow copy, *not* the same
|
| 117 |
+
# object
|
| 118 |
+
assert data[1] != data[0]
|
| 119 |
+
|
| 120 |
+
result = data.view()
|
| 121 |
+
assert result is not data
|
| 122 |
+
assert type(result) == type(data)
|
| 123 |
+
|
| 124 |
+
if data.dtype._is_immutable:
|
| 125 |
+
pytest.skip(f"test_view assumes mutability and {data.dtype} is immutable")
|
| 126 |
+
|
| 127 |
+
result[1] = result[0]
|
| 128 |
+
assert data[1] == data[0]
|
| 129 |
+
|
| 130 |
+
# check specifically that the `dtype` kwarg is accepted
|
| 131 |
+
data.view(dtype=None)
|
| 132 |
+
|
| 133 |
+
def test_tolist(self, data):
|
| 134 |
+
result = data.tolist()
|
| 135 |
+
expected = list(data)
|
| 136 |
+
assert isinstance(result, list)
|
| 137 |
+
assert result == expected
|
mgm/lib/python3.10/site-packages/pandas/tests/extension/base/methods.py
ADDED
|
@@ -0,0 +1,720 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import inspect
|
| 2 |
+
import operator
|
| 3 |
+
|
| 4 |
+
import numpy as np
|
| 5 |
+
import pytest
|
| 6 |
+
|
| 7 |
+
from pandas._typing import Dtype
|
| 8 |
+
|
| 9 |
+
from pandas.core.dtypes.common import is_bool_dtype
|
| 10 |
+
from pandas.core.dtypes.dtypes import NumpyEADtype
|
| 11 |
+
from pandas.core.dtypes.missing import na_value_for_dtype
|
| 12 |
+
|
| 13 |
+
import pandas as pd
|
| 14 |
+
import pandas._testing as tm
|
| 15 |
+
from pandas.core.sorting import nargsort
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class BaseMethodsTests:
|
| 19 |
+
"""Various Series and DataFrame methods."""
|
| 20 |
+
|
| 21 |
+
def test_hash_pandas_object(self, data):
|
| 22 |
+
# _hash_pandas_object should return a uint64 ndarray of the same length
|
| 23 |
+
# as the data
|
| 24 |
+
from pandas.core.util.hashing import _default_hash_key
|
| 25 |
+
|
| 26 |
+
res = data._hash_pandas_object(
|
| 27 |
+
encoding="utf-8", hash_key=_default_hash_key, categorize=False
|
| 28 |
+
)
|
| 29 |
+
assert res.dtype == np.uint64
|
| 30 |
+
assert res.shape == data.shape
|
| 31 |
+
|
| 32 |
+
def test_value_counts_default_dropna(self, data):
|
| 33 |
+
# make sure we have consistent default dropna kwarg
|
| 34 |
+
if not hasattr(data, "value_counts"):
|
| 35 |
+
pytest.skip(f"value_counts is not implemented for {type(data)}")
|
| 36 |
+
sig = inspect.signature(data.value_counts)
|
| 37 |
+
kwarg = sig.parameters["dropna"]
|
| 38 |
+
assert kwarg.default is True
|
| 39 |
+
|
| 40 |
+
@pytest.mark.parametrize("dropna", [True, False])
|
| 41 |
+
def test_value_counts(self, all_data, dropna):
|
| 42 |
+
all_data = all_data[:10]
|
| 43 |
+
if dropna:
|
| 44 |
+
other = all_data[~all_data.isna()]
|
| 45 |
+
else:
|
| 46 |
+
other = all_data
|
| 47 |
+
|
| 48 |
+
result = pd.Series(all_data).value_counts(dropna=dropna).sort_index()
|
| 49 |
+
expected = pd.Series(other).value_counts(dropna=dropna).sort_index()
|
| 50 |
+
|
| 51 |
+
tm.assert_series_equal(result, expected)
|
| 52 |
+
|
| 53 |
+
def test_value_counts_with_normalize(self, data):
|
| 54 |
+
# GH 33172
|
| 55 |
+
data = data[:10].unique()
|
| 56 |
+
values = np.array(data[~data.isna()])
|
| 57 |
+
ser = pd.Series(data, dtype=data.dtype)
|
| 58 |
+
|
| 59 |
+
result = ser.value_counts(normalize=True).sort_index()
|
| 60 |
+
|
| 61 |
+
if not isinstance(data, pd.Categorical):
|
| 62 |
+
expected = pd.Series(
|
| 63 |
+
[1 / len(values)] * len(values), index=result.index, name="proportion"
|
| 64 |
+
)
|
| 65 |
+
else:
|
| 66 |
+
expected = pd.Series(0.0, index=result.index, name="proportion")
|
| 67 |
+
expected[result > 0] = 1 / len(values)
|
| 68 |
+
|
| 69 |
+
if getattr(data.dtype, "storage", "") == "pyarrow" or isinstance(
|
| 70 |
+
data.dtype, pd.ArrowDtype
|
| 71 |
+
):
|
| 72 |
+
# TODO: avoid special-casing
|
| 73 |
+
expected = expected.astype("double[pyarrow]")
|
| 74 |
+
elif getattr(data.dtype, "storage", "") == "pyarrow_numpy":
|
| 75 |
+
# TODO: avoid special-casing
|
| 76 |
+
expected = expected.astype("float64")
|
| 77 |
+
elif na_value_for_dtype(data.dtype) is pd.NA:
|
| 78 |
+
# TODO(GH#44692): avoid special-casing
|
| 79 |
+
expected = expected.astype("Float64")
|
| 80 |
+
|
| 81 |
+
tm.assert_series_equal(result, expected)
|
| 82 |
+
|
| 83 |
+
def test_count(self, data_missing):
|
| 84 |
+
df = pd.DataFrame({"A": data_missing})
|
| 85 |
+
result = df.count(axis="columns")
|
| 86 |
+
expected = pd.Series([0, 1])
|
| 87 |
+
tm.assert_series_equal(result, expected)
|
| 88 |
+
|
| 89 |
+
def test_series_count(self, data_missing):
|
| 90 |
+
# GH#26835
|
| 91 |
+
ser = pd.Series(data_missing)
|
| 92 |
+
result = ser.count()
|
| 93 |
+
expected = 1
|
| 94 |
+
assert result == expected
|
| 95 |
+
|
| 96 |
+
def test_apply_simple_series(self, data):
|
| 97 |
+
result = pd.Series(data).apply(id)
|
| 98 |
+
assert isinstance(result, pd.Series)
|
| 99 |
+
|
| 100 |
+
@pytest.mark.parametrize("na_action", [None, "ignore"])
|
| 101 |
+
def test_map(self, data_missing, na_action):
|
| 102 |
+
result = data_missing.map(lambda x: x, na_action=na_action)
|
| 103 |
+
expected = data_missing.to_numpy()
|
| 104 |
+
tm.assert_numpy_array_equal(result, expected)
|
| 105 |
+
|
| 106 |
+
def test_argsort(self, data_for_sorting):
|
| 107 |
+
result = pd.Series(data_for_sorting).argsort()
|
| 108 |
+
# argsort result gets passed to take, so should be np.intp
|
| 109 |
+
expected = pd.Series(np.array([2, 0, 1], dtype=np.intp))
|
| 110 |
+
tm.assert_series_equal(result, expected)
|
| 111 |
+
|
| 112 |
+
def test_argsort_missing_array(self, data_missing_for_sorting):
|
| 113 |
+
result = data_missing_for_sorting.argsort()
|
| 114 |
+
# argsort result gets passed to take, so should be np.intp
|
| 115 |
+
expected = np.array([2, 0, 1], dtype=np.intp)
|
| 116 |
+
tm.assert_numpy_array_equal(result, expected)
|
| 117 |
+
|
| 118 |
+
def test_argsort_missing(self, data_missing_for_sorting):
|
| 119 |
+
msg = "The behavior of Series.argsort in the presence of NA values"
|
| 120 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
| 121 |
+
result = pd.Series(data_missing_for_sorting).argsort()
|
| 122 |
+
expected = pd.Series(np.array([1, -1, 0], dtype=np.intp))
|
| 123 |
+
tm.assert_series_equal(result, expected)
|
| 124 |
+
|
| 125 |
+
def test_argmin_argmax(self, data_for_sorting, data_missing_for_sorting, na_value):
|
| 126 |
+
# GH 24382
|
| 127 |
+
is_bool = data_for_sorting.dtype._is_boolean
|
| 128 |
+
|
| 129 |
+
exp_argmax = 1
|
| 130 |
+
exp_argmax_repeated = 3
|
| 131 |
+
if is_bool:
|
| 132 |
+
# See data_for_sorting docstring
|
| 133 |
+
exp_argmax = 0
|
| 134 |
+
exp_argmax_repeated = 1
|
| 135 |
+
|
| 136 |
+
# data_for_sorting -> [B, C, A] with A < B < C
|
| 137 |
+
assert data_for_sorting.argmax() == exp_argmax
|
| 138 |
+
assert data_for_sorting.argmin() == 2
|
| 139 |
+
|
| 140 |
+
# with repeated values -> first occurrence
|
| 141 |
+
data = data_for_sorting.take([2, 0, 0, 1, 1, 2])
|
| 142 |
+
assert data.argmax() == exp_argmax_repeated
|
| 143 |
+
assert data.argmin() == 0
|
| 144 |
+
|
| 145 |
+
# with missing values
|
| 146 |
+
# data_missing_for_sorting -> [B, NA, A] with A < B and NA missing.
|
| 147 |
+
assert data_missing_for_sorting.argmax() == 0
|
| 148 |
+
assert data_missing_for_sorting.argmin() == 2
|
| 149 |
+
|
| 150 |
+
@pytest.mark.parametrize("method", ["argmax", "argmin"])
def test_argmin_argmax_empty_array(self, method, data):
    """argmax/argmin on an empty array must raise ValueError, like numpy."""
    # GH 24382
    err_msg = "attempt to get"
    with pytest.raises(ValueError, match=err_msg):
        getattr(data[:0], method)()
|
| 156 |
+
|
| 157 |
+
@pytest.mark.parametrize("method", ["argmax", "argmin"])
def test_argmin_argmax_all_na(self, method, data, na_value):
    """All-NA input behaves like empty input: argmax/argmin raise ValueError."""
    # all missing with skipna=True is the same as empty
    err_msg = "attempt to get"
    data_na = type(data)._from_sequence([na_value, na_value], dtype=data.dtype)
    with pytest.raises(ValueError, match=err_msg):
        getattr(data_na, method)()
|
| 164 |
+
|
| 165 |
+
@pytest.mark.parametrize(
    "op_name, skipna, expected",
    [
        ("idxmax", True, 0),
        ("idxmin", True, 2),
        ("argmax", True, 0),
        ("argmin", True, 2),
        ("idxmax", False, np.nan),
        ("idxmin", False, np.nan),
        ("argmax", False, -1),
        ("argmin", False, -1),
    ],
)
def test_argreduce_series(
    self, data_missing_for_sorting, op_name, skipna, expected
):
    """Series idx*/arg* reductions with and without skipna; the no-skipna
    NA results (-1 / NaN) are deprecated and must warn."""
    # data_missing_for_sorting -> [B, NA, A] with A < B and NA missing.
    warn = None
    msg = "The behavior of Series.argmax/argmin"
    if op_name.startswith("arg") and expected == -1:
        # deprecated: argmax/argmin returning -1 for all-NA/skipna=False
        warn = FutureWarning
    if op_name.startswith("idx") and np.isnan(expected):
        # deprecated: idxmax/idxmin returning NaN with skipna=False
        warn = FutureWarning
        msg = f"The behavior of Series.{op_name}"
    ser = pd.Series(data_missing_for_sorting)
    with tm.assert_produces_warning(warn, match=msg):
        result = getattr(ser, op_name)(skipna=skipna)
    tm.assert_almost_equal(result, expected)
|
| 193 |
+
|
| 194 |
+
def test_argmax_argmin_no_skipna_notimplemented(self, data_missing_for_sorting):
    """EAs that do not support skipna=False must raise NotImplementedError.

    Subclasses whose arrays DO support skipna=False should not inherit this.
    """
    # GH#38733
    data = data_missing_for_sorting

    with pytest.raises(NotImplementedError, match=""):
        data.argmin(skipna=False)

    with pytest.raises(NotImplementedError, match=""):
        data.argmax(skipna=False)
|
| 203 |
+
|
| 204 |
+
@pytest.mark.parametrize(
    "na_position, expected",
    [
        ("last", np.array([2, 0, 1], dtype=np.dtype("intp"))),
        ("first", np.array([1, 2, 0], dtype=np.dtype("intp"))),
    ],
)
def test_nargsort(self, data_missing_for_sorting, na_position, expected):
    """nargsort honors na_position for EAs (fixture is [B, NA, A], A < B)."""
    # GH 25439
    result = nargsort(data_missing_for_sorting, na_position=na_position)
    tm.assert_numpy_array_equal(result, expected)
|
| 215 |
+
|
| 216 |
+
@pytest.mark.parametrize("ascending", [True, False])
def test_sort_values(self, data_for_sorting, ascending, sort_by_key):
    """Series.sort_values on an EA-backed Series, both directions, with key."""
    ser = pd.Series(data_for_sorting)
    result = ser.sort_values(ascending=ascending, key=sort_by_key)
    # ascending order of [B, C, A] is A, B, C -> positions [2, 0, 1]
    expected = ser.iloc[[2, 0, 1]]
    if not ascending:
        # GH 35922. Expect stable sort
        if ser.nunique() == 2:
            # boolean-like fixtures have ties; stability keeps original order
            expected = ser.iloc[[0, 1, 2]]
        else:
            expected = ser.iloc[[1, 0, 2]]

    tm.assert_series_equal(result, expected)
|
| 229 |
+
|
| 230 |
+
@pytest.mark.parametrize("ascending", [True, False])
def test_sort_values_missing(
    self, data_missing_for_sorting, ascending, sort_by_key
):
    """sort_values keeps NAs last in both directions (default na_position)."""
    ser = pd.Series(data_missing_for_sorting)
    result = ser.sort_values(ascending=ascending, key=sort_by_key)
    if ascending:
        expected = ser.iloc[[2, 0, 1]]
    else:
        expected = ser.iloc[[0, 2, 1]]
    tm.assert_series_equal(result, expected)
|
| 241 |
+
|
| 242 |
+
@pytest.mark.parametrize("ascending", [True, False])
def test_sort_values_frame(self, data_for_sorting, ascending):
    """DataFrame.sort_values over a mixed [int, EA] pair of columns."""
    # NOTE(review): ``ascending`` is parametrized but not passed to
    # sort_values below — kept as-is to match the upstream test.
    df = pd.DataFrame({"A": [1, 2, 1], "B": data_for_sorting})
    result = df.sort_values(["A", "B"])
    expected = pd.DataFrame(
        {"A": [1, 1, 2], "B": data_for_sorting.take([2, 0, 1])}, index=[2, 0, 1]
    )
    tm.assert_frame_equal(result, expected)
|
| 250 |
+
|
| 251 |
+
@pytest.mark.parametrize("keep", ["first", "last", False])
def test_duplicated(self, data, keep):
    """ExtensionArray.duplicated honors all three ``keep`` modes."""
    # [x, y, x, y] -> every element has exactly one duplicate
    arr = data.take([0, 1, 0, 1])
    result = arr.duplicated(keep=keep)
    if keep == "first":
        expected = np.array([False, False, True, True])
    elif keep == "last":
        expected = np.array([True, True, False, False])
    else:
        # keep=False marks all members of a duplicate group
        expected = np.array([True, True, True, True])
    tm.assert_numpy_array_equal(result, expected)
|
| 262 |
+
|
| 263 |
+
@pytest.mark.parametrize("box", [pd.Series, lambda x: x])
@pytest.mark.parametrize("method", [lambda x: x.unique(), pd.unique])
def test_unique(self, data, box, method):
    """unique() on Series or raw array returns a single-element EA."""
    duplicated = box(data._from_sequence([data[0], data[0]], dtype=data.dtype))

    result = method(duplicated)

    assert len(result) == 1
    # result must round-trip to the same EA class, not a numpy array
    assert isinstance(result, type(data))
    assert result[0] == duplicated[0]
|
| 273 |
+
|
| 274 |
+
def test_factorize(self, data_for_grouping):
    """pd.factorize maps NAs to -1 and returns uniques of the same EA type."""
    codes, uniques = pd.factorize(data_for_grouping, use_na_sentinel=True)

    is_bool = data_for_grouping.dtype._is_boolean
    if is_bool:
        # only 2 unique values
        expected_codes = np.array([0, 0, -1, -1, 1, 1, 0, 0], dtype=np.intp)
        expected_uniques = data_for_grouping.take([0, 4])
    else:
        expected_codes = np.array([0, 0, -1, -1, 1, 1, 0, 2], dtype=np.intp)
        expected_uniques = data_for_grouping.take([0, 4, 7])

    tm.assert_numpy_array_equal(codes, expected_codes)
    tm.assert_extension_array_equal(uniques, expected_uniques)
|
| 288 |
+
|
| 289 |
+
def test_factorize_equivalence(self, data_for_grouping):
    """pd.factorize and ExtensionArray.factorize must agree exactly."""
    codes_1, uniques_1 = pd.factorize(data_for_grouping, use_na_sentinel=True)
    codes_2, uniques_2 = data_for_grouping.factorize(use_na_sentinel=True)

    tm.assert_numpy_array_equal(codes_1, codes_2)
    tm.assert_extension_array_equal(uniques_1, uniques_2)
    # uniques must themselves be duplicate-free and dtype-preserving
    assert len(uniques_1) == len(pd.unique(uniques_1))
    assert uniques_1.dtype == data_for_grouping.dtype
|
| 297 |
+
|
| 298 |
+
def test_factorize_empty(self, data):
|
| 299 |
+
codes, uniques = pd.factorize(data[:0])
|
| 300 |
+
expected_codes = np.array([], dtype=np.intp)
|
| 301 |
+
expected_uniques = type(data)._from_sequence([], dtype=data[:0].dtype)
|
| 302 |
+
|
| 303 |
+
tm.assert_numpy_array_equal(codes, expected_codes)
|
| 304 |
+
tm.assert_extension_array_equal(uniques, expected_uniques)
|
| 305 |
+
|
| 306 |
+
def test_fillna_copy_frame(self, data_missing):
    """DataFrame.fillna must return a copy: mutating the result must not
    leak back into the original frame."""
    arr = data_missing.take([1, 1])
    df = pd.DataFrame({"A": arr})
    df_orig = df.copy()

    filled_val = df.iloc[0, 0]
    result = df.fillna(filled_val)

    # write into the result; df must remain untouched
    result.iloc[0, 0] = filled_val

    tm.assert_frame_equal(df, df_orig)
|
| 317 |
+
|
| 318 |
+
def test_fillna_copy_series(self, data_missing):
    """Series.fillna must return a copy, even with copy=False construction."""
    arr = data_missing.take([1, 1])
    ser = pd.Series(arr, copy=False)
    ser_orig = ser.copy()

    filled_val = ser[0]
    result = ser.fillna(filled_val)
    # mutate the result; the source series must not change
    result.iloc[0] = filled_val

    tm.assert_series_equal(ser, ser_orig)
|
| 328 |
+
|
| 329 |
+
def test_fillna_length_mismatch(self, data_missing):
    """fillna with an array value of the wrong length raises ValueError."""
    msg = "Length of 'value' does not match."
    with pytest.raises(ValueError, match=msg):
        data_missing.fillna(data_missing.take([1]))
|
| 333 |
+
|
| 334 |
+
# Subclasses can override if we expect e.g Sparse[bool], boolean, pyarrow[bool]
_combine_le_expected_dtype: Dtype = NumpyEADtype("bool")

def test_combine_le(self, data_repeated):
    """Series.combine with a <= comparison, against a Series and a scalar.

    The expected result dtype is ``_combine_le_expected_dtype`` so EA
    subclasses with boolean-typed comparison results can override it.
    """
    # GH 20825
    # Test that combine works when doing a <= (le) comparison
    orig_data1, orig_data2 = data_repeated(2)
    s1 = pd.Series(orig_data1)
    s2 = pd.Series(orig_data2)
    result = s1.combine(s2, lambda x1, x2: x1 <= x2)
    # expected is the elementwise comparison done on python scalars
    expected = pd.Series(
        pd.array(
            [a <= b for (a, b) in zip(list(orig_data1), list(orig_data2))],
            dtype=self._combine_le_expected_dtype,
        )
    )
    tm.assert_series_equal(result, expected)

    # same comparison against a scalar value
    val = s1.iloc[0]
    result = s1.combine(val, lambda x1, x2: x1 <= x2)
    expected = pd.Series(
        pd.array(
            [a <= val for a in list(orig_data1)],
            dtype=self._combine_le_expected_dtype,
        )
    )
    tm.assert_series_equal(result, expected)
|
| 361 |
+
|
| 362 |
+
def test_combine_add(self, data_repeated):
    """Series.combine with addition; if scalar + is unsupported the combine
    itself must raise TypeError as well."""
    # GH 20825
    orig_data1, orig_data2 = data_repeated(2)
    s1 = pd.Series(orig_data1)
    s2 = pd.Series(orig_data2)

    # Check if the operation is supported pointwise for our scalars. If not,
    # we will expect Series.combine to raise as well.
    try:
        with np.errstate(over="ignore"):
            expected = pd.Series(
                orig_data1._from_sequence(
                    [a + b for (a, b) in zip(list(orig_data1), list(orig_data2))]
                )
            )
    except TypeError:
        # If the operation is not supported pointwise for our scalars,
        # then Series.combine should also raise
        with pytest.raises(TypeError):
            s1.combine(s2, lambda x1, x2: x1 + x2)
        return

    result = s1.combine(s2, lambda x1, x2: x1 + x2)
    tm.assert_series_equal(result, expected)

    # scalar other
    val = s1.iloc[0]
    result = s1.combine(val, lambda x1, x2: x1 + x2)
    expected = pd.Series(
        orig_data1._from_sequence([a + val for a in list(orig_data1)])
    )
    tm.assert_series_equal(result, expected)
|
| 393 |
+
|
| 394 |
+
def test_combine_first(self, data):
|
| 395 |
+
# https://github.com/pandas-dev/pandas/issues/24147
|
| 396 |
+
a = pd.Series(data[:3])
|
| 397 |
+
b = pd.Series(data[2:5], index=[2, 3, 4])
|
| 398 |
+
result = a.combine_first(b)
|
| 399 |
+
expected = pd.Series(data[:5])
|
| 400 |
+
tm.assert_series_equal(result, expected)
|
| 401 |
+
|
| 402 |
+
@pytest.mark.parametrize("frame", [True, False])
@pytest.mark.parametrize(
    "periods, indices",
    [(-2, [2, 3, 4, -1, -1]), (0, [0, 1, 2, 3, 4]), (2, [-1, -1, 0, 1, 2])],
)
def test_container_shift(self, data, frame, periods, indices):
    """Series/DataFrame.shift on an EA column matches take(allow_fill=True),
    where -1 in ``indices`` marks the NA fill positions."""
    # https://github.com/pandas-dev/pandas/issues/22386
    subset = data[:5]
    data = pd.Series(subset, name="A")
    expected = pd.Series(subset.take(indices, allow_fill=True), name="A")

    if frame:
        # add a plain float column to exercise the mixed-block path
        result = data.to_frame(name="A").assign(B=1).shift(periods)
        expected = pd.concat(
            [expected, pd.Series([1] * 5, name="B").shift(periods)], axis=1
        )
        compare = tm.assert_frame_equal
    else:
        result = data.shift(periods)
        compare = tm.assert_series_equal

    compare(result, expected)
|
| 424 |
+
|
| 425 |
+
def test_shift_0_periods(self, data):
    """shift(0) must return a copy, not a view of the same data."""
    # GH#33856 shifting with periods=0 should return a copy, not same obj
    result = data.shift(0)
    assert data[0] != data[1]  # otherwise below is invalid
    # mutate the source; the shifted result must be unaffected
    data[0] = data[1]
    assert result[0] != result[1]  # i.e. not the same object/view
|
| 431 |
+
|
| 432 |
+
@pytest.mark.parametrize("periods", [1, -2])
def test_diff(self, data, periods):
    """Series/DataFrame.diff on EA data equals op(data, data.shift(periods)).

    Boolean dtypes use xor instead of subtraction.
    """
    data = data[:5]
    if is_bool_dtype(data.dtype):
        op = operator.xor
    else:
        op = operator.sub
    try:
        # does this array implement ops?
        op(data, data)
    except Exception:
        pytest.skip(f"{type(data)} does not support diff")
    s = pd.Series(data)
    result = s.diff(periods)
    expected = pd.Series(op(data, data.shift(periods)))
    tm.assert_series_equal(result, expected)

    # same check through the DataFrame path, with a float column alongside
    df = pd.DataFrame({"A": data, "B": [1.0] * 5})
    result = df.diff(periods)
    if periods == 1:
        b = [np.nan, 0, 0, 0, 0]
    else:
        b = [0, 0, 0, np.nan, np.nan]
    expected = pd.DataFrame({"A": expected, "B": b})
    tm.assert_frame_equal(result, expected)
|
| 457 |
+
|
| 458 |
+
@pytest.mark.parametrize(
    "periods, indices",
    [[-4, [-1, -1]], [-1, [1, -1]], [0, [0, 1]], [1, [-1, 0]], [4, [-1, -1]]],
)
def test_shift_non_empty_array(self, data, periods, indices):
    """ExtensionArray.shift on a length-2 array, including over-length
    shifts; -1 in ``indices`` marks NA fills."""
    # https://github.com/pandas-dev/pandas/issues/23911
    subset = data[:2]
    result = subset.shift(periods)
    expected = subset.take(indices, allow_fill=True)
    tm.assert_extension_array_equal(result, expected)
|
| 468 |
+
|
| 469 |
+
@pytest.mark.parametrize("periods", [-4, -1, 0, 1, 4])
def test_shift_empty_array(self, data, periods):
    """Shifting an empty array returns an equal empty array for any period."""
    # https://github.com/pandas-dev/pandas/issues/23911
    empty = data[:0]
    result = empty.shift(periods)
    expected = empty
    tm.assert_extension_array_equal(result, expected)
|
| 476 |
+
|
| 477 |
+
def test_shift_zero_copies(self, data):
|
| 478 |
+
# GH#31502
|
| 479 |
+
result = data.shift(0)
|
| 480 |
+
assert result is not data
|
| 481 |
+
|
| 482 |
+
result = data[:0].shift(2)
|
| 483 |
+
assert result is not data
|
| 484 |
+
|
| 485 |
+
def test_shift_fill_value(self, data):
|
| 486 |
+
arr = data[:4]
|
| 487 |
+
fill_value = data[0]
|
| 488 |
+
result = arr.shift(1, fill_value=fill_value)
|
| 489 |
+
expected = data.take([0, 0, 1, 2])
|
| 490 |
+
tm.assert_extension_array_equal(result, expected)
|
| 491 |
+
|
| 492 |
+
result = arr.shift(-2, fill_value=fill_value)
|
| 493 |
+
expected = data.take([2, 3, 0, 0])
|
| 494 |
+
tm.assert_extension_array_equal(result, expected)
|
| 495 |
+
|
| 496 |
+
def test_not_hashable(self, data):
    """ExtensionArrays are mutable containers, so hash() must raise."""
    # We are in general mutable, so not hashable
    with pytest.raises(TypeError, match="unhashable type"):
        hash(data)
|
| 500 |
+
|
| 501 |
+
def test_hash_pandas_object_works(self, data, as_frame):
    """hash_pandas_object is deterministic for EA-backed Series/DataFrames."""
    # https://github.com/pandas-dev/pandas/issues/23066
    data = pd.Series(data)
    if as_frame:
        data = data.to_frame()
    a = pd.util.hash_pandas_object(data)
    b = pd.util.hash_pandas_object(data)
    tm.assert_equal(a, b)
|
| 509 |
+
|
| 510 |
+
def test_searchsorted(self, data_for_sorting, as_series):
    """searchsorted with scalars, arrays, and a sorter, on a sorted EA.

    Boolean-like dtypes are delegated to _test_searchsorted_bool_dtypes
    because this test assumes three distinct values.
    """
    if data_for_sorting.dtype._is_boolean:
        return self._test_searchsorted_bool_dtypes(data_for_sorting, as_series)

    b, c, a = data_for_sorting
    arr = data_for_sorting.take([2, 0, 1])  # to get [a, b, c]

    if as_series:
        arr = pd.Series(arr)
    # left/right insertion points for each of the three values
    assert arr.searchsorted(a) == 0
    assert arr.searchsorted(a, side="right") == 1

    assert arr.searchsorted(b) == 1
    assert arr.searchsorted(b, side="right") == 2

    assert arr.searchsorted(c) == 2
    assert arr.searchsorted(c, side="right") == 3

    # array-valued needle
    result = arr.searchsorted(arr.take([0, 2]))
    expected = np.array([0, 2], dtype=np.intp)

    tm.assert_numpy_array_equal(result, expected)

    # sorter
    sorter = np.array([1, 2, 0])
    assert data_for_sorting.searchsorted(a, sorter=sorter) == 0
|
| 536 |
+
|
| 537 |
+
def _test_searchsorted_bool_dtypes(self, data_for_sorting, as_series):
    """searchsorted variant for two-valued (boolean-like) dtypes."""
    # We call this from test_searchsorted in cases where we have a
    # boolean-like dtype. The non-bool test assumes we have more than 2
    # unique values.
    dtype = data_for_sorting.dtype
    data_for_sorting = pd.array([True, False], dtype=dtype)
    b, a = data_for_sorting
    arr = type(data_for_sorting)._from_sequence([a, b])

    if as_series:
        arr = pd.Series(arr)
    assert arr.searchsorted(a) == 0
    assert arr.searchsorted(a, side="right") == 1

    assert arr.searchsorted(b) == 1
    assert arr.searchsorted(b, side="right") == 2

    # array-valued needle
    result = arr.searchsorted(arr.take([0, 1]))
    expected = np.array([0, 1], dtype=np.intp)

    tm.assert_numpy_array_equal(result, expected)

    # sorter
    sorter = np.array([1, 0])
    assert data_for_sorting.searchsorted(a, sorter=sorter) == 0
|
| 562 |
+
|
| 563 |
+
def test_where_series(self, data, na_value, as_frame):
    """where/mask on EA-backed Series (and frames): NA fill, array other,
    and the in-place mask path must all agree."""
    assert data[0] != data[1]
    cls = type(data)
    a, b = data[:2]

    orig = pd.Series(cls._from_sequence([a, a, b, b], dtype=data.dtype))
    ser = orig.copy()
    cond = np.array([True, True, False, False])

    if as_frame:
        ser = ser.to_frame(name="a")
        cond = cond.reshape(-1, 1)

    # where with no ``other``: False positions become NA
    result = ser.where(cond)
    expected = pd.Series(
        cls._from_sequence([a, a, na_value, na_value], dtype=data.dtype)
    )

    if as_frame:
        expected = expected.to_frame(name="a")
    tm.assert_equal(result, expected)

    # mask(~cond) in place must produce the same result
    ser.mask(~cond, inplace=True)
    tm.assert_equal(ser, expected)

    # array other
    ser = orig.copy()
    if as_frame:
        ser = ser.to_frame(name="a")
    cond = np.array([True, False, True, True])
    other = cls._from_sequence([a, b, a, b], dtype=data.dtype)
    if as_frame:
        other = pd.DataFrame({"a": other})
        cond = pd.DataFrame({"a": cond})
    result = ser.where(cond, other)
    expected = pd.Series(cls._from_sequence([a, b, b, b], dtype=data.dtype))
    if as_frame:
        expected = expected.to_frame(name="a")
    tm.assert_equal(result, expected)

    ser.mask(~cond, other, inplace=True)
    tm.assert_equal(ser, expected)
|
| 605 |
+
|
| 606 |
+
@pytest.mark.parametrize("repeats", [0, 1, 2, [1, 2, 3]])
def test_repeat(self, data, repeats, as_series, use_numpy):
    """repeat with scalar and per-element counts, via method and np.repeat."""
    arr = type(data)._from_sequence(data[:3], dtype=data.dtype)
    if as_series:
        arr = pd.Series(arr)

    result = np.repeat(arr, repeats) if use_numpy else arr.repeat(repeats)

    # normalize a scalar count to a per-element list for the expectation
    repeats = [repeats] * 3 if isinstance(repeats, int) else repeats
    expected = [x for x, n in zip(arr, repeats) for _ in range(n)]
    expected = type(data)._from_sequence(expected, dtype=data.dtype)
    if as_series:
        expected = pd.Series(expected, index=arr.index.repeat(repeats))

    tm.assert_equal(result, expected)
|
| 621 |
+
|
| 622 |
+
@pytest.mark.parametrize(
    "repeats, kwargs, error, msg",
    [
        (2, {"axis": 1}, ValueError, "axis"),
        (-1, {}, ValueError, "negative"),
        ([1, 2], {}, ValueError, "shape"),
        (2, {"foo": "bar"}, TypeError, "'foo'"),
    ],
)
def test_repeat_raises(self, data, repeats, kwargs, error, msg, use_numpy):
    """Invalid repeat arguments (bad axis, negative, wrong length, unknown
    keyword) must raise the documented error type."""
    with pytest.raises(error, match=msg):
        if use_numpy:
            np.repeat(data, repeats, **kwargs)
        else:
            data.repeat(repeats, **kwargs)
|
| 637 |
+
|
| 638 |
+
def test_delete(self, data):
|
| 639 |
+
result = data.delete(0)
|
| 640 |
+
expected = data[1:]
|
| 641 |
+
tm.assert_extension_array_equal(result, expected)
|
| 642 |
+
|
| 643 |
+
result = data.delete([1, 3])
|
| 644 |
+
expected = data._concat_same_type([data[[0]], data[[2]], data[4:]])
|
| 645 |
+
tm.assert_extension_array_equal(result, expected)
|
| 646 |
+
|
| 647 |
+
def test_insert(self, data):
    """insert at the front (positive and negative loc) and in the middle."""
    # insert at the beginning
    result = data[1:].insert(0, data[0])
    tm.assert_extension_array_equal(result, data)

    # negative loc pointing at the front is equivalent
    result = data[1:].insert(-len(data[1:]), data[0])
    tm.assert_extension_array_equal(result, data)

    # insert at the middle
    result = data[:-1].insert(4, data[-1])

    # build the expected order: last element moved into slot 4
    taker = np.arange(len(data))
    taker[5:] = taker[4:-1]
    taker[4] = len(data) - 1
    expected = data.take(taker)
    tm.assert_extension_array_equal(result, expected)
|
| 663 |
+
|
| 664 |
+
def test_insert_invalid(self, data, invalid_scalar):
    """Inserting a scalar of an incompatible type raises at any position."""
    item = invalid_scalar

    with pytest.raises((TypeError, ValueError)):
        data.insert(0, item)

    with pytest.raises((TypeError, ValueError)):
        data.insert(4, item)

    with pytest.raises((TypeError, ValueError)):
        data.insert(len(data) - 1, item)
|
| 675 |
+
|
| 676 |
+
def test_insert_invalid_loc(self, data):
    """Out-of-bounds loc raises IndexError; non-integer loc raises TypeError."""
    ub = len(data)

    with pytest.raises(IndexError):
        data.insert(ub + 1, data[0])

    with pytest.raises(IndexError):
        data.insert(-ub - 1, data[0])

    with pytest.raises(TypeError):
        # we expect TypeError here instead of IndexError to match np.insert
        data.insert(1.5, data[0])
|
| 688 |
+
|
| 689 |
+
@pytest.mark.parametrize("box", [pd.array, pd.Series, pd.DataFrame])
def test_equals(self, data, na_value, as_series, box):
    """equals across array/Series/DataFrame boxes returns real Python bools
    and correctly distinguishes value, NA, length, and type differences."""
    data2 = type(data)._from_sequence([data[0]] * len(data), dtype=data.dtype)
    data_na = type(data)._from_sequence([na_value] * len(data), dtype=data.dtype)

    data = tm.box_expected(data, box, transpose=False)
    data2 = tm.box_expected(data2, box, transpose=False)
    data_na = tm.box_expected(data_na, box, transpose=False)

    # we are asserting with `is True/False` explicitly, to test that the
    # result is an actual Python bool, and not something "truthy"

    assert data.equals(data) is True
    assert data.equals(data.copy()) is True

    # unequal other data
    assert data.equals(data2) is False
    assert data.equals(data_na) is False

    # different length
    assert data[:2].equals(data[:3]) is False

    # empty are equal
    assert data[:0].equals(data[:0]) is True

    # other types
    assert data.equals(None) is False
    assert data[[0]].equals(data[0]) is False
|
| 717 |
+
|
| 718 |
+
def test_equals_same_data_different_object(self, data):
    """Two distinct Series wrapping the same EA data compare equal."""
    # https://github.com/pandas-dev/pandas/issues/34660
    assert pd.Series(data).equals(pd.Series(data))
|
mgm/lib/python3.10/site-packages/pandas/tests/extension/base/reduce.py
ADDED
|
@@ -0,0 +1,153 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import final
|
| 2 |
+
|
| 3 |
+
import pytest
|
| 4 |
+
|
| 5 |
+
import pandas as pd
|
| 6 |
+
import pandas._testing as tm
|
| 7 |
+
from pandas.api.types import is_numeric_dtype
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class BaseReduceTests:
    """
    Reduction specific tests. Generally these only
    make sense for numeric/boolean operations.
    """

    def _supports_reduction(self, ser: pd.Series, op_name: str) -> bool:
        # Specify if we expect this reduction to succeed.
        # Default: nothing supported; EA authors override per-op.
        return False

    def check_reduce(self, ser: pd.Series, op_name: str, skipna: bool):
        """Run the reduction on ``ser`` and on a float64/object cast of it,
        and assert the results match."""
        # We perform the same operation on the np.float64 data and check
        # that the results match. Override if you need to cast to something
        # other than float64.
        res_op = getattr(ser, op_name)

        try:
            alt = ser.astype("float64")
        except (TypeError, ValueError):
            # e.g. Interval can't cast (TypeError), StringArray can't cast
            # (ValueError), so let's cast to object and do
            # the reduction pointwise
            alt = ser.astype(object)

        exp_op = getattr(alt, op_name)
        if op_name == "count":
            # count takes no skipna argument
            result = res_op()
            expected = exp_op()
        else:
            result = res_op(skipna=skipna)
            expected = exp_op(skipna=skipna)
        tm.assert_almost_equal(result, expected)

    def _get_expected_reduction_dtype(self, arr, op_name: str, skipna: bool):
        # Find the expected dtype when the given reduction is done on a DataFrame
        # column with this array. The default assumes float64-like behavior,
        # i.e. retains the dtype.
        return arr.dtype

    # We anticipate that authors should not need to override check_reduce_frame,
    # but should be able to do any necessary overriding in
    # _get_expected_reduction_dtype. If you have a use case where this
    # does not hold, please let us know at github.com/pandas-dev/pandas/issues.
    @final
    def check_reduce_frame(self, ser: pd.Series, op_name: str, skipna: bool):
        """Assert DataFrame-level reduction agrees with the keepdims=True
        array reduction and with the wrapped 1D Series reduction."""
        # Check that the 2D reduction done in a DataFrame reduction "looks like"
        # a wrapped version of the 1D reduction done by Series.
        arr = ser.array
        df = pd.DataFrame({"a": arr})

        kwargs = {"ddof": 1} if op_name in ["var", "std"] else {}

        cmp_dtype = self._get_expected_reduction_dtype(arr, op_name, skipna)

        # The DataFrame method just calls arr._reduce with keepdims=True,
        # so this first check is perfunctory.
        result1 = arr._reduce(op_name, skipna=skipna, keepdims=True, **kwargs)
        result2 = getattr(df, op_name)(skipna=skipna, **kwargs).array
        tm.assert_extension_array_equal(result1, result2)

        # Check that the 2D reduction looks like a wrapped version of the
        # 1D reduction
        if not skipna and ser.isna().any():
            expected = pd.array([pd.NA], dtype=cmp_dtype)
        else:
            exp_value = getattr(ser.dropna(), op_name)()
            expected = pd.array([exp_value], dtype=cmp_dtype)

        tm.assert_extension_array_equal(result1, expected)

    @pytest.mark.parametrize("skipna", [True, False])
    def test_reduce_series_boolean(self, data, all_boolean_reductions, skipna):
        """Boolean reductions (any/all) either succeed via check_reduce or
        raise TypeError when unsupported."""
        op_name = all_boolean_reductions
        ser = pd.Series(data)

        if not self._supports_reduction(ser, op_name):
            # TODO: the message being checked here isn't actually checking anything
            # NOTE(review): the trailing "|" makes this regex match any
            # message, so only the exception type is effectively checked.
            msg = (
                "[Cc]annot perform|Categorical is not ordered for operation|"
                "does not support reduction|"
            )

            with pytest.raises(TypeError, match=msg):
                getattr(ser, op_name)(skipna=skipna)

        else:
            self.check_reduce(ser, op_name, skipna)

    @pytest.mark.filterwarnings("ignore::RuntimeWarning")
    @pytest.mark.parametrize("skipna", [True, False])
    def test_reduce_series_numeric(self, data, all_numeric_reductions, skipna):
        """Numeric reductions either succeed via check_reduce or raise
        TypeError when unsupported."""
        op_name = all_numeric_reductions
        ser = pd.Series(data)

        if not self._supports_reduction(ser, op_name):
            # TODO: the message being checked here isn't actually checking anything
            # NOTE(review): the trailing "|" makes this regex match any
            # message, so only the exception type is effectively checked.
            msg = (
                "[Cc]annot perform|Categorical is not ordered for operation|"
                "does not support reduction|"
            )

            with pytest.raises(TypeError, match=msg):
                getattr(ser, op_name)(skipna=skipna)

        else:
            # min/max with empty produce numpy warnings
            self.check_reduce(ser, op_name, skipna)

    @pytest.mark.parametrize("skipna", [True, False])
    def test_reduce_frame(self, data, all_numeric_reductions, skipna):
        """DataFrame-level reduction check for numeric, supported ops only."""
        op_name = all_numeric_reductions
        ser = pd.Series(data)
        if not is_numeric_dtype(ser.dtype):
            pytest.skip(f"{ser.dtype} is not numeric dtype")

        if op_name in ["count", "kurt", "sem"]:
            pytest.skip(f"{op_name} not an array method")

        if not self._supports_reduction(ser, op_name):
            pytest.skip(f"Reduction {op_name} not supported for this dtype")

        self.check_reduce_frame(ser, op_name, skipna)
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
# TODO(3.0): remove BaseNoReduceTests, BaseNumericReduceTests,
|
| 135 |
+
# BaseBooleanReduceTests
|
| 136 |
+
class BaseNoReduceTests(BaseReduceTests):
    """we don't define any reductions"""

    # Inherits _supports_reduction returning False for every op, so all
    # reduction tests take the raises-TypeError branch.
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
class BaseNumericReduceTests(BaseReduceTests):
    # For backward compatibility only, this only runs the numeric reductions
    def _supports_reduction(self, ser: pd.Series, op_name: str) -> bool:
        """Claim support for every op except any/all, which are skipped."""
        if op_name in ["any", "all"]:
            pytest.skip("These are tested in BaseBooleanReduceTests")
        return True
|
| 146 |
+
|
| 147 |
+
|
| 148 |
+
class BaseBooleanReduceTests(BaseReduceTests):
    # For backward compatibility only, this only runs the numeric reductions
    # NOTE(review): the comment above is copied from BaseNumericReduceTests
    # upstream; this class actually runs only the boolean (any/all) reductions.
    def _supports_reduction(self, ser: pd.Series, op_name: str) -> bool:
        """Claim support only for any/all; everything else is skipped."""
        if op_name not in ["any", "all"]:
            pytest.skip("These are tested in BaseNumericReduceTests")
        return True
|
mgm/lib/python3.10/site-packages/pandas/tests/extension/base/reshaping.py
ADDED
|
@@ -0,0 +1,379 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import itertools
|
| 2 |
+
|
| 3 |
+
import numpy as np
|
| 4 |
+
import pytest
|
| 5 |
+
|
| 6 |
+
import pandas as pd
|
| 7 |
+
import pandas._testing as tm
|
| 8 |
+
from pandas.api.extensions import ExtensionArray
|
| 9 |
+
from pandas.core.internals.blocks import EABackedBlock
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class BaseReshapingTests:
|
| 13 |
+
"""Tests for reshaping and concatenation."""
|
| 14 |
+
|
| 15 |
+
@pytest.mark.parametrize("in_frame", [True, False])
|
| 16 |
+
def test_concat(self, data, in_frame):
|
| 17 |
+
wrapped = pd.Series(data)
|
| 18 |
+
if in_frame:
|
| 19 |
+
wrapped = pd.DataFrame(wrapped)
|
| 20 |
+
result = pd.concat([wrapped, wrapped], ignore_index=True)
|
| 21 |
+
|
| 22 |
+
assert len(result) == len(data) * 2
|
| 23 |
+
|
| 24 |
+
if in_frame:
|
| 25 |
+
dtype = result.dtypes[0]
|
| 26 |
+
else:
|
| 27 |
+
dtype = result.dtype
|
| 28 |
+
|
| 29 |
+
assert dtype == data.dtype
|
| 30 |
+
if hasattr(result._mgr, "blocks"):
|
| 31 |
+
assert isinstance(result._mgr.blocks[0], EABackedBlock)
|
| 32 |
+
assert isinstance(result._mgr.arrays[0], ExtensionArray)
|
| 33 |
+
|
| 34 |
+
@pytest.mark.parametrize("in_frame", [True, False])
|
| 35 |
+
def test_concat_all_na_block(self, data_missing, in_frame):
|
| 36 |
+
valid_block = pd.Series(data_missing.take([1, 1]), index=[0, 1])
|
| 37 |
+
na_block = pd.Series(data_missing.take([0, 0]), index=[2, 3])
|
| 38 |
+
if in_frame:
|
| 39 |
+
valid_block = pd.DataFrame({"a": valid_block})
|
| 40 |
+
na_block = pd.DataFrame({"a": na_block})
|
| 41 |
+
result = pd.concat([valid_block, na_block])
|
| 42 |
+
if in_frame:
|
| 43 |
+
expected = pd.DataFrame({"a": data_missing.take([1, 1, 0, 0])})
|
| 44 |
+
tm.assert_frame_equal(result, expected)
|
| 45 |
+
else:
|
| 46 |
+
expected = pd.Series(data_missing.take([1, 1, 0, 0]))
|
| 47 |
+
tm.assert_series_equal(result, expected)
|
| 48 |
+
|
| 49 |
+
def test_concat_mixed_dtypes(self, data):
|
| 50 |
+
# https://github.com/pandas-dev/pandas/issues/20762
|
| 51 |
+
df1 = pd.DataFrame({"A": data[:3]})
|
| 52 |
+
df2 = pd.DataFrame({"A": [1, 2, 3]})
|
| 53 |
+
df3 = pd.DataFrame({"A": ["a", "b", "c"]}).astype("category")
|
| 54 |
+
dfs = [df1, df2, df3]
|
| 55 |
+
|
| 56 |
+
# dataframes
|
| 57 |
+
result = pd.concat(dfs)
|
| 58 |
+
expected = pd.concat([x.astype(object) for x in dfs])
|
| 59 |
+
tm.assert_frame_equal(result, expected)
|
| 60 |
+
|
| 61 |
+
# series
|
| 62 |
+
result = pd.concat([x["A"] for x in dfs])
|
| 63 |
+
expected = pd.concat([x["A"].astype(object) for x in dfs])
|
| 64 |
+
tm.assert_series_equal(result, expected)
|
| 65 |
+
|
| 66 |
+
# simple test for just EA and one other
|
| 67 |
+
result = pd.concat([df1, df2.astype(object)])
|
| 68 |
+
expected = pd.concat([df1.astype("object"), df2.astype("object")])
|
| 69 |
+
tm.assert_frame_equal(result, expected)
|
| 70 |
+
|
| 71 |
+
result = pd.concat([df1["A"], df2["A"].astype(object)])
|
| 72 |
+
expected = pd.concat([df1["A"].astype("object"), df2["A"].astype("object")])
|
| 73 |
+
tm.assert_series_equal(result, expected)
|
| 74 |
+
|
| 75 |
+
def test_concat_columns(self, data, na_value):
|
| 76 |
+
df1 = pd.DataFrame({"A": data[:3]})
|
| 77 |
+
df2 = pd.DataFrame({"B": [1, 2, 3]})
|
| 78 |
+
|
| 79 |
+
expected = pd.DataFrame({"A": data[:3], "B": [1, 2, 3]})
|
| 80 |
+
result = pd.concat([df1, df2], axis=1)
|
| 81 |
+
tm.assert_frame_equal(result, expected)
|
| 82 |
+
result = pd.concat([df1["A"], df2["B"]], axis=1)
|
| 83 |
+
tm.assert_frame_equal(result, expected)
|
| 84 |
+
|
| 85 |
+
# non-aligned
|
| 86 |
+
df2 = pd.DataFrame({"B": [1, 2, 3]}, index=[1, 2, 3])
|
| 87 |
+
expected = pd.DataFrame(
|
| 88 |
+
{
|
| 89 |
+
"A": data._from_sequence(list(data[:3]) + [na_value], dtype=data.dtype),
|
| 90 |
+
"B": [np.nan, 1, 2, 3],
|
| 91 |
+
}
|
| 92 |
+
)
|
| 93 |
+
|
| 94 |
+
result = pd.concat([df1, df2], axis=1)
|
| 95 |
+
tm.assert_frame_equal(result, expected)
|
| 96 |
+
result = pd.concat([df1["A"], df2["B"]], axis=1)
|
| 97 |
+
tm.assert_frame_equal(result, expected)
|
| 98 |
+
|
| 99 |
+
def test_concat_extension_arrays_copy_false(self, data, na_value):
|
| 100 |
+
# GH 20756
|
| 101 |
+
df1 = pd.DataFrame({"A": data[:3]})
|
| 102 |
+
df2 = pd.DataFrame({"B": data[3:7]})
|
| 103 |
+
expected = pd.DataFrame(
|
| 104 |
+
{
|
| 105 |
+
"A": data._from_sequence(list(data[:3]) + [na_value], dtype=data.dtype),
|
| 106 |
+
"B": data[3:7],
|
| 107 |
+
}
|
| 108 |
+
)
|
| 109 |
+
result = pd.concat([df1, df2], axis=1, copy=False)
|
| 110 |
+
tm.assert_frame_equal(result, expected)
|
| 111 |
+
|
| 112 |
+
def test_concat_with_reindex(self, data):
|
| 113 |
+
# GH-33027
|
| 114 |
+
a = pd.DataFrame({"a": data[:5]})
|
| 115 |
+
b = pd.DataFrame({"b": data[:5]})
|
| 116 |
+
result = pd.concat([a, b], ignore_index=True)
|
| 117 |
+
expected = pd.DataFrame(
|
| 118 |
+
{
|
| 119 |
+
"a": data.take(list(range(5)) + ([-1] * 5), allow_fill=True),
|
| 120 |
+
"b": data.take(([-1] * 5) + list(range(5)), allow_fill=True),
|
| 121 |
+
}
|
| 122 |
+
)
|
| 123 |
+
tm.assert_frame_equal(result, expected)
|
| 124 |
+
|
| 125 |
+
def test_align(self, data, na_value):
|
| 126 |
+
a = data[:3]
|
| 127 |
+
b = data[2:5]
|
| 128 |
+
r1, r2 = pd.Series(a).align(pd.Series(b, index=[1, 2, 3]))
|
| 129 |
+
|
| 130 |
+
# Assumes that the ctor can take a list of scalars of the type
|
| 131 |
+
e1 = pd.Series(data._from_sequence(list(a) + [na_value], dtype=data.dtype))
|
| 132 |
+
e2 = pd.Series(data._from_sequence([na_value] + list(b), dtype=data.dtype))
|
| 133 |
+
tm.assert_series_equal(r1, e1)
|
| 134 |
+
tm.assert_series_equal(r2, e2)
|
| 135 |
+
|
| 136 |
+
def test_align_frame(self, data, na_value):
|
| 137 |
+
a = data[:3]
|
| 138 |
+
b = data[2:5]
|
| 139 |
+
r1, r2 = pd.DataFrame({"A": a}).align(pd.DataFrame({"A": b}, index=[1, 2, 3]))
|
| 140 |
+
|
| 141 |
+
# Assumes that the ctor can take a list of scalars of the type
|
| 142 |
+
e1 = pd.DataFrame(
|
| 143 |
+
{"A": data._from_sequence(list(a) + [na_value], dtype=data.dtype)}
|
| 144 |
+
)
|
| 145 |
+
e2 = pd.DataFrame(
|
| 146 |
+
{"A": data._from_sequence([na_value] + list(b), dtype=data.dtype)}
|
| 147 |
+
)
|
| 148 |
+
tm.assert_frame_equal(r1, e1)
|
| 149 |
+
tm.assert_frame_equal(r2, e2)
|
| 150 |
+
|
| 151 |
+
def test_align_series_frame(self, data, na_value):
|
| 152 |
+
# https://github.com/pandas-dev/pandas/issues/20576
|
| 153 |
+
ser = pd.Series(data, name="a")
|
| 154 |
+
df = pd.DataFrame({"col": np.arange(len(ser) + 1)})
|
| 155 |
+
r1, r2 = ser.align(df)
|
| 156 |
+
|
| 157 |
+
e1 = pd.Series(
|
| 158 |
+
data._from_sequence(list(data) + [na_value], dtype=data.dtype),
|
| 159 |
+
name=ser.name,
|
| 160 |
+
)
|
| 161 |
+
|
| 162 |
+
tm.assert_series_equal(r1, e1)
|
| 163 |
+
tm.assert_frame_equal(r2, df)
|
| 164 |
+
|
| 165 |
+
def test_set_frame_expand_regular_with_extension(self, data):
|
| 166 |
+
df = pd.DataFrame({"A": [1] * len(data)})
|
| 167 |
+
df["B"] = data
|
| 168 |
+
expected = pd.DataFrame({"A": [1] * len(data), "B": data})
|
| 169 |
+
tm.assert_frame_equal(df, expected)
|
| 170 |
+
|
| 171 |
+
def test_set_frame_expand_extension_with_regular(self, data):
|
| 172 |
+
df = pd.DataFrame({"A": data})
|
| 173 |
+
df["B"] = [1] * len(data)
|
| 174 |
+
expected = pd.DataFrame({"A": data, "B": [1] * len(data)})
|
| 175 |
+
tm.assert_frame_equal(df, expected)
|
| 176 |
+
|
| 177 |
+
def test_set_frame_overwrite_object(self, data):
|
| 178 |
+
# https://github.com/pandas-dev/pandas/issues/20555
|
| 179 |
+
df = pd.DataFrame({"A": [1] * len(data)}, dtype=object)
|
| 180 |
+
df["A"] = data
|
| 181 |
+
assert df.dtypes["A"] == data.dtype
|
| 182 |
+
|
| 183 |
+
def test_merge(self, data, na_value):
|
| 184 |
+
# GH-20743
|
| 185 |
+
df1 = pd.DataFrame({"ext": data[:3], "int1": [1, 2, 3], "key": [0, 1, 2]})
|
| 186 |
+
df2 = pd.DataFrame({"int2": [1, 2, 3, 4], "key": [0, 0, 1, 3]})
|
| 187 |
+
|
| 188 |
+
res = pd.merge(df1, df2)
|
| 189 |
+
exp = pd.DataFrame(
|
| 190 |
+
{
|
| 191 |
+
"int1": [1, 1, 2],
|
| 192 |
+
"int2": [1, 2, 3],
|
| 193 |
+
"key": [0, 0, 1],
|
| 194 |
+
"ext": data._from_sequence(
|
| 195 |
+
[data[0], data[0], data[1]], dtype=data.dtype
|
| 196 |
+
),
|
| 197 |
+
}
|
| 198 |
+
)
|
| 199 |
+
tm.assert_frame_equal(res, exp[["ext", "int1", "key", "int2"]])
|
| 200 |
+
|
| 201 |
+
res = pd.merge(df1, df2, how="outer")
|
| 202 |
+
exp = pd.DataFrame(
|
| 203 |
+
{
|
| 204 |
+
"int1": [1, 1, 2, 3, np.nan],
|
| 205 |
+
"int2": [1, 2, 3, np.nan, 4],
|
| 206 |
+
"key": [0, 0, 1, 2, 3],
|
| 207 |
+
"ext": data._from_sequence(
|
| 208 |
+
[data[0], data[0], data[1], data[2], na_value], dtype=data.dtype
|
| 209 |
+
),
|
| 210 |
+
}
|
| 211 |
+
)
|
| 212 |
+
tm.assert_frame_equal(res, exp[["ext", "int1", "key", "int2"]])
|
| 213 |
+
|
| 214 |
+
def test_merge_on_extension_array(self, data):
|
| 215 |
+
# GH 23020
|
| 216 |
+
a, b = data[:2]
|
| 217 |
+
key = type(data)._from_sequence([a, b], dtype=data.dtype)
|
| 218 |
+
|
| 219 |
+
df = pd.DataFrame({"key": key, "val": [1, 2]})
|
| 220 |
+
result = pd.merge(df, df, on="key")
|
| 221 |
+
expected = pd.DataFrame({"key": key, "val_x": [1, 2], "val_y": [1, 2]})
|
| 222 |
+
tm.assert_frame_equal(result, expected)
|
| 223 |
+
|
| 224 |
+
# order
|
| 225 |
+
result = pd.merge(df.iloc[[1, 0]], df, on="key")
|
| 226 |
+
expected = expected.iloc[[1, 0]].reset_index(drop=True)
|
| 227 |
+
tm.assert_frame_equal(result, expected)
|
| 228 |
+
|
| 229 |
+
def test_merge_on_extension_array_duplicates(self, data):
|
| 230 |
+
# GH 23020
|
| 231 |
+
a, b = data[:2]
|
| 232 |
+
key = type(data)._from_sequence([a, b, a], dtype=data.dtype)
|
| 233 |
+
df1 = pd.DataFrame({"key": key, "val": [1, 2, 3]})
|
| 234 |
+
df2 = pd.DataFrame({"key": key, "val": [1, 2, 3]})
|
| 235 |
+
|
| 236 |
+
result = pd.merge(df1, df2, on="key")
|
| 237 |
+
expected = pd.DataFrame(
|
| 238 |
+
{
|
| 239 |
+
"key": key.take([0, 0, 1, 2, 2]),
|
| 240 |
+
"val_x": [1, 1, 2, 3, 3],
|
| 241 |
+
"val_y": [1, 3, 2, 1, 3],
|
| 242 |
+
}
|
| 243 |
+
)
|
| 244 |
+
tm.assert_frame_equal(result, expected)
|
| 245 |
+
|
| 246 |
+
@pytest.mark.filterwarnings(
|
| 247 |
+
"ignore:The previous implementation of stack is deprecated"
|
| 248 |
+
)
|
| 249 |
+
@pytest.mark.parametrize(
|
| 250 |
+
"columns",
|
| 251 |
+
[
|
| 252 |
+
["A", "B"],
|
| 253 |
+
pd.MultiIndex.from_tuples(
|
| 254 |
+
[("A", "a"), ("A", "b")], names=["outer", "inner"]
|
| 255 |
+
),
|
| 256 |
+
],
|
| 257 |
+
)
|
| 258 |
+
@pytest.mark.parametrize("future_stack", [True, False])
|
| 259 |
+
def test_stack(self, data, columns, future_stack):
|
| 260 |
+
df = pd.DataFrame({"A": data[:5], "B": data[:5]})
|
| 261 |
+
df.columns = columns
|
| 262 |
+
result = df.stack(future_stack=future_stack)
|
| 263 |
+
expected = df.astype(object).stack(future_stack=future_stack)
|
| 264 |
+
# we need a second astype(object), in case the constructor inferred
|
| 265 |
+
# object -> specialized, as is done for period.
|
| 266 |
+
expected = expected.astype(object)
|
| 267 |
+
|
| 268 |
+
if isinstance(expected, pd.Series):
|
| 269 |
+
assert result.dtype == df.iloc[:, 0].dtype
|
| 270 |
+
else:
|
| 271 |
+
assert all(result.dtypes == df.iloc[:, 0].dtype)
|
| 272 |
+
|
| 273 |
+
result = result.astype(object)
|
| 274 |
+
tm.assert_equal(result, expected)
|
| 275 |
+
|
| 276 |
+
@pytest.mark.parametrize(
|
| 277 |
+
"index",
|
| 278 |
+
[
|
| 279 |
+
# Two levels, uniform.
|
| 280 |
+
pd.MultiIndex.from_product(([["A", "B"], ["a", "b"]]), names=["a", "b"]),
|
| 281 |
+
# non-uniform
|
| 282 |
+
pd.MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "b")]),
|
| 283 |
+
# three levels, non-uniform
|
| 284 |
+
pd.MultiIndex.from_product([("A", "B"), ("a", "b", "c"), (0, 1, 2)]),
|
| 285 |
+
pd.MultiIndex.from_tuples(
|
| 286 |
+
[
|
| 287 |
+
("A", "a", 1),
|
| 288 |
+
("A", "b", 0),
|
| 289 |
+
("A", "a", 0),
|
| 290 |
+
("B", "a", 0),
|
| 291 |
+
("B", "c", 1),
|
| 292 |
+
]
|
| 293 |
+
),
|
| 294 |
+
],
|
| 295 |
+
)
|
| 296 |
+
@pytest.mark.parametrize("obj", ["series", "frame"])
|
| 297 |
+
def test_unstack(self, data, index, obj):
|
| 298 |
+
data = data[: len(index)]
|
| 299 |
+
if obj == "series":
|
| 300 |
+
ser = pd.Series(data, index=index)
|
| 301 |
+
else:
|
| 302 |
+
ser = pd.DataFrame({"A": data, "B": data}, index=index)
|
| 303 |
+
|
| 304 |
+
n = index.nlevels
|
| 305 |
+
levels = list(range(n))
|
| 306 |
+
# [0, 1, 2]
|
| 307 |
+
# [(0,), (1,), (2,), (0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)]
|
| 308 |
+
combinations = itertools.chain.from_iterable(
|
| 309 |
+
itertools.permutations(levels, i) for i in range(1, n)
|
| 310 |
+
)
|
| 311 |
+
|
| 312 |
+
for level in combinations:
|
| 313 |
+
result = ser.unstack(level=level)
|
| 314 |
+
assert all(
|
| 315 |
+
isinstance(result[col].array, type(data)) for col in result.columns
|
| 316 |
+
)
|
| 317 |
+
|
| 318 |
+
if obj == "series":
|
| 319 |
+
# We should get the same result with to_frame+unstack+droplevel
|
| 320 |
+
df = ser.to_frame()
|
| 321 |
+
|
| 322 |
+
alt = df.unstack(level=level).droplevel(0, axis=1)
|
| 323 |
+
tm.assert_frame_equal(result, alt)
|
| 324 |
+
|
| 325 |
+
obj_ser = ser.astype(object)
|
| 326 |
+
|
| 327 |
+
expected = obj_ser.unstack(level=level, fill_value=data.dtype.na_value)
|
| 328 |
+
if obj == "series":
|
| 329 |
+
assert (expected.dtypes == object).all()
|
| 330 |
+
|
| 331 |
+
result = result.astype(object)
|
| 332 |
+
tm.assert_frame_equal(result, expected)
|
| 333 |
+
|
| 334 |
+
def test_ravel(self, data):
|
| 335 |
+
# as long as EA is 1D-only, ravel is a no-op
|
| 336 |
+
result = data.ravel()
|
| 337 |
+
assert type(result) == type(data)
|
| 338 |
+
|
| 339 |
+
if data.dtype._is_immutable:
|
| 340 |
+
pytest.skip(f"test_ravel assumes mutability and {data.dtype} is immutable")
|
| 341 |
+
|
| 342 |
+
# Check that we have a view, not a copy
|
| 343 |
+
result[0] = result[1]
|
| 344 |
+
assert data[0] == data[1]
|
| 345 |
+
|
| 346 |
+
def test_transpose(self, data):
|
| 347 |
+
result = data.transpose()
|
| 348 |
+
assert type(result) == type(data)
|
| 349 |
+
|
| 350 |
+
# check we get a new object
|
| 351 |
+
assert result is not data
|
| 352 |
+
|
| 353 |
+
# If we ever _did_ support 2D, shape should be reversed
|
| 354 |
+
assert result.shape == data.shape[::-1]
|
| 355 |
+
|
| 356 |
+
if data.dtype._is_immutable:
|
| 357 |
+
pytest.skip(
|
| 358 |
+
f"test_transpose assumes mutability and {data.dtype} is immutable"
|
| 359 |
+
)
|
| 360 |
+
|
| 361 |
+
# Check that we have a view, not a copy
|
| 362 |
+
result[0] = result[1]
|
| 363 |
+
assert data[0] == data[1]
|
| 364 |
+
|
| 365 |
+
def test_transpose_frame(self, data):
|
| 366 |
+
df = pd.DataFrame({"A": data[:4], "B": data[:4]}, index=["a", "b", "c", "d"])
|
| 367 |
+
result = df.T
|
| 368 |
+
expected = pd.DataFrame(
|
| 369 |
+
{
|
| 370 |
+
"a": type(data)._from_sequence([data[0]] * 2, dtype=data.dtype),
|
| 371 |
+
"b": type(data)._from_sequence([data[1]] * 2, dtype=data.dtype),
|
| 372 |
+
"c": type(data)._from_sequence([data[2]] * 2, dtype=data.dtype),
|
| 373 |
+
"d": type(data)._from_sequence([data[3]] * 2, dtype=data.dtype),
|
| 374 |
+
},
|
| 375 |
+
index=["A", "B"],
|
| 376 |
+
)
|
| 377 |
+
tm.assert_frame_equal(result, expected)
|
| 378 |
+
tm.assert_frame_equal(np.transpose(np.transpose(df)), df)
|
| 379 |
+
tm.assert_frame_equal(np.transpose(np.transpose(df[["A"]])), df[["A"]])
|
mgm/lib/python3.10/site-packages/pandas/tests/extension/test_categorical.py
ADDED
|
@@ -0,0 +1,200 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
This file contains a minimal set of tests for compliance with the extension
|
| 3 |
+
array interface test suite, and should contain no other tests.
|
| 4 |
+
The test suite for the full functionality of the array is located in
|
| 5 |
+
`pandas/tests/arrays/`.
|
| 6 |
+
|
| 7 |
+
The tests in this file are inherited from the BaseExtensionTests, and only
|
| 8 |
+
minimal tweaks should be applied to get the tests passing (by overwriting a
|
| 9 |
+
parent method).
|
| 10 |
+
|
| 11 |
+
Additional tests should either be added to one of the BaseExtensionTests
|
| 12 |
+
classes (if they are relevant for the extension interface for all dtypes), or
|
| 13 |
+
be added to the array-specific tests in `pandas/tests/arrays/`.
|
| 14 |
+
|
| 15 |
+
"""
|
| 16 |
+
import string
|
| 17 |
+
|
| 18 |
+
import numpy as np
|
| 19 |
+
import pytest
|
| 20 |
+
|
| 21 |
+
from pandas._config import using_pyarrow_string_dtype
|
| 22 |
+
|
| 23 |
+
import pandas as pd
|
| 24 |
+
from pandas import Categorical
|
| 25 |
+
import pandas._testing as tm
|
| 26 |
+
from pandas.api.types import CategoricalDtype
|
| 27 |
+
from pandas.tests.extension import base
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def make_data():
|
| 31 |
+
while True:
|
| 32 |
+
values = np.random.default_rng(2).choice(list(string.ascii_letters), size=100)
|
| 33 |
+
# ensure we meet the requirements
|
| 34 |
+
# 1. first two not null
|
| 35 |
+
# 2. first and second are different
|
| 36 |
+
if values[0] != values[1]:
|
| 37 |
+
break
|
| 38 |
+
return values
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
@pytest.fixture
|
| 42 |
+
def dtype():
|
| 43 |
+
return CategoricalDtype()
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
@pytest.fixture
|
| 47 |
+
def data():
|
| 48 |
+
"""Length-100 array for this type.
|
| 49 |
+
|
| 50 |
+
* data[0] and data[1] should both be non missing
|
| 51 |
+
* data[0] and data[1] should not be equal
|
| 52 |
+
"""
|
| 53 |
+
return Categorical(make_data())
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
@pytest.fixture
|
| 57 |
+
def data_missing():
|
| 58 |
+
"""Length 2 array with [NA, Valid]"""
|
| 59 |
+
return Categorical([np.nan, "A"])
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
@pytest.fixture
|
| 63 |
+
def data_for_sorting():
|
| 64 |
+
return Categorical(["A", "B", "C"], categories=["C", "A", "B"], ordered=True)
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
@pytest.fixture
|
| 68 |
+
def data_missing_for_sorting():
|
| 69 |
+
return Categorical(["A", None, "B"], categories=["B", "A"], ordered=True)
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
@pytest.fixture
|
| 73 |
+
def data_for_grouping():
|
| 74 |
+
return Categorical(["a", "a", None, None, "b", "b", "a", "c"])
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
class TestCategorical(base.ExtensionTests):
|
| 78 |
+
@pytest.mark.xfail(reason="Memory usage doesn't match")
|
| 79 |
+
def test_memory_usage(self, data):
|
| 80 |
+
# TODO: Is this deliberate?
|
| 81 |
+
super().test_memory_usage(data)
|
| 82 |
+
|
| 83 |
+
def test_contains(self, data, data_missing):
|
| 84 |
+
# GH-37867
|
| 85 |
+
# na value handling in Categorical.__contains__ is deprecated.
|
| 86 |
+
# See base.BaseInterFaceTests.test_contains for more details.
|
| 87 |
+
|
| 88 |
+
na_value = data.dtype.na_value
|
| 89 |
+
# ensure data without missing values
|
| 90 |
+
data = data[~data.isna()]
|
| 91 |
+
|
| 92 |
+
# first elements are non-missing
|
| 93 |
+
assert data[0] in data
|
| 94 |
+
assert data_missing[0] in data_missing
|
| 95 |
+
|
| 96 |
+
# check the presence of na_value
|
| 97 |
+
assert na_value in data_missing
|
| 98 |
+
assert na_value not in data
|
| 99 |
+
|
| 100 |
+
# Categoricals can contain other nan-likes than na_value
|
| 101 |
+
for na_value_obj in tm.NULL_OBJECTS:
|
| 102 |
+
if na_value_obj is na_value:
|
| 103 |
+
continue
|
| 104 |
+
assert na_value_obj not in data
|
| 105 |
+
# this section suffers from super method
|
| 106 |
+
if not using_pyarrow_string_dtype():
|
| 107 |
+
assert na_value_obj in data_missing
|
| 108 |
+
|
| 109 |
+
def test_empty(self, dtype):
|
| 110 |
+
cls = dtype.construct_array_type()
|
| 111 |
+
result = cls._empty((4,), dtype=dtype)
|
| 112 |
+
|
| 113 |
+
assert isinstance(result, cls)
|
| 114 |
+
# the dtype we passed is not initialized, so will not match the
|
| 115 |
+
# dtype on our result.
|
| 116 |
+
assert result.dtype == CategoricalDtype([])
|
| 117 |
+
|
| 118 |
+
@pytest.mark.skip(reason="Backwards compatibility")
|
| 119 |
+
def test_getitem_scalar(self, data):
|
| 120 |
+
# CategoricalDtype.type isn't "correct" since it should
|
| 121 |
+
# be a parent of the elements (object). But don't want
|
| 122 |
+
# to break things by changing.
|
| 123 |
+
super().test_getitem_scalar(data)
|
| 124 |
+
|
| 125 |
+
@pytest.mark.xfail(reason="Unobserved categories included")
|
| 126 |
+
def test_value_counts(self, all_data, dropna):
|
| 127 |
+
return super().test_value_counts(all_data, dropna)
|
| 128 |
+
|
| 129 |
+
def test_combine_add(self, data_repeated):
|
| 130 |
+
# GH 20825
|
| 131 |
+
# When adding categoricals in combine, result is a string
|
| 132 |
+
orig_data1, orig_data2 = data_repeated(2)
|
| 133 |
+
s1 = pd.Series(orig_data1)
|
| 134 |
+
s2 = pd.Series(orig_data2)
|
| 135 |
+
result = s1.combine(s2, lambda x1, x2: x1 + x2)
|
| 136 |
+
expected = pd.Series(
|
| 137 |
+
[a + b for (a, b) in zip(list(orig_data1), list(orig_data2))]
|
| 138 |
+
)
|
| 139 |
+
tm.assert_series_equal(result, expected)
|
| 140 |
+
|
| 141 |
+
val = s1.iloc[0]
|
| 142 |
+
result = s1.combine(val, lambda x1, x2: x1 + x2)
|
| 143 |
+
expected = pd.Series([a + val for a in list(orig_data1)])
|
| 144 |
+
tm.assert_series_equal(result, expected)
|
| 145 |
+
|
| 146 |
+
@pytest.mark.parametrize("na_action", [None, "ignore"])
|
| 147 |
+
def test_map(self, data, na_action):
|
| 148 |
+
result = data.map(lambda x: x, na_action=na_action)
|
| 149 |
+
tm.assert_extension_array_equal(result, data)
|
| 150 |
+
|
| 151 |
+
def test_arith_frame_with_scalar(self, data, all_arithmetic_operators, request):
|
| 152 |
+
# frame & scalar
|
| 153 |
+
op_name = all_arithmetic_operators
|
| 154 |
+
if op_name == "__rmod__":
|
| 155 |
+
request.applymarker(
|
| 156 |
+
pytest.mark.xfail(
|
| 157 |
+
reason="rmod never called when string is first argument"
|
| 158 |
+
)
|
| 159 |
+
)
|
| 160 |
+
super().test_arith_frame_with_scalar(data, op_name)
|
| 161 |
+
|
| 162 |
+
def test_arith_series_with_scalar(self, data, all_arithmetic_operators, request):
|
| 163 |
+
op_name = all_arithmetic_operators
|
| 164 |
+
if op_name == "__rmod__":
|
| 165 |
+
request.applymarker(
|
| 166 |
+
pytest.mark.xfail(
|
| 167 |
+
reason="rmod never called when string is first argument"
|
| 168 |
+
)
|
| 169 |
+
)
|
| 170 |
+
super().test_arith_series_with_scalar(data, op_name)
|
| 171 |
+
|
| 172 |
+
def _compare_other(self, ser: pd.Series, data, op, other):
|
| 173 |
+
op_name = f"__{op.__name__}__"
|
| 174 |
+
if op_name not in ["__eq__", "__ne__"]:
|
| 175 |
+
msg = "Unordered Categoricals can only compare equality or not"
|
| 176 |
+
with pytest.raises(TypeError, match=msg):
|
| 177 |
+
op(data, other)
|
| 178 |
+
else:
|
| 179 |
+
return super()._compare_other(ser, data, op, other)
|
| 180 |
+
|
| 181 |
+
@pytest.mark.xfail(reason="Categorical overrides __repr__")
|
| 182 |
+
@pytest.mark.parametrize("size", ["big", "small"])
|
| 183 |
+
def test_array_repr(self, data, size):
|
| 184 |
+
super().test_array_repr(data, size)
|
| 185 |
+
|
| 186 |
+
@pytest.mark.xfail(reason="TBD")
|
| 187 |
+
@pytest.mark.parametrize("as_index", [True, False])
|
| 188 |
+
def test_groupby_extension_agg(self, as_index, data_for_grouping):
|
| 189 |
+
super().test_groupby_extension_agg(as_index, data_for_grouping)
|
| 190 |
+
|
| 191 |
+
|
| 192 |
+
class Test2DCompat(base.NDArrayBacked2DTests):
|
| 193 |
+
def test_repr_2d(self, data):
|
| 194 |
+
# Categorical __repr__ doesn't include "Categorical", so we need
|
| 195 |
+
# to special-case
|
| 196 |
+
res = repr(data.reshape(1, -1))
|
| 197 |
+
assert res.count("\nCategories") == 1
|
| 198 |
+
|
| 199 |
+
res = repr(data.reshape(-1, 1))
|
| 200 |
+
assert res.count("\nCategories") == 1
|
mgm/lib/python3.10/site-packages/pandas/tests/extension/test_common.py
ADDED
|
@@ -0,0 +1,105 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import pytest
|
| 3 |
+
|
| 4 |
+
from pandas.core.dtypes import dtypes
|
| 5 |
+
from pandas.core.dtypes.common import is_extension_array_dtype
|
| 6 |
+
|
| 7 |
+
import pandas as pd
|
| 8 |
+
import pandas._testing as tm
|
| 9 |
+
from pandas.core.arrays import ExtensionArray
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class DummyDtype(dtypes.ExtensionDtype):
|
| 13 |
+
pass
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class DummyArray(ExtensionArray):
|
| 17 |
+
def __init__(self, data) -> None:
|
| 18 |
+
self.data = data
|
| 19 |
+
|
| 20 |
+
def __array__(self, dtype=None, copy=None):
|
| 21 |
+
return self.data
|
| 22 |
+
|
| 23 |
+
@property
|
| 24 |
+
def dtype(self):
|
| 25 |
+
return DummyDtype()
|
| 26 |
+
|
| 27 |
+
def astype(self, dtype, copy=True):
|
| 28 |
+
# we don't support anything but a single dtype
|
| 29 |
+
if isinstance(dtype, DummyDtype):
|
| 30 |
+
if copy:
|
| 31 |
+
return type(self)(self.data)
|
| 32 |
+
return self
|
| 33 |
+
elif not copy:
|
| 34 |
+
return np.asarray(self, dtype=dtype)
|
| 35 |
+
else:
|
| 36 |
+
return np.array(self, dtype=dtype, copy=copy)
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
class TestExtensionArrayDtype:
|
| 40 |
+
@pytest.mark.parametrize(
|
| 41 |
+
"values",
|
| 42 |
+
[
|
| 43 |
+
pd.Categorical([]),
|
| 44 |
+
pd.Categorical([]).dtype,
|
| 45 |
+
pd.Series(pd.Categorical([])),
|
| 46 |
+
DummyDtype(),
|
| 47 |
+
DummyArray(np.array([1, 2])),
|
| 48 |
+
],
|
| 49 |
+
)
|
| 50 |
+
def test_is_extension_array_dtype(self, values):
|
| 51 |
+
assert is_extension_array_dtype(values)
|
| 52 |
+
|
| 53 |
+
@pytest.mark.parametrize("values", [np.array([]), pd.Series(np.array([]))])
|
| 54 |
+
def test_is_not_extension_array_dtype(self, values):
|
| 55 |
+
assert not is_extension_array_dtype(values)
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def test_astype():
|
| 59 |
+
arr = DummyArray(np.array([1, 2, 3]))
|
| 60 |
+
expected = np.array([1, 2, 3], dtype=object)
|
| 61 |
+
|
| 62 |
+
result = arr.astype(object)
|
| 63 |
+
tm.assert_numpy_array_equal(result, expected)
|
| 64 |
+
|
| 65 |
+
result = arr.astype("object")
|
| 66 |
+
tm.assert_numpy_array_equal(result, expected)
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
def test_astype_no_copy():
|
| 70 |
+
arr = DummyArray(np.array([1, 2, 3], dtype=np.int64))
|
| 71 |
+
result = arr.astype(arr.dtype, copy=False)
|
| 72 |
+
|
| 73 |
+
assert arr is result
|
| 74 |
+
|
| 75 |
+
result = arr.astype(arr.dtype)
|
| 76 |
+
assert arr is not result
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
@pytest.mark.parametrize("dtype", [dtypes.CategoricalDtype(), dtypes.IntervalDtype()])
|
| 80 |
+
def test_is_extension_array_dtype(dtype):
|
| 81 |
+
assert isinstance(dtype, dtypes.ExtensionDtype)
|
| 82 |
+
assert is_extension_array_dtype(dtype)
|
| 83 |
+
|
| 84 |
+
|
| 85 |
+
class CapturingStringArray(pd.arrays.StringArray):
|
| 86 |
+
"""Extend StringArray to capture arguments to __getitem__"""
|
| 87 |
+
|
| 88 |
+
def __getitem__(self, item):
|
| 89 |
+
self.last_item_arg = item
|
| 90 |
+
return super().__getitem__(item)
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
def test_ellipsis_index():
|
| 94 |
+
# GH#42430 1D slices over extension types turn into N-dimensional slices
|
| 95 |
+
# over ExtensionArrays
|
| 96 |
+
df = pd.DataFrame(
|
| 97 |
+
{"col1": CapturingStringArray(np.array(["hello", "world"], dtype=object))}
|
| 98 |
+
)
|
| 99 |
+
_ = df.iloc[:1]
|
| 100 |
+
|
| 101 |
+
# String comparison because there's no native way to compare slices.
|
| 102 |
+
# Before the fix for GH#42430, last_item_arg would get set to the 2D slice
|
| 103 |
+
# (Ellipsis, slice(None, 1, None))
|
| 104 |
+
out = df["col1"].array.last_item_arg
|
| 105 |
+
assert str(out) == "slice(None, 1, None)"
|
mgm/lib/python3.10/site-packages/pandas/tests/extension/test_datetime.py
ADDED
|
@@ -0,0 +1,144 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
This file contains a minimal set of tests for compliance with the extension
|
| 3 |
+
array interface test suite, and should contain no other tests.
|
| 4 |
+
The test suite for the full functionality of the array is located in
|
| 5 |
+
`pandas/tests/arrays/`.
|
| 6 |
+
|
| 7 |
+
The tests in this file are inherited from the BaseExtensionTests, and only
|
| 8 |
+
minimal tweaks should be applied to get the tests passing (by overwriting a
|
| 9 |
+
parent method).
|
| 10 |
+
|
| 11 |
+
Additional tests should either be added to one of the BaseExtensionTests
|
| 12 |
+
classes (if they are relevant for the extension interface for all dtypes), or
|
| 13 |
+
be added to the array-specific tests in `pandas/tests/arrays/`.
|
| 14 |
+
|
| 15 |
+
"""
|
| 16 |
+
import numpy as np
|
| 17 |
+
import pytest
|
| 18 |
+
|
| 19 |
+
from pandas.core.dtypes.dtypes import DatetimeTZDtype
|
| 20 |
+
|
| 21 |
+
import pandas as pd
|
| 22 |
+
import pandas._testing as tm
|
| 23 |
+
from pandas.core.arrays import DatetimeArray
|
| 24 |
+
from pandas.tests.extension import base
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
@pytest.fixture(params=["US/Central"])
def dtype(request):
    """A tz-aware nanosecond datetime dtype for the parametrized timezone."""
    tz = request.param
    return DatetimeTZDtype(unit="ns", tz=tz)
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
@pytest.fixture
def data(dtype):
    """Length-100 tz-aware DatetimeArray of daily timestamps starting 2000."""
    stamps = pd.date_range("2000", periods=100, tz=dtype.tz)
    return DatetimeArray._from_sequence(stamps, dtype=dtype)
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
@pytest.fixture
def data_missing(dtype):
    """Length-2 array laid out as [NaT, valid timestamp]."""
    raw = np.array(["NaT", "2000-01-01"], dtype="datetime64[ns]")
    return DatetimeArray._from_sequence(raw, dtype=dtype)
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
@pytest.fixture
def data_for_sorting(dtype):
    """Three distinct timestamps arranged [mid, high, low] for sorting tests."""
    low = pd.Timestamp("2000-01-01")
    mid = pd.Timestamp("2000-01-02")
    high = pd.Timestamp("2000-01-03")
    raw = np.array([mid, high, low], dtype="datetime64[ns]")
    return DatetimeArray._from_sequence(raw, dtype=dtype)
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
@pytest.fixture
def data_missing_for_sorting(dtype):
    """[later, NaT, earlier]: valid values surrounding one missing slot."""
    earlier = pd.Timestamp("2000-01-01")
    later = pd.Timestamp("2000-01-02")
    raw = np.array([later, "NaT", earlier], dtype="datetime64[ns]")
    return DatetimeArray._from_sequence(raw, dtype=dtype)
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
@pytest.fixture
def data_for_grouping(dtype):
    """
    Expected to be like [B, B, NA, NA, A, A, B, C]

    Where A < B < C and NA is missing
    """
    a = pd.Timestamp("2000-01-01")
    b = pd.Timestamp("2000-01-02")
    c = pd.Timestamp("2000-01-03")
    na = "NaT"
    layout = np.array([b, b, na, na, a, a, b, c], dtype="datetime64[ns]")
    return DatetimeArray._from_sequence(layout, dtype=dtype)
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
@pytest.fixture
def na_cmp():
    """NA comparator: both operands must be the one NaT singleton."""

    def _both_nat(left, right):
        return left is pd.NaT and left is right

    return _both_nat
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
# ----------------------------------------------------------------------------
|
| 91 |
+
class TestDatetimeArray(base.ExtensionTests):
    """Extension-interface compliance suite run against tz-aware DatetimeArray."""

    def _get_expected_exception(self, op_name, obj, other):
        # Datetime subtraction is well defined (yields timedeltas), so no
        # exception is expected for __sub__/__rsub__.
        if op_name in ["__sub__", "__rsub__"]:
            return None
        return super()._get_expected_exception(op_name, obj, other)

    def _supports_accumulation(self, ser, op_name: str) -> bool:
        # Only running min/max make sense for datetimes; cumsum/cumprod do not.
        return op_name in ["cummin", "cummax"]

    def _supports_reduction(self, obj, op_name: str) -> bool:
        return op_name in ["min", "max", "median", "mean", "std", "any", "all"]

    @pytest.mark.parametrize("skipna", [True, False])
    def test_reduce_series_boolean(self, data, all_boolean_reductions, skipna):
        # any/all on datetime64 is deprecated; assert the warning fires while
        # still running the shared test.
        meth = all_boolean_reductions
        msg = f"'{meth}' with datetime64 dtypes is deprecated and will raise in"
        with tm.assert_produces_warning(
            FutureWarning, match=msg, check_stacklevel=False
        ):
            super().test_reduce_series_boolean(data, all_boolean_reductions, skipna)

    def test_series_constructor(self, data):
        # Series construction drops any .freq attr
        data = data._with_freq(None)
        super().test_series_constructor(data)

    @pytest.mark.parametrize("na_action", [None, "ignore"])
    def test_map(self, data, na_action):
        # Identity map must round-trip the array regardless of na_action.
        result = data.map(lambda x: x, na_action=na_action)
        tm.assert_extension_array_equal(result, data)

    def check_reduce(self, ser: pd.Series, op_name: str, skipna: bool):
        # For median/mean/std, compare against the same reduction computed on
        # the int64 (epoch-nanosecond) view, re-wrapped as Timestamp/Timedelta.
        if op_name in ["median", "mean", "std"]:
            alt = ser.astype("int64")

            res_op = getattr(ser, op_name)
            exp_op = getattr(alt, op_name)
            result = res_op(skipna=skipna)
            expected = exp_op(skipna=skipna)
            if op_name in ["mean", "median"]:
                # error: Item "dtype[Any]" of "dtype[Any] | ExtensionDtype"
                # has no attribute "tz"
                tz = ser.dtype.tz  # type: ignore[union-attr]
                expected = pd.Timestamp(expected, tz=tz)
            else:
                # std of datetimes is a duration, not a point in time
                expected = pd.Timedelta(expected)
            tm.assert_almost_equal(result, expected)

        else:
            return super().check_reduce(ser, op_name, skipna)
|
| 141 |
+
|
| 142 |
+
|
| 143 |
+
class Test2DCompat(base.NDArrayBacked2DTests):
    """Run the shared 2D-compatibility suite against DatetimeArray."""

    pass
|
mgm/lib/python3.10/site-packages/pandas/tests/extension/test_sparse.py
ADDED
|
@@ -0,0 +1,503 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
This file contains a minimal set of tests for compliance with the extension
|
| 3 |
+
array interface test suite, and should contain no other tests.
|
| 4 |
+
The test suite for the full functionality of the array is located in
|
| 5 |
+
`pandas/tests/arrays/`.
|
| 6 |
+
|
| 7 |
+
The tests in this file are inherited from the BaseExtensionTests, and only
|
| 8 |
+
minimal tweaks should be applied to get the tests passing (by overwriting a
|
| 9 |
+
parent method).
|
| 10 |
+
|
| 11 |
+
Additional tests should either be added to one of the BaseExtensionTests
|
| 12 |
+
classes (if they are relevant for the extension interface for all dtypes), or
|
| 13 |
+
be added to the array-specific tests in `pandas/tests/arrays/`.
|
| 14 |
+
|
| 15 |
+
"""
|
| 16 |
+
|
| 17 |
+
import numpy as np
|
| 18 |
+
import pytest
|
| 19 |
+
|
| 20 |
+
from pandas.errors import PerformanceWarning
|
| 21 |
+
|
| 22 |
+
import pandas as pd
|
| 23 |
+
from pandas import SparseDtype
|
| 24 |
+
import pandas._testing as tm
|
| 25 |
+
from pandas.arrays import SparseArray
|
| 26 |
+
from pandas.tests.extension import base
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def make_data(fill_value):
    """Build a deterministic length-100 ndarray with every third element
    (starting at index 2) set to ``fill_value``.

    A NaN fill value produces random floats; any other fill value produces
    random integers in [1, 100). Seeded, so repeated calls are identical.
    """
    rng = np.random.default_rng(2)
    if np.isnan(fill_value):
        values = rng.uniform(size=100)
    else:
        values = rng.integers(1, 100, size=100, dtype=int)
        # keep the two leading entries distinct so tests see >1 unique value
        if values[0] == values[1]:
            values[0] += 1

    values[2::3] = fill_value
    return values
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
@pytest.fixture
def dtype():
    """Default SparseDtype (float64 subtype with NaN fill value)."""
    sparse_dtype = SparseDtype()
    return sparse_dtype
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
@pytest.fixture(params=[0, np.nan])
def data(request):
    """Length-100 SparseArray for semantics tests, parametrized over fill_value."""
    # NOTE: the docstring previously claimed "PeriodArray" — a copy/paste
    # leftover from the period test module; this fixture builds a SparseArray.
    res = SparseArray(make_data(request.param), fill_value=request.param)
    return res
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
@pytest.fixture
def data_for_twos():
    """Length-100 SparseArray where every element equals 2.0."""
    return SparseArray(np.full(100, 2.0))
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
@pytest.fixture(params=[0, np.nan])
def data_missing(request):
    """Length 2 array with [NA, Valid]"""
    fill = request.param
    return SparseArray([np.nan, 1], fill_value=fill)
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
@pytest.fixture(params=[0, np.nan])
def data_repeated(request):
    """Yield a factory that produces ``count`` fresh SparseArrays."""
    fill = request.param

    def _factory(count):
        for _ in range(count):
            yield SparseArray(make_data(fill), fill_value=fill)

    yield _factory
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
@pytest.fixture(params=[0, np.nan])
def data_for_sorting(request):
    """Three unsorted distinct values, parametrized over fill_value."""
    values = [2, 3, 1]
    return SparseArray(values, fill_value=request.param)
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
@pytest.fixture(params=[0, np.nan])
def data_missing_for_sorting(request):
    """[valid, NA, valid] with the NA in the middle, over both fill values."""
    values = [2, np.nan, 1]
    return SparseArray(values, fill_value=request.param)
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
@pytest.fixture
def na_cmp():
    """NA comparator: both operands must register as missing via pd.isna."""

    def _both_na(left, right):
        return pd.isna(left) and pd.isna(right)

    return _both_na
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
@pytest.fixture(params=[0, np.nan])
def data_for_grouping(request):
    """[B, B, NA, NA, A, A, B, C]-shaped data (A=2... here 1,1,NA,NA,2,2,1,3)."""
    values = [1, 1, np.nan, np.nan, 2, 2, 1, 3]
    return SparseArray(values, fill_value=request.param)
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
@pytest.fixture(params=[0, np.nan])
def data_for_compare(request):
    """Mixed positive/negative/zero/NA values for comparison-op tests."""
    values = [0, 0, np.nan, -2, -1, 4, 2, 3, 0, 0]
    return SparseArray(values, fill_value=request.param)
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
class TestSparseArray(base.ExtensionTests):
    """Extension-interface compliance suite run against SparseArray.

    Many overrides exist because Sparse[int64, 0] cannot represent NaN and
    because arithmetic/comparison results carry a derived fill_value.
    """

    def _supports_reduction(self, obj, op_name: str) -> bool:
        return True

    @pytest.mark.parametrize("skipna", [True, False])
    def test_reduce_series_numeric(self, data, all_numeric_reductions, skipna, request):
        # Reductions not yet implemented for sparse are marked xfail rather
        # than skipped so regressions/implementations are noticed.
        if all_numeric_reductions in [
            "prod",
            "median",
            "var",
            "std",
            "sem",
            "skew",
            "kurt",
        ]:
            mark = pytest.mark.xfail(
                reason="This should be viable but is not implemented"
            )
            request.node.add_marker(mark)
        elif (
            all_numeric_reductions in ["sum", "max", "min", "mean"]
            and data.dtype.kind == "f"
            and not skipna
        ):
            mark = pytest.mark.xfail(reason="getting a non-nan float")
            request.node.add_marker(mark)

        super().test_reduce_series_numeric(data, all_numeric_reductions, skipna)

    @pytest.mark.parametrize("skipna", [True, False])
    def test_reduce_frame(self, data, all_numeric_reductions, skipna, request):
        # Mirrors test_reduce_series_numeric; only the second xfail reason differs.
        if all_numeric_reductions in [
            "prod",
            "median",
            "var",
            "std",
            "sem",
            "skew",
            "kurt",
        ]:
            mark = pytest.mark.xfail(
                reason="This should be viable but is not implemented"
            )
            request.node.add_marker(mark)
        elif (
            all_numeric_reductions in ["sum", "max", "min", "mean"]
            and data.dtype.kind == "f"
            and not skipna
        ):
            mark = pytest.mark.xfail(reason="ExtensionArray NA mask are different")
            request.node.add_marker(mark)

        super().test_reduce_frame(data, all_numeric_reductions, skipna)

    def _check_unsupported(self, data):
        # Shared guard: integer-backed sparse with fill 0 cannot hold NaN.
        if data.dtype == SparseDtype(int, 0):
            pytest.skip("Can't store nan in int array.")

    def test_concat_mixed_dtypes(self, data):
        # https://github.com/pandas-dev/pandas/issues/20762
        # This should be the same, aside from concat([sparse, float])
        df1 = pd.DataFrame({"A": data[:3]})
        df2 = pd.DataFrame({"A": [1, 2, 3]})
        df3 = pd.DataFrame({"A": ["a", "b", "c"]}).astype("category")
        dfs = [df1, df2, df3]

        # dataframes
        result = pd.concat(dfs)
        expected = pd.concat(
            [x.apply(lambda s: np.asarray(s).astype(object)) for x in dfs]
        )
        tm.assert_frame_equal(result, expected)

    @pytest.mark.filterwarnings(
        "ignore:The previous implementation of stack is deprecated"
    )
    @pytest.mark.parametrize(
        "columns",
        [
            ["A", "B"],
            pd.MultiIndex.from_tuples(
                [("A", "a"), ("A", "b")], names=["outer", "inner"]
            ),
        ],
    )
    @pytest.mark.parametrize("future_stack", [True, False])
    def test_stack(self, data, columns, future_stack):
        super().test_stack(data, columns, future_stack)

    def test_concat_columns(self, data, na_value):
        self._check_unsupported(data)
        super().test_concat_columns(data, na_value)

    def test_concat_extension_arrays_copy_false(self, data, na_value):
        self._check_unsupported(data)
        super().test_concat_extension_arrays_copy_false(data, na_value)

    def test_align(self, data, na_value):
        self._check_unsupported(data)
        super().test_align(data, na_value)

    def test_align_frame(self, data, na_value):
        self._check_unsupported(data)
        super().test_align_frame(data, na_value)

    def test_align_series_frame(self, data, na_value):
        self._check_unsupported(data)
        super().test_align_series_frame(data, na_value)

    def test_merge(self, data, na_value):
        self._check_unsupported(data)
        super().test_merge(data, na_value)

    def test_get(self, data):
        # .get looks up by label; with even-integer labels, label 4 is
        # positional index 2 and label 2 is positional index 1.
        ser = pd.Series(data, index=[2 * i for i in range(len(data))])
        if np.isnan(ser.values.fill_value):
            assert np.isnan(ser.get(4)) and np.isnan(ser.iloc[2])
        else:
            assert ser.get(4) == ser.iloc[2]
        assert ser.get(2) == ser.iloc[1]

    def test_reindex(self, data, na_value):
        self._check_unsupported(data)
        super().test_reindex(data, na_value)

    def test_isna(self, data_missing):
        sarr = SparseArray(data_missing)
        expected_dtype = SparseDtype(bool, pd.isna(data_missing.dtype.fill_value))
        expected = SparseArray([True, False], dtype=expected_dtype)
        result = sarr.isna()
        tm.assert_sp_array_equal(result, expected)

        # test isna for arr without na
        sarr = sarr.fillna(0)
        expected_dtype = SparseDtype(bool, pd.isna(data_missing.dtype.fill_value))
        expected = SparseArray([False, False], fill_value=False, dtype=expected_dtype)
        tm.assert_equal(sarr.isna(), expected)

    def test_fillna_limit_backfill(self, data_missing):
        # Sparse fillna-with-limit is slow (PerformanceWarning) and the
        # limit keyword itself is deprecated (FutureWarning).
        warns = (PerformanceWarning, FutureWarning)
        with tm.assert_produces_warning(warns, check_stacklevel=False):
            super().test_fillna_limit_backfill(data_missing)

    def test_fillna_no_op_returns_copy(self, data, request):
        if np.isnan(data.fill_value):
            request.applymarker(
                pytest.mark.xfail(reason="returns array with different fill value")
            )
        super().test_fillna_no_op_returns_copy(data)

    @pytest.mark.xfail(reason="Unsupported")
    def test_fillna_series(self, data_missing):
        # this one looks doable.
        # TODO: this fails bc we do not pass through data_missing. If we did,
        # the 0-fill case would xpass
        super().test_fillna_series()

    def test_fillna_frame(self, data_missing):
        # Have to override to specify that fill_value will change.
        fill_value = data_missing[1]

        result = pd.DataFrame({"A": data_missing, "B": [1, 2]}).fillna(fill_value)

        if pd.isna(data_missing.fill_value):
            dtype = SparseDtype(data_missing.dtype, fill_value)
        else:
            dtype = data_missing.dtype

        expected = pd.DataFrame(
            {
                "A": data_missing._from_sequence([fill_value, fill_value], dtype=dtype),
                "B": [1, 2],
            }
        )

        tm.assert_frame_equal(result, expected)

    # Sparse comparison results keep a sparse dtype rather than plain bool.
    _combine_le_expected_dtype = "Sparse[bool]"

    def test_fillna_copy_frame(self, data_missing, using_copy_on_write):
        arr = data_missing.take([1, 1])
        df = pd.DataFrame({"A": arr}, copy=False)

        filled_val = df.iloc[0, 0]
        result = df.fillna(filled_val)

        if hasattr(df._mgr, "blocks"):
            if using_copy_on_write:
                assert df.values.base is result.values.base
            else:
                assert df.values.base is not result.values.base
        assert df.A._values.to_dense() is arr.to_dense()

    def test_fillna_copy_series(self, data_missing, using_copy_on_write):
        arr = data_missing.take([1, 1])
        ser = pd.Series(arr, copy=False)

        filled_val = ser[0]
        result = ser.fillna(filled_val)

        if using_copy_on_write:
            assert ser._values is result._values

        else:
            assert ser._values is not result._values
        assert ser._values.to_dense() is arr.to_dense()

    @pytest.mark.xfail(reason="Not Applicable")
    def test_fillna_length_mismatch(self, data_missing):
        super().test_fillna_length_mismatch(data_missing)

    def test_where_series(self, data, na_value):
        assert data[0] != data[1]
        cls = type(data)
        a, b = data[:2]

        ser = pd.Series(cls._from_sequence([a, a, b, b], dtype=data.dtype))

        cond = np.array([True, True, False, False])
        result = ser.where(cond)

        # masking introduces NAs, so the result dtype shifts to float sparse
        new_dtype = SparseDtype("float", 0.0)
        expected = pd.Series(
            cls._from_sequence([a, a, na_value, na_value], dtype=new_dtype)
        )
        tm.assert_series_equal(result, expected)

        other = cls._from_sequence([a, b, a, b], dtype=data.dtype)
        cond = np.array([True, False, True, True])
        result = ser.where(cond, other)
        expected = pd.Series(cls._from_sequence([a, b, b, b], dtype=data.dtype))
        tm.assert_series_equal(result, expected)

    def test_searchsorted(self, data_for_sorting, as_series):
        # searchsorted densifies, which emits a PerformanceWarning.
        with tm.assert_produces_warning(PerformanceWarning, check_stacklevel=False):
            super().test_searchsorted(data_for_sorting, as_series)

    def test_shift_0_periods(self, data):
        # GH#33856 shifting with periods=0 should return a copy, not same obj
        result = data.shift(0)

        data._sparse_values[0] = data._sparse_values[1]
        assert result._sparse_values[0] != result._sparse_values[1]

    @pytest.mark.parametrize("method", ["argmax", "argmin"])
    def test_argmin_argmax_all_na(self, method, data, na_value):
        # overriding because Sparse[int64, 0] cannot handle na_value
        self._check_unsupported(data)
        super().test_argmin_argmax_all_na(method, data, na_value)

    @pytest.mark.fails_arm_wheels
    @pytest.mark.parametrize("box", [pd.array, pd.Series, pd.DataFrame])
    def test_equals(self, data, na_value, as_series, box):
        self._check_unsupported(data)
        super().test_equals(data, na_value, as_series, box)

    @pytest.mark.fails_arm_wheels
    def test_equals_same_data_different_object(self, data):
        super().test_equals_same_data_different_object(data)

    @pytest.mark.parametrize(
        "func, na_action, expected",
        [
            (lambda x: x, None, SparseArray([1.0, np.nan])),
            (lambda x: x, "ignore", SparseArray([1.0, np.nan])),
            (str, None, SparseArray(["1.0", "nan"], fill_value="nan")),
            (str, "ignore", SparseArray(["1.0", np.nan])),
        ],
    )
    def test_map(self, func, na_action, expected):
        # GH52096
        data = SparseArray([1, np.nan])
        result = data.map(func, na_action=na_action)
        tm.assert_extension_array_equal(result, expected)

    @pytest.mark.parametrize("na_action", [None, "ignore"])
    def test_map_raises(self, data, na_action):
        # GH52096
        msg = "fill value in the sparse values not supported"
        with pytest.raises(ValueError, match=msg):
            data.map(lambda x: np.nan, na_action=na_action)

    @pytest.mark.xfail(raises=TypeError, reason="no sparse StringDtype")
    def test_astype_string(self, data, nullable_string_dtype):
        # TODO: this fails bc we do not pass through nullable_string_dtype;
        # If we did, the 0-cases would xpass
        super().test_astype_string(data)

    # No exceptions expected from arithmetic ops in any of these contexts.
    series_scalar_exc = None
    frame_scalar_exc = None
    divmod_exc = None
    series_array_exc = None

    def _skip_if_different_combine(self, data):
        if data.fill_value == 0:
            # arith ops call on dtype.fill_value so that the sparsity
            # is maintained. Combine can't be called on a dtype in
            # general, so we can't make the expected. This is tested elsewhere
            pytest.skip("Incorrected expected from Series.combine and tested elsewhere")

    def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
        self._skip_if_different_combine(data)
        super().test_arith_series_with_scalar(data, all_arithmetic_operators)

    def test_arith_series_with_array(self, data, all_arithmetic_operators):
        self._skip_if_different_combine(data)
        super().test_arith_series_with_array(data, all_arithmetic_operators)

    def test_arith_frame_with_scalar(self, data, all_arithmetic_operators, request):
        if data.dtype.fill_value != 0:
            pass
        elif all_arithmetic_operators.strip("_") not in [
            "mul",
            "rmul",
            "floordiv",
            "rfloordiv",
            "pow",
            "mod",
            "rmod",
        ]:
            mark = pytest.mark.xfail(reason="result dtype.fill_value mismatch")
            request.applymarker(mark)
        super().test_arith_frame_with_scalar(data, all_arithmetic_operators)

    def _compare_other(
        self, ser: pd.Series, data_for_compare: SparseArray, comparison_op, other
    ):
        # Checks both the result container/dtype and the element values,
        # building the expected fill_value from the operands' fill values.
        op = comparison_op

        result = op(data_for_compare, other)
        if isinstance(other, pd.Series):
            assert isinstance(result, pd.Series)
            assert isinstance(result.dtype, SparseDtype)
        else:
            assert isinstance(result, SparseArray)
            assert result.dtype.subtype == np.bool_

        if isinstance(other, pd.Series):
            fill_value = op(data_for_compare.fill_value, other._values.fill_value)
            expected = SparseArray(
                op(data_for_compare.to_dense(), np.asarray(other)),
                fill_value=fill_value,
                dtype=np.bool_,
            )

        else:
            fill_value = np.all(
                op(np.asarray(data_for_compare.fill_value), np.asarray(other))
            )

            expected = SparseArray(
                op(data_for_compare.to_dense(), np.asarray(other)),
                fill_value=fill_value,
                dtype=np.bool_,
            )
        if isinstance(other, pd.Series):
            # error: Incompatible types in assignment
            expected = pd.Series(expected)  # type: ignore[assignment]
        tm.assert_equal(result, expected)

    def test_scalar(self, data_for_compare: SparseArray, comparison_op):
        ser = pd.Series(data_for_compare)
        self._compare_other(ser, data_for_compare, comparison_op, 0)
        self._compare_other(ser, data_for_compare, comparison_op, 1)
        self._compare_other(ser, data_for_compare, comparison_op, -1)
        self._compare_other(ser, data_for_compare, comparison_op, np.nan)

    def test_array(self, data_for_compare: SparseArray, comparison_op, request):
        if data_for_compare.dtype.fill_value == 0 and comparison_op.__name__ in [
            "eq",
            "ge",
            "le",
        ]:
            mark = pytest.mark.xfail(reason="Wrong fill_value")
            request.applymarker(mark)

        arr = np.linspace(-4, 5, 10)
        ser = pd.Series(data_for_compare)
        self._compare_other(ser, data_for_compare, comparison_op, arr)

    def test_sparse_array(self, data_for_compare: SparseArray, comparison_op, request):
        if data_for_compare.dtype.fill_value == 0 and comparison_op.__name__ != "gt":
            mark = pytest.mark.xfail(reason="Wrong fill_value")
            request.applymarker(mark)

        ser = pd.Series(data_for_compare)
        arr = data_for_compare + 1
        self._compare_other(ser, data_for_compare, comparison_op, arr)
        arr = data_for_compare * 2
        self._compare_other(ser, data_for_compare, comparison_op, arr)

    @pytest.mark.xfail(reason="Different repr")
    def test_array_repr(self, data, size):
        super().test_array_repr(data, size)

    @pytest.mark.xfail(reason="result does not match expected")
    @pytest.mark.parametrize("as_index", [True, False])
    def test_groupby_extension_agg(self, as_index, data_for_grouping):
        super().test_groupby_extension_agg(as_index, data_for_grouping)
|
| 500 |
+
|
| 501 |
+
|
| 502 |
+
def test_array_type_with_arg(dtype):
|
| 503 |
+
assert dtype.construct_array_type() is SparseArray
|
mgm/lib/python3.10/site-packages/pandas/tests/frame/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (167 Bytes). View file
|
|
|
mgm/lib/python3.10/site-packages/pandas/tests/frame/__pycache__/test_arrow_interface.cpython-310.pyc
ADDED
|
Binary file (1.41 kB). View file
|
|
|
mgm/lib/python3.10/site-packages/pandas/tests/frame/__pycache__/test_stack_unstack.cpython-310.pyc
ADDED
|
Binary file (68.2 kB). View file
|
|
|
mgm/lib/python3.10/site-packages/pandas/tests/frame/__pycache__/test_subclass.cpython-310.pyc
ADDED
|
Binary file (23.9 kB). View file
|
|
|
mgm/lib/python3.10/site-packages/pandas/tests/io/__init__.py
ADDED
|
File without changes
|
mgm/lib/python3.10/site-packages/pandas/tests/io/conftest.py
ADDED
|
@@ -0,0 +1,242 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import shlex
|
| 2 |
+
import subprocess
|
| 3 |
+
import time
|
| 4 |
+
import uuid
|
| 5 |
+
|
| 6 |
+
import pytest
|
| 7 |
+
|
| 8 |
+
from pandas.compat import (
|
| 9 |
+
is_ci_environment,
|
| 10 |
+
is_platform_arm,
|
| 11 |
+
is_platform_mac,
|
| 12 |
+
is_platform_windows,
|
| 13 |
+
)
|
| 14 |
+
import pandas.util._test_decorators as td
|
| 15 |
+
|
| 16 |
+
import pandas.io.common as icom
|
| 17 |
+
from pandas.io.parsers import read_csv
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
@pytest.fixture
|
| 21 |
+
def compression_to_extension():
|
| 22 |
+
return {value: key for key, value in icom.extension_to_compression.items()}
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
@pytest.fixture
|
| 26 |
+
def tips_file(datapath):
|
| 27 |
+
"""Path to the tips dataset"""
|
| 28 |
+
return datapath("io", "data", "csv", "tips.csv")
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
@pytest.fixture
|
| 32 |
+
def jsonl_file(datapath):
|
| 33 |
+
"""Path to a JSONL dataset"""
|
| 34 |
+
return datapath("io", "parser", "data", "items.jsonl")
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
@pytest.fixture
|
| 38 |
+
def salaries_table(datapath):
|
| 39 |
+
"""DataFrame with the salaries dataset"""
|
| 40 |
+
return read_csv(datapath("io", "parser", "data", "salaries.csv"), sep="\t")
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
@pytest.fixture
|
| 44 |
+
def feather_file(datapath):
|
| 45 |
+
return datapath("io", "data", "feather", "feather-0_3_1.feather")
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
@pytest.fixture
|
| 49 |
+
def xml_file(datapath):
|
| 50 |
+
return datapath("io", "data", "xml", "books.xml")
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
@pytest.fixture
|
| 54 |
+
def s3_base(worker_id, monkeypatch):
|
| 55 |
+
"""
|
| 56 |
+
Fixture for mocking S3 interaction.
|
| 57 |
+
|
| 58 |
+
Sets up moto server in separate process locally
|
| 59 |
+
Return url for motoserver/moto CI service
|
| 60 |
+
"""
|
| 61 |
+
pytest.importorskip("s3fs")
|
| 62 |
+
pytest.importorskip("boto3")
|
| 63 |
+
|
| 64 |
+
# temporary workaround as moto fails for botocore >= 1.11 otherwise,
|
| 65 |
+
# see https://github.com/spulec/moto/issues/1924 & 1952
|
| 66 |
+
monkeypatch.setenv("AWS_ACCESS_KEY_ID", "foobar_key")
|
| 67 |
+
monkeypatch.setenv("AWS_SECRET_ACCESS_KEY", "foobar_secret")
|
| 68 |
+
if is_ci_environment():
|
| 69 |
+
if is_platform_arm() or is_platform_mac() or is_platform_windows():
|
| 70 |
+
# NOT RUN on Windows/macOS/ARM, only Ubuntu
|
| 71 |
+
# - subprocess in CI can cause timeouts
|
| 72 |
+
# - GitHub Actions do not support
|
| 73 |
+
# container services for the above OSs
|
| 74 |
+
# - CircleCI will probably hit the Docker rate pull limit
|
| 75 |
+
pytest.skip(
|
| 76 |
+
"S3 tests do not have a corresponding service in "
|
| 77 |
+
"Windows, macOS or ARM platforms"
|
| 78 |
+
)
|
| 79 |
+
else:
|
| 80 |
+
# set in .github/workflows/unit-tests.yml
|
| 81 |
+
yield "http://localhost:5000"
|
| 82 |
+
else:
|
| 83 |
+
requests = pytest.importorskip("requests")
|
| 84 |
+
pytest.importorskip("moto")
|
| 85 |
+
pytest.importorskip("flask") # server mode needs flask too
|
| 86 |
+
|
| 87 |
+
# Launching moto in server mode, i.e., as a separate process
|
| 88 |
+
# with an S3 endpoint on localhost
|
| 89 |
+
|
| 90 |
+
worker_id = "5" if worker_id == "master" else worker_id.lstrip("gw")
|
| 91 |
+
endpoint_port = f"555{worker_id}"
|
| 92 |
+
endpoint_uri = f"http://127.0.0.1:{endpoint_port}/"
|
| 93 |
+
|
| 94 |
+
# pipe to null to avoid logging in terminal
|
| 95 |
+
with subprocess.Popen(
|
| 96 |
+
shlex.split(f"moto_server s3 -p {endpoint_port}"),
|
| 97 |
+
stdout=subprocess.DEVNULL,
|
| 98 |
+
stderr=subprocess.DEVNULL,
|
| 99 |
+
) as proc:
|
| 100 |
+
timeout = 5
|
| 101 |
+
while timeout > 0:
|
| 102 |
+
try:
|
| 103 |
+
# OK to go once server is accepting connections
|
| 104 |
+
r = requests.get(endpoint_uri)
|
| 105 |
+
if r.ok:
|
| 106 |
+
break
|
| 107 |
+
except Exception:
|
| 108 |
+
pass
|
| 109 |
+
timeout -= 0.1
|
| 110 |
+
time.sleep(0.1)
|
| 111 |
+
yield endpoint_uri
|
| 112 |
+
|
| 113 |
+
proc.terminate()
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
@pytest.fixture
|
| 117 |
+
def s3so(s3_base):
|
| 118 |
+
return {"client_kwargs": {"endpoint_url": s3_base}}
|
| 119 |
+
|
| 120 |
+
|
| 121 |
+
@pytest.fixture
|
| 122 |
+
def s3_resource(s3_base):
|
| 123 |
+
import boto3
|
| 124 |
+
|
| 125 |
+
s3 = boto3.resource("s3", endpoint_url=s3_base)
|
| 126 |
+
return s3
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
@pytest.fixture
|
| 130 |
+
def s3_public_bucket(s3_resource):
|
| 131 |
+
bucket = s3_resource.Bucket(f"pandas-test-{uuid.uuid4()}")
|
| 132 |
+
bucket.create()
|
| 133 |
+
yield bucket
|
| 134 |
+
bucket.objects.delete()
|
| 135 |
+
bucket.delete()
|
| 136 |
+
|
| 137 |
+
|
| 138 |
+
@pytest.fixture
|
| 139 |
+
def s3_public_bucket_with_data(
|
| 140 |
+
s3_public_bucket, tips_file, jsonl_file, feather_file, xml_file
|
| 141 |
+
):
|
| 142 |
+
"""
|
| 143 |
+
The following datasets
|
| 144 |
+
are loaded.
|
| 145 |
+
|
| 146 |
+
- tips.csv
|
| 147 |
+
- tips.csv.gz
|
| 148 |
+
- tips.csv.bz2
|
| 149 |
+
- items.jsonl
|
| 150 |
+
"""
|
| 151 |
+
test_s3_files = [
|
| 152 |
+
("tips#1.csv", tips_file),
|
| 153 |
+
("tips.csv", tips_file),
|
| 154 |
+
("tips.csv.gz", tips_file + ".gz"),
|
| 155 |
+
("tips.csv.bz2", tips_file + ".bz2"),
|
| 156 |
+
("items.jsonl", jsonl_file),
|
| 157 |
+
("simple_dataset.feather", feather_file),
|
| 158 |
+
("books.xml", xml_file),
|
| 159 |
+
]
|
| 160 |
+
for s3_key, file_name in test_s3_files:
|
| 161 |
+
with open(file_name, "rb") as f:
|
| 162 |
+
s3_public_bucket.put_object(Key=s3_key, Body=f)
|
| 163 |
+
return s3_public_bucket
|
| 164 |
+
|
| 165 |
+
|
| 166 |
+
@pytest.fixture
|
| 167 |
+
def s3_private_bucket(s3_resource):
|
| 168 |
+
bucket = s3_resource.Bucket(f"cant_get_it-{uuid.uuid4()}")
|
| 169 |
+
bucket.create(ACL="private")
|
| 170 |
+
yield bucket
|
| 171 |
+
bucket.objects.delete()
|
| 172 |
+
bucket.delete()
|
| 173 |
+
|
| 174 |
+
|
| 175 |
+
@pytest.fixture
|
| 176 |
+
def s3_private_bucket_with_data(
|
| 177 |
+
s3_private_bucket, tips_file, jsonl_file, feather_file, xml_file
|
| 178 |
+
):
|
| 179 |
+
"""
|
| 180 |
+
The following datasets
|
| 181 |
+
are loaded.
|
| 182 |
+
|
| 183 |
+
- tips.csv
|
| 184 |
+
- tips.csv.gz
|
| 185 |
+
- tips.csv.bz2
|
| 186 |
+
- items.jsonl
|
| 187 |
+
"""
|
| 188 |
+
test_s3_files = [
|
| 189 |
+
("tips#1.csv", tips_file),
|
| 190 |
+
("tips.csv", tips_file),
|
| 191 |
+
("tips.csv.gz", tips_file + ".gz"),
|
| 192 |
+
("tips.csv.bz2", tips_file + ".bz2"),
|
| 193 |
+
("items.jsonl", jsonl_file),
|
| 194 |
+
("simple_dataset.feather", feather_file),
|
| 195 |
+
("books.xml", xml_file),
|
| 196 |
+
]
|
| 197 |
+
for s3_key, file_name in test_s3_files:
|
| 198 |
+
with open(file_name, "rb") as f:
|
| 199 |
+
s3_private_bucket.put_object(Key=s3_key, Body=f)
|
| 200 |
+
return s3_private_bucket
|
| 201 |
+
|
| 202 |
+
|
| 203 |
+
_compression_formats_params = [
|
| 204 |
+
(".no_compress", None),
|
| 205 |
+
("", None),
|
| 206 |
+
(".gz", "gzip"),
|
| 207 |
+
(".GZ", "gzip"),
|
| 208 |
+
(".bz2", "bz2"),
|
| 209 |
+
(".BZ2", "bz2"),
|
| 210 |
+
(".zip", "zip"),
|
| 211 |
+
(".ZIP", "zip"),
|
| 212 |
+
(".xz", "xz"),
|
| 213 |
+
(".XZ", "xz"),
|
| 214 |
+
pytest.param((".zst", "zstd"), marks=td.skip_if_no("zstandard")),
|
| 215 |
+
pytest.param((".ZST", "zstd"), marks=td.skip_if_no("zstandard")),
|
| 216 |
+
]
|
| 217 |
+
|
| 218 |
+
|
| 219 |
+
@pytest.fixture(params=_compression_formats_params[1:])
|
| 220 |
+
def compression_format(request):
|
| 221 |
+
return request.param
|
| 222 |
+
|
| 223 |
+
|
| 224 |
+
@pytest.fixture(params=_compression_formats_params)
|
| 225 |
+
def compression_ext(request):
|
| 226 |
+
return request.param[0]
|
| 227 |
+
|
| 228 |
+
|
| 229 |
+
@pytest.fixture(
|
| 230 |
+
params=[
|
| 231 |
+
"python",
|
| 232 |
+
pytest.param("pyarrow", marks=td.skip_if_no("pyarrow")),
|
| 233 |
+
]
|
| 234 |
+
)
|
| 235 |
+
def string_storage(request):
|
| 236 |
+
"""
|
| 237 |
+
Parametrized fixture for pd.options.mode.string_storage.
|
| 238 |
+
|
| 239 |
+
* 'python'
|
| 240 |
+
* 'pyarrow'
|
| 241 |
+
"""
|
| 242 |
+
return request.param
|
mgm/lib/python3.10/site-packages/pandas/tests/io/generate_legacy_storage_files.py
ADDED
|
@@ -0,0 +1,342 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
self-contained to write legacy storage pickle files
|
| 3 |
+
|
| 4 |
+
To use this script. Create an environment where you want
|
| 5 |
+
generate pickles, say its for 0.20.3, with your pandas clone
|
| 6 |
+
in ~/pandas
|
| 7 |
+
|
| 8 |
+
. activate pandas_0.20.3
|
| 9 |
+
cd ~/pandas/pandas
|
| 10 |
+
|
| 11 |
+
$ python -m tests.io.generate_legacy_storage_files \
|
| 12 |
+
tests/io/data/legacy_pickle/0.20.3/ pickle
|
| 13 |
+
|
| 14 |
+
This script generates a storage file for the current arch, system,
|
| 15 |
+
and python version
|
| 16 |
+
pandas version: 0.20.3
|
| 17 |
+
output dir : pandas/pandas/tests/io/data/legacy_pickle/0.20.3/
|
| 18 |
+
storage format: pickle
|
| 19 |
+
created pickle file: 0.20.3_x86_64_darwin_3.5.2.pickle
|
| 20 |
+
|
| 21 |
+
The idea here is you are using the *current* version of the
|
| 22 |
+
generate_legacy_storage_files with an *older* version of pandas to
|
| 23 |
+
generate a pickle file. We will then check this file into a current
|
| 24 |
+
branch, and test using test_pickle.py. This will load the *older*
|
| 25 |
+
pickles and test versus the current data that is generated
|
| 26 |
+
(with main). These are then compared.
|
| 27 |
+
|
| 28 |
+
If we have cases where we changed the signature (e.g. we renamed
|
| 29 |
+
offset -> freq in Timestamp). Then we have to conditionally execute
|
| 30 |
+
in the generate_legacy_storage_files.py to make it
|
| 31 |
+
run under the older AND the newer version.
|
| 32 |
+
|
| 33 |
+
"""
|
| 34 |
+
|
| 35 |
+
from datetime import timedelta
|
| 36 |
+
import os
|
| 37 |
+
import pickle
|
| 38 |
+
import platform as pl
|
| 39 |
+
import sys
|
| 40 |
+
|
| 41 |
+
# Remove script directory from path, otherwise Python will try to
|
| 42 |
+
# import the JSON test directory as the json module
|
| 43 |
+
sys.path.pop(0)
|
| 44 |
+
|
| 45 |
+
import numpy as np
|
| 46 |
+
|
| 47 |
+
import pandas
|
| 48 |
+
from pandas import (
|
| 49 |
+
Categorical,
|
| 50 |
+
DataFrame,
|
| 51 |
+
Index,
|
| 52 |
+
MultiIndex,
|
| 53 |
+
NaT,
|
| 54 |
+
Period,
|
| 55 |
+
RangeIndex,
|
| 56 |
+
Series,
|
| 57 |
+
Timestamp,
|
| 58 |
+
bdate_range,
|
| 59 |
+
date_range,
|
| 60 |
+
interval_range,
|
| 61 |
+
period_range,
|
| 62 |
+
timedelta_range,
|
| 63 |
+
)
|
| 64 |
+
from pandas.arrays import SparseArray
|
| 65 |
+
|
| 66 |
+
from pandas.tseries.offsets import (
|
| 67 |
+
FY5253,
|
| 68 |
+
BusinessDay,
|
| 69 |
+
BusinessHour,
|
| 70 |
+
CustomBusinessDay,
|
| 71 |
+
DateOffset,
|
| 72 |
+
Day,
|
| 73 |
+
Easter,
|
| 74 |
+
Hour,
|
| 75 |
+
LastWeekOfMonth,
|
| 76 |
+
Minute,
|
| 77 |
+
MonthBegin,
|
| 78 |
+
MonthEnd,
|
| 79 |
+
QuarterBegin,
|
| 80 |
+
QuarterEnd,
|
| 81 |
+
SemiMonthBegin,
|
| 82 |
+
SemiMonthEnd,
|
| 83 |
+
Week,
|
| 84 |
+
WeekOfMonth,
|
| 85 |
+
YearBegin,
|
| 86 |
+
YearEnd,
|
| 87 |
+
)
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def _create_sp_series():
|
| 91 |
+
nan = np.nan
|
| 92 |
+
|
| 93 |
+
# nan-based
|
| 94 |
+
arr = np.arange(15, dtype=np.float64)
|
| 95 |
+
arr[7:12] = nan
|
| 96 |
+
arr[-1:] = nan
|
| 97 |
+
|
| 98 |
+
bseries = Series(SparseArray(arr, kind="block"))
|
| 99 |
+
bseries.name = "bseries"
|
| 100 |
+
return bseries
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
def _create_sp_tsseries():
|
| 104 |
+
nan = np.nan
|
| 105 |
+
|
| 106 |
+
# nan-based
|
| 107 |
+
arr = np.arange(15, dtype=np.float64)
|
| 108 |
+
arr[7:12] = nan
|
| 109 |
+
arr[-1:] = nan
|
| 110 |
+
|
| 111 |
+
date_index = bdate_range("1/1/2011", periods=len(arr))
|
| 112 |
+
bseries = Series(SparseArray(arr, kind="block"), index=date_index)
|
| 113 |
+
bseries.name = "btsseries"
|
| 114 |
+
return bseries
|
| 115 |
+
|
| 116 |
+
|
| 117 |
+
def _create_sp_frame():
|
| 118 |
+
nan = np.nan
|
| 119 |
+
|
| 120 |
+
data = {
|
| 121 |
+
"A": [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6],
|
| 122 |
+
"B": [0, 1, 2, nan, nan, nan, 3, 4, 5, 6],
|
| 123 |
+
"C": np.arange(10).astype(np.int64),
|
| 124 |
+
"D": [0, 1, 2, 3, 4, 5, nan, nan, nan, nan],
|
| 125 |
+
}
|
| 126 |
+
|
| 127 |
+
dates = bdate_range("1/1/2011", periods=10)
|
| 128 |
+
return DataFrame(data, index=dates).apply(SparseArray)
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
def create_pickle_data():
|
| 132 |
+
"""create the pickle data"""
|
| 133 |
+
data = {
|
| 134 |
+
"A": [0.0, 1.0, 2.0, 3.0, np.nan],
|
| 135 |
+
"B": [0, 1, 0, 1, 0],
|
| 136 |
+
"C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
|
| 137 |
+
"D": date_range("1/1/2009", periods=5),
|
| 138 |
+
"E": [0.0, 1, Timestamp("20100101"), "foo", 2.0],
|
| 139 |
+
}
|
| 140 |
+
|
| 141 |
+
scalars = {"timestamp": Timestamp("20130101"), "period": Period("2012", "M")}
|
| 142 |
+
|
| 143 |
+
index = {
|
| 144 |
+
"int": Index(np.arange(10)),
|
| 145 |
+
"date": date_range("20130101", periods=10),
|
| 146 |
+
"period": period_range("2013-01-01", freq="M", periods=10),
|
| 147 |
+
"float": Index(np.arange(10, dtype=np.float64)),
|
| 148 |
+
"uint": Index(np.arange(10, dtype=np.uint64)),
|
| 149 |
+
"timedelta": timedelta_range("00:00:00", freq="30min", periods=10),
|
| 150 |
+
}
|
| 151 |
+
|
| 152 |
+
index["range"] = RangeIndex(10)
|
| 153 |
+
|
| 154 |
+
index["interval"] = interval_range(0, periods=10)
|
| 155 |
+
|
| 156 |
+
mi = {
|
| 157 |
+
"reg2": MultiIndex.from_tuples(
|
| 158 |
+
tuple(
|
| 159 |
+
zip(
|
| 160 |
+
*[
|
| 161 |
+
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
|
| 162 |
+
["one", "two", "one", "two", "one", "two", "one", "two"],
|
| 163 |
+
]
|
| 164 |
+
)
|
| 165 |
+
),
|
| 166 |
+
names=["first", "second"],
|
| 167 |
+
)
|
| 168 |
+
}
|
| 169 |
+
|
| 170 |
+
series = {
|
| 171 |
+
"float": Series(data["A"]),
|
| 172 |
+
"int": Series(data["B"]),
|
| 173 |
+
"mixed": Series(data["E"]),
|
| 174 |
+
"ts": Series(
|
| 175 |
+
np.arange(10).astype(np.int64), index=date_range("20130101", periods=10)
|
| 176 |
+
),
|
| 177 |
+
"mi": Series(
|
| 178 |
+
np.arange(5).astype(np.float64),
|
| 179 |
+
index=MultiIndex.from_tuples(
|
| 180 |
+
tuple(zip(*[[1, 1, 2, 2, 2], [3, 4, 3, 4, 5]])), names=["one", "two"]
|
| 181 |
+
),
|
| 182 |
+
),
|
| 183 |
+
"dup": Series(np.arange(5).astype(np.float64), index=["A", "B", "C", "D", "A"]),
|
| 184 |
+
"cat": Series(Categorical(["foo", "bar", "baz"])),
|
| 185 |
+
"dt": Series(date_range("20130101", periods=5)),
|
| 186 |
+
"dt_tz": Series(date_range("20130101", periods=5, tz="US/Eastern")),
|
| 187 |
+
"period": Series([Period("2000Q1")] * 5),
|
| 188 |
+
}
|
| 189 |
+
|
| 190 |
+
mixed_dup_df = DataFrame(data)
|
| 191 |
+
mixed_dup_df.columns = list("ABCDA")
|
| 192 |
+
frame = {
|
| 193 |
+
"float": DataFrame({"A": series["float"], "B": series["float"] + 1}),
|
| 194 |
+
"int": DataFrame({"A": series["int"], "B": series["int"] + 1}),
|
| 195 |
+
"mixed": DataFrame({k: data[k] for k in ["A", "B", "C", "D"]}),
|
| 196 |
+
"mi": DataFrame(
|
| 197 |
+
{"A": np.arange(5).astype(np.float64), "B": np.arange(5).astype(np.int64)},
|
| 198 |
+
index=MultiIndex.from_tuples(
|
| 199 |
+
tuple(
|
| 200 |
+
zip(
|
| 201 |
+
*[
|
| 202 |
+
["bar", "bar", "baz", "baz", "baz"],
|
| 203 |
+
["one", "two", "one", "two", "three"],
|
| 204 |
+
]
|
| 205 |
+
)
|
| 206 |
+
),
|
| 207 |
+
names=["first", "second"],
|
| 208 |
+
),
|
| 209 |
+
),
|
| 210 |
+
"dup": DataFrame(
|
| 211 |
+
np.arange(15).reshape(5, 3).astype(np.float64), columns=["A", "B", "A"]
|
| 212 |
+
),
|
| 213 |
+
"cat_onecol": DataFrame({"A": Categorical(["foo", "bar"])}),
|
| 214 |
+
"cat_and_float": DataFrame(
|
| 215 |
+
{
|
| 216 |
+
"A": Categorical(["foo", "bar", "baz"]),
|
| 217 |
+
"B": np.arange(3).astype(np.int64),
|
| 218 |
+
}
|
| 219 |
+
),
|
| 220 |
+
"mixed_dup": mixed_dup_df,
|
| 221 |
+
"dt_mixed_tzs": DataFrame(
|
| 222 |
+
{
|
| 223 |
+
"A": Timestamp("20130102", tz="US/Eastern"),
|
| 224 |
+
"B": Timestamp("20130603", tz="CET"),
|
| 225 |
+
},
|
| 226 |
+
index=range(5),
|
| 227 |
+
),
|
| 228 |
+
"dt_mixed2_tzs": DataFrame(
|
| 229 |
+
{
|
| 230 |
+
"A": Timestamp("20130102", tz="US/Eastern"),
|
| 231 |
+
"B": Timestamp("20130603", tz="CET"),
|
| 232 |
+
"C": Timestamp("20130603", tz="UTC"),
|
| 233 |
+
},
|
| 234 |
+
index=range(5),
|
| 235 |
+
),
|
| 236 |
+
}
|
| 237 |
+
|
| 238 |
+
cat = {
|
| 239 |
+
"int8": Categorical(list("abcdefg")),
|
| 240 |
+
"int16": Categorical(np.arange(1000)),
|
| 241 |
+
"int32": Categorical(np.arange(10000)),
|
| 242 |
+
}
|
| 243 |
+
|
| 244 |
+
timestamp = {
|
| 245 |
+
"normal": Timestamp("2011-01-01"),
|
| 246 |
+
"nat": NaT,
|
| 247 |
+
"tz": Timestamp("2011-01-01", tz="US/Eastern"),
|
| 248 |
+
}
|
| 249 |
+
|
| 250 |
+
off = {
|
| 251 |
+
"DateOffset": DateOffset(years=1),
|
| 252 |
+
"DateOffset_h_ns": DateOffset(hour=6, nanoseconds=5824),
|
| 253 |
+
"BusinessDay": BusinessDay(offset=timedelta(seconds=9)),
|
| 254 |
+
"BusinessHour": BusinessHour(normalize=True, n=6, end="15:14"),
|
| 255 |
+
"CustomBusinessDay": CustomBusinessDay(weekmask="Mon Fri"),
|
| 256 |
+
"SemiMonthBegin": SemiMonthBegin(day_of_month=9),
|
| 257 |
+
"SemiMonthEnd": SemiMonthEnd(day_of_month=24),
|
| 258 |
+
"MonthBegin": MonthBegin(1),
|
| 259 |
+
"MonthEnd": MonthEnd(1),
|
| 260 |
+
"QuarterBegin": QuarterBegin(1),
|
| 261 |
+
"QuarterEnd": QuarterEnd(1),
|
| 262 |
+
"Day": Day(1),
|
| 263 |
+
"YearBegin": YearBegin(1),
|
| 264 |
+
"YearEnd": YearEnd(1),
|
| 265 |
+
"Week": Week(1),
|
| 266 |
+
"Week_Tues": Week(2, normalize=False, weekday=1),
|
| 267 |
+
"WeekOfMonth": WeekOfMonth(week=3, weekday=4),
|
| 268 |
+
"LastWeekOfMonth": LastWeekOfMonth(n=1, weekday=3),
|
| 269 |
+
"FY5253": FY5253(n=2, weekday=6, startingMonth=7, variation="last"),
|
| 270 |
+
"Easter": Easter(),
|
| 271 |
+
"Hour": Hour(1),
|
| 272 |
+
"Minute": Minute(1),
|
| 273 |
+
}
|
| 274 |
+
|
| 275 |
+
return {
|
| 276 |
+
"series": series,
|
| 277 |
+
"frame": frame,
|
| 278 |
+
"index": index,
|
| 279 |
+
"scalars": scalars,
|
| 280 |
+
"mi": mi,
|
| 281 |
+
"sp_series": {"float": _create_sp_series(), "ts": _create_sp_tsseries()},
|
| 282 |
+
"sp_frame": {"float": _create_sp_frame()},
|
| 283 |
+
"cat": cat,
|
| 284 |
+
"timestamp": timestamp,
|
| 285 |
+
"offsets": off,
|
| 286 |
+
}
|
| 287 |
+
|
| 288 |
+
|
| 289 |
+
def platform_name():
|
| 290 |
+
return "_".join(
|
| 291 |
+
[
|
| 292 |
+
str(pandas.__version__),
|
| 293 |
+
str(pl.machine()),
|
| 294 |
+
str(pl.system().lower()),
|
| 295 |
+
str(pl.python_version()),
|
| 296 |
+
]
|
| 297 |
+
)
|
| 298 |
+
|
| 299 |
+
|
| 300 |
+
def write_legacy_pickles(output_dir):
|
| 301 |
+
version = pandas.__version__
|
| 302 |
+
|
| 303 |
+
print(
|
| 304 |
+
"This script generates a storage file for the current arch, system, "
|
| 305 |
+
"and python version"
|
| 306 |
+
)
|
| 307 |
+
print(f" pandas version: {version}")
|
| 308 |
+
print(f" output dir : {output_dir}")
|
| 309 |
+
print(" storage format: pickle")
|
| 310 |
+
|
| 311 |
+
pth = f"{platform_name()}.pickle"
|
| 312 |
+
|
| 313 |
+
with open(os.path.join(output_dir, pth), "wb") as fh:
|
| 314 |
+
pickle.dump(create_pickle_data(), fh, pickle.DEFAULT_PROTOCOL)
|
| 315 |
+
|
| 316 |
+
print(f"created pickle file: {pth}")
|
| 317 |
+
|
| 318 |
+
|
| 319 |
+
def write_legacy_file():
|
| 320 |
+
# force our cwd to be the first searched
|
| 321 |
+
sys.path.insert(0, "")
|
| 322 |
+
|
| 323 |
+
if not 3 <= len(sys.argv) <= 4:
|
| 324 |
+
sys.exit(
|
| 325 |
+
"Specify output directory and storage type: generate_legacy_"
|
| 326 |
+
"storage_files.py <output_dir> <storage_type> "
|
| 327 |
+
)
|
| 328 |
+
|
| 329 |
+
output_dir = str(sys.argv[1])
|
| 330 |
+
storage_type = str(sys.argv[2])
|
| 331 |
+
|
| 332 |
+
if not os.path.exists(output_dir):
|
| 333 |
+
os.mkdir(output_dir)
|
| 334 |
+
|
| 335 |
+
if storage_type == "pickle":
|
| 336 |
+
write_legacy_pickles(output_dir=output_dir)
|
| 337 |
+
else:
|
| 338 |
+
sys.exit("storage_type must be one of {'pickle'}")
|
| 339 |
+
|
| 340 |
+
|
| 341 |
+
if __name__ == "__main__":
|
| 342 |
+
write_legacy_file()
|
mgm/lib/python3.10/site-packages/pandas/tests/io/json/__init__.py
ADDED
|
File without changes
|
mgm/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (169 Bytes). View file
|
|
|
mgm/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/conftest.cpython-310.pyc
ADDED
|
Binary file (437 Bytes). View file
|
|
|
mgm/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_compression.cpython-310.pyc
ADDED
|
Binary file (4.43 kB). View file
|
|
|
mgm/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_deprecated_kwargs.cpython-310.pyc
ADDED
|
Binary file (925 Bytes). View file
|
|
|
mgm/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_json_table_schema.cpython-310.pyc
ADDED
|
Binary file (23.8 kB). View file
|
|
|
mgm/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_json_table_schema_ext_dtype.cpython-310.pyc
ADDED
|
Binary file (8.36 kB). View file
|
|
|
mgm/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_normalize.cpython-310.pyc
ADDED
|
Binary file (20 kB). View file
|
|
|
mgm/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_pandas.cpython-310.pyc
ADDED
|
Binary file (65.6 kB). View file
|
|
|
mgm/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_readlines.cpython-310.pyc
ADDED
|
Binary file (15 kB). View file
|
|
|
mgm/lib/python3.10/site-packages/pandas/tests/io/json/__pycache__/test_ujson.cpython-310.pyc
ADDED
|
Binary file (35.5 kB). View file
|
|
|